From 0dae08ead9112adb81ad507b81187f00f77bc168 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Sat, 14 Apr 2012 13:55:11 -0700 Subject: Move JNI methods out of top-level. Change-Id: Ia9c883ba9a13b205cda7b72024e2939f02583acb --- src/native/java_lang_System.cc | 259 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 259 insertions(+) create mode 100644 src/native/java_lang_System.cc (limited to 'src/native/java_lang_System.cc') diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc new file mode 100644 index 0000000000..741b319718 --- /dev/null +++ b/src/native/java_lang_System.cc @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jni_internal.h" +#include "object.h" + +#include "JniConstants.h" // Last to avoid problems with LOG redefinition. + +/* + * We make guarantees about the atomicity of accesses to primitive + * variables. These guarantees also apply to elements of arrays. + * In particular, 8-bit, 16-bit, and 32-bit accesses must be atomic and + * must not cause "word tearing". Accesses to 64-bit array elements must + * either be atomic or treated as two 32-bit operations. References are + * always read and written atomically, regardless of the number of bits + * used to represent them. 
+ * + * We can't rely on standard libc functions like memcpy(3) and memmove(3) + * in our implementation of System.arraycopy, because they may copy + * byte-by-byte (either for the full run or for "unaligned" parts at the + * start or end). We need to use functions that guarantee 16-bit or 32-bit + * atomicity as appropriate. + * + * System.arraycopy() is heavily used, so having an efficient implementation + * is important. The bionic libc provides a platform-optimized memory move + * function that should be used when possible. If it's not available, + * the trivial "reference implementation" versions below can be used until + * a proper version can be written. + * + * For these functions, The caller must guarantee that dst/src are aligned + * appropriately for the element type, and that n is a multiple of the + * element size. + */ +#ifdef __BIONIC__ +#define HAVE_MEMMOVE_WORDS +#endif + +#ifdef HAVE_MEMMOVE_WORDS +extern "C" void _memmove_words(void* dst, const void* src, size_t n); +#define move16 _memmove_words +#define move32 _memmove_words +#else +static void move16(void* dst, const void* src, size_t n) { + DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x01), 0U); + + uint16_t* d = reinterpret_cast(dst); + const uint16_t* s = reinterpret_cast(src); + + n /= sizeof(uint16_t); + + if (d < s) { + // Copy forwards. + while (n--) { + *d++ = *s++; + } + } else { + // Copy backwards. + d += n; + s += n; + while (n--) { + *--d = *--s; + } + } +} + +static void move32(void* dst, const void* src, size_t n) { + DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x03), 0U); + + uint32_t* d = reinterpret_cast(dst); + const uint32_t* s = reinterpret_cast(src); + + n /= sizeof(uint32_t); + + if (d < s) { + // Copy forwards. + while (n--) { + *d++ = *s++; + } + } else { + // Copy backwards. 
+ d += n; + s += n; + while (n--) { + *--d = *--s; + } + } +} +#endif // HAVE_MEMMOVE_WORDS + +namespace art { + +static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) { + std::string actualType(PrettyTypeOf(array)); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + "%s of type %s is not an array", identifier, actualType.c_str()); +} + +static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) { + ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + Thread* self = Thread::Current(); + + // Null pointer checks. + if (javaSrc == NULL) { + self->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); + return; + } + if (javaDst == NULL) { + self->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); + return; + } + + // Make sure source and destination are both arrays. + Object* srcObject = Decode(env, javaSrc); + Object* dstObject = Decode(env, javaDst); + if (!srcObject->IsArrayInstance()) { + ThrowArrayStoreException_NotAnArray("source", srcObject); + return; + } + if (!dstObject->IsArrayInstance()) { + ThrowArrayStoreException_NotAnArray("destination", dstObject); + return; + } + Array* srcArray = srcObject->AsArray(); + Array* dstArray = dstObject->AsArray(); + Class* srcComponentType = srcArray->GetClass()->GetComponentType(); + Class* dstComponentType = dstArray->GetClass()->GetComponentType(); + + // Bounds checking. + if (srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length) { + self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", + "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", + srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); + return; + } + + // Handle primitive arrays. 
+ if (srcComponentType->IsPrimitive() || dstComponentType->IsPrimitive()) { + // If one of the arrays holds a primitive type the other array must hold the exact same type. + if (srcComponentType->IsPrimitive() != dstComponentType->IsPrimitive() || srcComponentType != dstComponentType) { + std::string srcType(PrettyTypeOf(srcArray)); + std::string dstType(PrettyTypeOf(dstArray)); + self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + "Incompatible types: src=%s, dst=%s", srcType.c_str(), dstType.c_str()); + return; + } + + size_t width = srcArray->GetClass()->GetComponentSize(); + uint8_t* dstBytes = reinterpret_cast(dstArray->GetRawData(width)); + const uint8_t* srcBytes = reinterpret_cast(srcArray->GetRawData(width)); + + switch (width) { + case 1: + memmove(dstBytes + dstPos, srcBytes + srcPos, length); + break; + case 2: + move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2); + break; + case 4: + move32(dstBytes + dstPos * 4, srcBytes + srcPos * 4, length * 4); + break; + case 8: + // We don't need to guarantee atomicity of the entire 64-bit word. + move32(dstBytes + dstPos * 8, srcBytes + srcPos * 8, length * 8); + break; + default: + LOG(FATAL) << "Unknown primitive array type: " << PrettyTypeOf(srcArray); + } + + return; + } + + // Neither class is primitive. Are the types trivially compatible? + const size_t width = sizeof(Object*); + uint8_t* dstBytes = reinterpret_cast(dstArray->GetRawData(width)); + const uint8_t* srcBytes = reinterpret_cast(srcArray->GetRawData(width)); + if (dstArray == srcArray || dstComponentType->IsAssignableFrom(srcComponentType)) { + // Yes. Bulk copy. + COMPILE_ASSERT(sizeof(width) == sizeof(uint32_t), move32_assumes_Object_references_are_32_bit); + move32(dstBytes + dstPos * width, srcBytes + srcPos * width, length * width); + Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length); + return; + } + + // The arrays are not trivially compatible. 
However, we may still be able to copy some or all of + // the elements if the source objects are compatible (for example, copying an Object[] to + // String[], the Objects being copied might actually be Strings). + // We can't do a bulk move because that would introduce a check-use race condition, so we copy + // elements one by one. + + // We already dealt with overlapping copies, so we don't need to cope with that case below. + CHECK_NE(dstArray, srcArray); + + Object* const * srcObjects = reinterpret_cast(srcBytes + srcPos * width); + Object** dstObjects = reinterpret_cast(dstBytes + dstPos * width); + Class* dstClass = dstArray->GetClass()->GetComponentType(); + + // We want to avoid redundant IsAssignableFrom checks where possible, so we cache a class that + // we know is assignable to the destination array's component type. + Class* lastAssignableElementClass = dstClass; + + Object* o = NULL; + int i = 0; + for (; i < length; ++i) { + o = srcObjects[i]; + if (o != NULL) { + Class* oClass = o->GetClass(); + if (lastAssignableElementClass == oClass) { + dstObjects[i] = o; + } else if (dstClass->IsAssignableFrom(oClass)) { + lastAssignableElementClass = oClass; + dstObjects[i] = o; + } else { + // Can't put this element into the array. 
+ break; + } + } else { + dstObjects[i] = NULL; + } + } + + Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length); + if (i != length) { + std::string actualSrcType(PrettyTypeOf(o)); + std::string dstType(PrettyTypeOf(dstArray)); + self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + "source[%d] of type %s cannot be stored in destination array of type %s", + srcPos + i, actualSrcType.c_str(), dstType.c_str()); + return; + } +} + +static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) { + Object* o = Decode(env, javaObject); + return static_cast(reinterpret_cast(o)); +} + +static JNINativeMethod gMethods[] = { + NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"), + NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"), +}; + +void register_java_lang_System(JNIEnv* env) { + jniRegisterNativeMethods(env, "java/lang/System", gMethods, NELEM(gMethods)); +} + +} // namespace art -- cgit v1.2.3-59-g8ed1b From eac766769e3114a078c188ea26776a81f0edb3cf Mon Sep 17 00:00:00 2001 From: Elliott Hughes Date: Thu, 24 May 2012 21:56:51 -0700 Subject: DO NOT MERGE Fix all unsafe caching to be like libcore. This way, if a runtime is restarted within a process, we re-initialize all the cached data. 
Conflicts: src/native/java_lang_Runtime.cc -- nativeExit lost an argument in dalvik-dev (cherry picked from commit 7756d5473fa27ce7e6ac7c31770eef7030431da4) Change-Id: I6184fc20c2a9ec16c4b053584a4d1c3b64452d0f --- build/Android.common.mk | 1 + src/class_linker.cc | 14 +-- src/compiler_llvm/runtime_support_llvm.cc | 18 +--- src/debugger.cc | 27 ++--- src/dex_file.cc | 15 +-- src/heap.cc | 5 +- src/jni_internal.cc | 46 +++----- src/jni_internal.h | 9 +- src/native/dalvik_system_DexFile.cc | 31 +++--- src/native/dalvik_system_VMDebug.cc | 42 ++++---- src/native/dalvik_system_VMRuntime.cc | 8 +- src/native/dalvik_system_VMStack.cc | 6 +- src/native/dalvik_system_Zygote.cc | 3 +- src/native/java_lang_Class.cc | 17 ++- src/native/java_lang_Object.cc | 4 +- src/native/java_lang_Runtime.cc | 18 ++-- src/native/java_lang_String.cc | 4 +- src/native/java_lang_System.cc | 4 +- src/native/java_lang_Thread.cc | 4 +- src/native/java_lang_Throwable.cc | 4 +- src/native/java_lang_VMClassLoader.cc | 4 +- src/native/java_lang_reflect_Array.cc | 6 +- src/native/java_lang_reflect_Constructor.cc | 6 +- src/native/java_lang_reflect_Field.cc | 8 +- src/native/java_lang_reflect_Method.cc | 6 +- src/native/java_lang_reflect_Proxy.cc | 6 +- .../java_util_concurrent_atomic_AtomicLong.cc | 4 +- .../org_apache_harmony_dalvik_ddmc_DdmServer.cc | 6 +- ...org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 7 +- src/native/sun_misc_Unsafe.cc | 4 +- src/oat/runtime/support_proxy.cc | 16 +-- src/oatexec.cc | 20 ++-- src/reflection.cc | 14 ++- src/reflection.h | 2 +- src/runtime.cc | 21 ++-- src/runtime_support.cc | 23 ++-- src/thread.cc | 19 ++-- src/well_known_classes.cc | 118 +++++++++++++++++++++ src/well_known_classes.h | 67 ++++++++++++ 39 files changed, 361 insertions(+), 276 deletions(-) create mode 100644 src/well_known_classes.cc create mode 100644 src/well_known_classes.h (limited to 'src/native/java_lang_System.cc') diff --git a/build/Android.common.mk b/build/Android.common.mk index 
57baee7f16..9af92dadf1 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -194,6 +194,7 @@ LIBART_COMMON_SRC_FILES := \ src/trace.cc \ src/utf.cc \ src/utils.cc \ + src/well_known_classes.cc \ src/zip_archive.cc \ src/verifier/gc_map.cc \ src/verifier/method_verifier.cc \ diff --git a/src/class_linker.cc b/src/class_linker.cc index b73c2b0ea4..c1382e25c5 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -52,6 +52,7 @@ #include "thread.h" #include "UniquePtr.h" #include "utils.h" +#include "well_known_classes.h" namespace art { @@ -141,24 +142,17 @@ static void WrapExceptionInInitializer() { env->ExceptionClear(); - // TODO: add java.lang.Error to JniConstants? - ScopedLocalRef error_class(env, env->FindClass("java/lang/Error")); - CHECK(error_class.get() != NULL); - if (env->IsInstanceOf(cause.get(), error_class.get())) { + if (env->IsInstanceOf(cause.get(), WellKnownClasses::java_lang_Error)) { // We only wrap non-Error exceptions; an Error can just be used as-is. env->Throw(cause.get()); return; } - // TODO: add java.lang.ExceptionInInitializerError to JniConstants? 
- ScopedLocalRef eiie_class(env, env->FindClass("java/lang/ExceptionInInitializerError")); - CHECK(eiie_class.get() != NULL); - - jmethodID mid = env->GetMethodID(eiie_class.get(), "" , "(Ljava/lang/Throwable;)V"); + jmethodID mid = env->GetMethodID(WellKnownClasses::java_lang_ExceptionInInitializerError, "" , "(Ljava/lang/Throwable;)V"); CHECK(mid != NULL); ScopedLocalRef eiie(env, - reinterpret_cast(env->NewObject(eiie_class.get(), mid, cause.get()))); + reinterpret_cast(env->NewObject(WellKnownClasses::java_lang_ExceptionInInitializerError, mid, cause.get()))); env->Throw(eiie.get()); } diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index 0db31873e6..a8101ba8e3 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -28,6 +28,7 @@ #include "thread.h" #include "thread_list.h" #include "verifier/method_verifier.h" +#include "well_known_classes.h" #include #include @@ -729,22 +730,11 @@ void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) 
{ args->Set(i, val.GetL()); } - // Get the InvocationHandler method and the field that holds it within the Proxy object - static jmethodID inv_hand_invoke_mid = NULL; - static jfieldID proxy_inv_hand_fid = NULL; - if (proxy_inv_hand_fid == NULL) { - ScopedLocalRef proxy(env, env->FindClass("java/lang/reflect/Proxy")); - proxy_inv_hand_fid = env->GetFieldID(proxy.get(), "h", "Ljava/lang/reflect/InvocationHandler;"); - ScopedLocalRef inv_hand_class(env, env->FindClass("java/lang/reflect/InvocationHandler")); - inv_hand_invoke_mid = env->GetMethodID(inv_hand_class.get(), "invoke", - "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;"); - } - - DCHECK(env->IsInstanceOf(rcvr_jobj, env->FindClass("java/lang/reflect/Proxy"))); + DCHECK(env->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); - jobject inv_hand = env->GetObjectField(rcvr_jobj, proxy_inv_hand_fid); + jobject inv_hand = env->GetObjectField(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy_h); // Call InvocationHandler.invoke - jobject result = env->CallObjectMethodA(inv_hand, inv_hand_invoke_mid, args_jobj); + jobject result = env->CallObjectMethodA(inv_hand, WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, args_jobj); // Place result in stack args if (!thread->IsExceptionPending()) { diff --git a/src/debugger.cc b/src/debugger.cc index 2156482607..342d54837d 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -34,6 +34,7 @@ #include "space.h" #include "stack_indirect_reference_table.h" #include "thread_list.h" +#include "well_known_classes.h" extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*); #ifndef HAVE_ANDROID_OS @@ -2322,14 +2323,6 @@ bool Dbg::DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); - static jclass Chunk_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk"); - static jclass 
DdmServer_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer"); - static jmethodID dispatch_mid = env->GetStaticMethodID(DdmServer_class, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;"); - static jfieldID data_fid = env->GetFieldID(Chunk_class, "data", "[B"); - static jfieldID length_fid = env->GetFieldID(Chunk_class, "length", "I"); - static jfieldID offset_fid = env->GetFieldID(Chunk_class, "offset", "I"); - static jfieldID type_fid = env->GetFieldID(Chunk_class, "type", "I"); - // Create a byte[] corresponding to 'buf'. ScopedLocalRef dataArray(env, env->NewByteArray(dataLen)); if (dataArray.get() == NULL) { @@ -2352,7 +2345,9 @@ bool Dbg::DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, } // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)". - ScopedLocalRef chunk(env, env->CallStaticObjectMethod(DdmServer_class, dispatch_mid, type, dataArray.get(), offset, length)); + ScopedLocalRef chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, + WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch, + type, dataArray.get(), offset, length)); if (env->ExceptionCheck()) { LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type); env->ExceptionDescribe(); @@ -2376,10 +2371,10 @@ bool Dbg::DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, * * So we're pretty much stuck with copying data around multiple times. 
*/ - ScopedLocalRef replyData(env, reinterpret_cast(env->GetObjectField(chunk.get(), data_fid))); - length = env->GetIntField(chunk.get(), length_fid); - offset = env->GetIntField(chunk.get(), offset_fid); - type = env->GetIntField(chunk.get(), type_fid); + ScopedLocalRef replyData(env, reinterpret_cast(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data))); + length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length); + offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset); + type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type); VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length); if (length == 0 || replyData.get() == NULL) { @@ -2418,10 +2413,10 @@ void Dbg::DdmBroadcast(bool connect) { } JNIEnv* env = self->GetJniEnv(); - static jclass DdmServer_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer"); - static jmethodID broadcast_mid = env->GetStaticMethodID(DdmServer_class, "broadcast", "(I)V"); jint event = connect ? 
1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/; - env->CallStaticVoidMethod(DdmServer_class, broadcast_mid, event); + env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, + WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast, + event); if (env->ExceptionCheck()) { LOG(ERROR) << "DdmServer.broadcast " << event << " failed"; env->ExceptionDescribe(); diff --git a/src/dex_file.cc b/src/dex_file.cc index 78f0fdc67b..f18b21556e 100644 --- a/src/dex_file.cc +++ b/src/dex_file.cc @@ -38,6 +38,7 @@ #include "UniquePtr.h" #include "utf.h" #include "utils.h" +#include "well_known_classes.h" #include "zip_archive.h" namespace art { @@ -234,19 +235,11 @@ jobject DexFile::GetDexObject(JNIEnv* env) const { return NULL; } - jclass c = env->FindClass("com/android/dex/Dex"); - if (c == NULL) { - return NULL; - } - - jmethodID mid = env->GetStaticMethodID(c, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;"); - if (mid == NULL) { - return NULL; - } - jvalue args[1]; args[0].l = byte_buffer; - jobject local = env->CallStaticObjectMethodA(c, mid, args); + jobject local = env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex, + WellKnownClasses::com_android_dex_Dex_create, + args); if (local == NULL) { return NULL; } diff --git a/src/heap.cc b/src/heap.cc index 02e3ac74ea..bc59cfc9be 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -35,6 +35,7 @@ #include "thread_list.h" #include "timing_logger.h" #include "UniquePtr.h" +#include "well_known_classes.h" namespace art { @@ -854,9 +855,7 @@ void Heap::RequestHeapTrim() { return; } JNIEnv* env = Thread::Current()->GetJniEnv(); - static jclass Daemons_class = CacheClass(env, "java/lang/Daemons"); - static jmethodID Daemons_requestHeapTrim = env->GetStaticMethodID(Daemons_class, "requestHeapTrim", "()V"); - env->CallStaticVoidMethod(Daemons_class, Daemons_requestHeapTrim); + env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 
WellKnownClasses::java_lang_Daemons_requestHeapTrim); CHECK(!env->ExceptionCheck()); } diff --git a/src/jni_internal.cc b/src/jni_internal.cc index 61f7c3c227..03d668c0c0 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -37,6 +37,7 @@ #include "stringpiece.h" #include "thread.h" #include "UniquePtr.h" +#include "well_known_classes.h" namespace art { @@ -55,6 +56,16 @@ static size_t gGlobalsMax = 51200; // Arbitrary sanity check. static const size_t kWeakGlobalsInitial = 16; // Arbitrary. static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. +void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count) { + ScopedLocalRef c(env, env->FindClass(jni_class_name)); + if (c.get() == NULL) { + LOG(FATAL) << "Couldn't find class: " << jni_class_name; + } + if (env->RegisterNatives(c.get(), methods, method_count) != JNI_OK) { + LOG(FATAL) << "Failed to register natives methods: " << jni_class_name; + } +} + void SetJniGlobalsMax(size_t max) { if (max != 0) { gGlobalsMax = max; @@ -511,17 +522,6 @@ static void SetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_ar } } -static jclass InitDirectByteBufferClass(JNIEnv* env) { - ScopedLocalRef buffer_class(env, env->FindClass("java/nio/ReadWriteDirectByteBuffer")); - CHECK(buffer_class.get() != NULL); - return reinterpret_cast(env->NewGlobalRef(buffer_class.get())); -} - -static jclass GetDirectByteBufferClass(JNIEnv* env) { - static jclass buffer_class = InitDirectByteBufferClass(env); - return buffer_class; -} - static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) { if (vm == NULL || p_env == NULL) { return JNI_ERR; @@ -2315,32 +2315,26 @@ class JNI { CHECK(address != NULL); // TODO: ReportJniError CHECK_GT(capacity, 0); // TODO: ReportJniError - jclass buffer_class = GetDirectByteBufferClass(env); - jmethodID mid = env->GetMethodID(buffer_class, "", "(II)V"); - if (mid == NULL) { - 
return NULL; - } - // At the moment, the Java side is limited to 32 bits. CHECK_LE(reinterpret_cast(address), 0xffffffff); CHECK_LE(capacity, 0xffffffff); jint address_arg = reinterpret_cast(address); jint capacity_arg = static_cast(capacity); - jobject result = env->NewObject(buffer_class, mid, address_arg, capacity_arg); + jobject result = env->NewObject(WellKnownClasses::java_nio_ReadWriteDirectByteBuffer, + WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_init, + address_arg, capacity_arg); return ts.Self()->IsExceptionPending() ? NULL : result; } static void* GetDirectBufferAddress(JNIEnv* env, jobject java_buffer) { ScopedJniThreadState ts(env); - static jfieldID fid = env->GetFieldID(GetDirectByteBufferClass(env), "effectiveDirectAddress", "I"); - return reinterpret_cast(env->GetIntField(java_buffer, fid)); + return reinterpret_cast(env->GetIntField(java_buffer, WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_effectiveDirectAddress)); } static jlong GetDirectBufferCapacity(JNIEnv* env, jobject java_buffer) { ScopedJniThreadState ts(env); - static jfieldID fid = env->GetFieldID(GetDirectByteBufferClass(env), "capacity", "I"); - return static_cast(env->GetIntField(java_buffer, fid)); + return static_cast(env->GetIntField(java_buffer, WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_capacity)); } static jobjectRefType GetObjectRefType(JNIEnv* env, jobject java_object) { @@ -3011,14 +3005,6 @@ void JavaVMExt::VisitRoots(Heap::RootVisitor* visitor, void* arg) { // The weak_globals table is visited by the GC itself (because it mutates the table). 
} -jclass CacheClass(JNIEnv* env, const char* jni_class_name) { - ScopedLocalRef c(env, env->FindClass(jni_class_name)); - if (c.get() == NULL) { - return NULL; - } - return reinterpret_cast(env->NewGlobalRef(c.get())); -} - } // namespace art std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs) { diff --git a/src/jni_internal.h b/src/jni_internal.h index 0d97aa495c..2964bba302 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -29,6 +29,13 @@ #include #include +#ifndef NATIVE_METHOD +#define NATIVE_METHOD(className, functionName, signature) \ + { #functionName, signature, reinterpret_cast(className ## _ ## functionName) } +#endif +#define REGISTER_NATIVE_METHODS(jni_class_name) \ + RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods)) + namespace art { class ClassLoader; @@ -41,7 +48,7 @@ class Thread; void SetJniGlobalsMax(size_t max); void JniAbort(const char* jni_function_name); void* FindNativeMethod(Thread* thread); -jclass CacheClass(JNIEnv* env, const char* jni_class_name); +void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count); template T Decode(JNIEnv*, jobject); template T AddLocalReference(JNIEnv*, const Object*); diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 96a1ac1636..afa09b315d 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -16,20 +16,19 @@ #include -#include "class_loader.h" #include "class_linker.h" +#include "class_loader.h" #include "dex_file.h" #include "image.h" +#include "jni_internal.h" #include "logging.h" #include "os.h" #include "runtime.h" -#include "space.h" -#include "zip_archive.h" -#include "toStringArray.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
+#include "space.h" +#include "toStringArray.h" +#include "zip_archive.h" namespace art { @@ -98,23 +97,23 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst } if (dex_file == NULL) { LOG(WARNING) << "Failed to open dex file: " << source; - jniThrowExceptionFmt(env, "java/io/IOException", "unable to open dex file: %s", - source.c_str()); + Thread::Current()->ThrowNewExceptionF("Ljava/io/IOException;", "Unable to open dex file: %s", + source.c_str()); return 0; } return static_cast(reinterpret_cast(dex_file)); } -static const DexFile* toDexFile(JNIEnv* env, int dex_file_address) { +static const DexFile* toDexFile(int dex_file_address) { const DexFile* dex_file = reinterpret_cast(static_cast(dex_file_address)); if (dex_file == NULL) { - jniThrowNullPointerException(env, "dex_file == null"); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", "dex_file == null"); } return dex_file; } -static void DexFile_closeDexFile(JNIEnv* env, jclass, jint cookie) { - const DexFile* dex_file = toDexFile(env, cookie); +static void DexFile_closeDexFile(JNIEnv*, jclass, jint cookie) { + const DexFile* dex_file = toDexFile(cookie); if (dex_file == NULL) { return; } @@ -127,7 +126,7 @@ static void DexFile_closeDexFile(JNIEnv* env, jclass, jint cookie) { static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader, jint cookie) { ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - const DexFile* dex_file = toDexFile(env, cookie); + const DexFile* dex_file = toDexFile(cookie); if (dex_file == NULL) { return NULL; } @@ -150,7 +149,7 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j } static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jint cookie) { - const DexFile* dex_file = toDexFile(env, cookie); + const DexFile* dex_file = toDexFile(cookie); if (dex_file == NULL) { return NULL; } @@ -175,7 +174,7 @@ static jboolean 
DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename if (!OS::FileExists(filename.c_str())) { LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename.c_str() << "' does not exist"; - jniThrowExceptionFmt(env, "java/io/FileNotFoundException", "%s", filename.c_str()); + Thread::Current()->ThrowNewExceptionF("Ljava/io/FileNotFoundException;", "%s", filename.c_str()); return JNI_TRUE; } @@ -265,7 +264,7 @@ static JNINativeMethod gMethods[] = { }; void register_dalvik_system_DexFile(JNIEnv* env) { - jniRegisterNativeMethods(env, "dalvik/system/DexFile", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("dalvik/system/DexFile"); } } // namespace art diff --git a/src/native/dalvik_system_VMDebug.cc b/src/native/dalvik_system_VMDebug.cc index 49ef593a56..bb6f8bc486 100644 --- a/src/native/dalvik_system_VMDebug.cc +++ b/src/native/dalvik_system_VMDebug.cc @@ -14,18 +14,16 @@ * limitations under the License. */ +#include +#include + #include "class_linker.h" #include "debugger.h" -#include "jni_internal.h" -#include "trace.h" #include "hprof/hprof.h" +#include "jni_internal.h" #include "ScopedUtfChars.h" #include "toStringArray.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- -#include -#include +#include "trace.h" namespace art { @@ -66,7 +64,7 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF int fd = dup(originalFd); if (fd < 0) { - jniThrowExceptionFmt(env, "java/lang/RuntimeException", "dup(%d) failed: %s", originalFd, strerror(errno)); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", "dup(%d) failed: %s", originalFd, strerror(errno)); return; } @@ -115,20 +113,20 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) { return Dbg::LastDebuggerActivity(); } -static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) { - jniThrowException(env, "java/lang/UnsupportedOperationException", NULL); +static void VMDebug_startInstructionCounting(JNIEnv*, jclass) { + Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_stopInstructionCounting(JNIEnv* env, jclass) { - jniThrowException(env, "java/lang/UnsupportedOperationException", NULL); +static void VMDebug_stopInstructionCounting(JNIEnv*, jclass) { + Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_getInstructionCount(JNIEnv* env, jclass, jintArray /*javaCounts*/) { - jniThrowException(env, "java/lang/UnsupportedOperationException", NULL); +static void VMDebug_getInstructionCount(JNIEnv*, jclass, jintArray /*javaCounts*/) { + Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_resetInstructionCount(JNIEnv* env, jclass) { - jniThrowException(env, "java/lang/UnsupportedOperationException", NULL); +static void VMDebug_resetInstructionCount(JNIEnv*, jclass) { + Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } static void VMDebug_printLoadedClasses(JNIEnv*, jclass, jint flags) { @@ -156,7 +154,7 @@ static jlong VMDebug_threadCpuTimeNanos(JNIEnv*, jclass) { static void VMDebug_dumpHprofData(JNIEnv* env, jclass, 
jstring javaFilename, jobject javaFd) { // Only one of these may be NULL. if (javaFilename == NULL && javaFd == NULL) { - jniThrowNullPointerException(env, "fileName == null && fd == null"); + Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "fileName == null && fd == null"); return; } @@ -175,7 +173,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job if (javaFd != NULL) { fd = jniGetFDFromFileDescriptor(env, javaFd); if (fd < 0) { - jniThrowException(env, "Ljava/lang/RuntimeException;", "Invalid file descriptor"); + Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", "Invalid file descriptor"); return; } } @@ -183,16 +181,16 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job int result = hprof::DumpHeap(filename.c_str(), fd, false); if (result != 0) { // TODO: ideally we'd throw something more specific based on actual failure - jniThrowException(env, "Ljava/lang/RuntimeException;", "Failure during heap dump; check log output for details"); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", "Failure during heap dump; check log output for details: %d", result); return; } } -static void VMDebug_dumpHprofDataDdms(JNIEnv* env, jclass) { +static void VMDebug_dumpHprofDataDdms(JNIEnv*, jclass) { int result = hprof::DumpHeap("[DDMS]", -1, true); if (result != 0) { // TODO: ideally we'd throw something more specific based on actual failure - jniThrowException(env, "Ljava/lang/RuntimeException;", "Failure during heap dump; check log output for details"); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", "Failure during heap dump; check log output for details: %d", result); return; } } @@ -255,7 +253,7 @@ static JNINativeMethod gMethods[] = { }; void register_dalvik_system_VMDebug(JNIEnv* env) { - jniRegisterNativeMethods(env, "dalvik/system/VMDebug", gMethods, NELEM(gMethods)); + 
REGISTER_NATIVE_METHODS("dalvik/system/VMDebug"); } } // namespace art diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index caac28341f..fbc2a019e0 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include + #include "class_linker.h" #include "debugger.h" #include "jni_internal.h" @@ -24,12 +26,8 @@ #include "space.h" #include "thread.h" #include "thread_list.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. #include "toStringArray.h" -#include - namespace art { static jfloat VMRuntime_getTargetHeapUtilization(JNIEnv*, jobject) { @@ -187,7 +185,7 @@ static JNINativeMethod gMethods[] = { }; void register_dalvik_system_VMRuntime(JNIEnv* env) { - jniRegisterNativeMethods(env, "dalvik/system/VMRuntime", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("dalvik/system/VMRuntime"); } } // namespace art diff --git a/src/native/dalvik_system_VMStack.cc b/src/native/dalvik_system_VMStack.cc index e0862c3788..ab82694056 100644 --- a/src/native/dalvik_system_VMStack.cc +++ b/src/native/dalvik_system_VMStack.cc @@ -14,16 +14,14 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_loader.h" +#include "jni_internal.h" #include "nth_caller_visitor.h" #include "object.h" #include "scoped_heap_lock.h" #include "scoped_thread_list_lock.h" #include "thread_list.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static jobject GetThreadStack(JNIEnv* env, jobject javaThread) { @@ -99,7 +97,7 @@ static JNINativeMethod gMethods[] = { }; void register_dalvik_system_VMStack(JNIEnv* env) { - jniRegisterNativeMethods(env, "dalvik/system/VMStack", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("dalvik/system/VMStack"); } } // namespace art diff --git a/src/native/dalvik_system_Zygote.cc b/src/native/dalvik_system_Zygote.cc index b4f69fbeb9..65fadf26b8 100644 --- a/src/native/dalvik_system_Zygote.cc +++ b/src/native/dalvik_system_Zygote.cc @@ -25,7 +25,6 @@ #include "cutils/sched_policy.h" #include "debugger.h" #include "jni_internal.h" -#include "JniConstants.h" #include "JNIHelp.h" #include "ScopedLocalRef.h" #include "ScopedPrimitiveArray.h" @@ -375,7 +374,7 @@ static JNINativeMethod gMethods[] = { }; void register_dalvik_system_Zygote(JNIEnv* env) { - jniRegisterNativeMethods(env, "dalvik/system/Zygote", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("dalvik/system/Zygote"); } } // namespace art diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index 2cd70900b1..66c83c78c6 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -14,16 +14,15 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_linker.h" #include "class_loader.h" +#include "jni_internal.h" #include "nth_caller_visitor.h" #include "object.h" #include "object_utils.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
+#include "well_known_classes.h" namespace art { @@ -62,9 +61,9 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean if (c == NULL) { ScopedLocalRef cause(env, env->ExceptionOccurred()); env->ExceptionClear(); - static jclass ClassNotFoundException_class = CacheClass(env, "java/lang/ClassNotFoundException"); - static jmethodID ctor = env->GetMethodID(ClassNotFoundException_class, "", "(Ljava/lang/String;Ljava/lang/Throwable;)V"); - jthrowable cnfe = reinterpret_cast(env->NewObject(ClassNotFoundException_class, ctor, javaName, cause.get())); + jthrowable cnfe = reinterpret_cast(env->NewObject(WellKnownClasses::java_lang_ClassNotFoundException, + WellKnownClasses::java_lang_ClassNotFoundException_init, + javaName, cause.get())); env->Throw(cnfe); return NULL; } @@ -89,8 +88,8 @@ static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { template static jobjectArray ToArray(JNIEnv* env, const char* array_class_name, const std::vector& objects) { - jclass array_class = env->FindClass(array_class_name); - jobjectArray result = env->NewObjectArray(objects.size(), array_class, NULL); + ScopedLocalRef array_class(env, env->FindClass(array_class_name)); + jobjectArray result = env->NewObjectArray(objects.size(), array_class.get(), NULL); for (size_t i = 0; i < objects.size(); ++i) { ScopedLocalRef object(env, AddLocalReference(env, objects[i])); env->SetObjectArrayElement(result, i, object.get()); @@ -472,7 +471,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_Class(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/Class", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/Class"); } } // namespace art diff --git a/src/native/java_lang_Object.cc b/src/native/java_lang_Object.cc index 7c79fc1aed..51e4581de0 100644 --- a/src/native/java_lang_Object.cc +++ b/src/native/java_lang_Object.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "object.h" -#include "JniConstants.h" // 
Last to avoid problems with LOG redefinition. - namespace art { static jobject Object_internalClone(JNIEnv* env, jobject javaThis) { @@ -50,7 +48,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_Object(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/Object", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/Object"); } } // namespace art diff --git a/src/native/java_lang_Runtime.cc b/src/native/java_lang_Runtime.cc index 1b5520b8de..b7e81af806 100644 --- a/src/native/java_lang_Runtime.cc +++ b/src/native/java_lang_Runtime.cc @@ -14,15 +14,13 @@ * limitations under the License. */ -#include #include +#include #include "heap.h" #include "jni_internal.h" #include "object.h" #include "runtime.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. #include "ScopedUtfChars.h" namespace art { @@ -77,16 +75,16 @@ static jlong Runtime_freeMemory(JNIEnv*, jclass) { } static JNINativeMethod gMethods[] = { - NATIVE_METHOD(Runtime, freeMemory, "()J"), - NATIVE_METHOD(Runtime, gc, "()V"), - NATIVE_METHOD(Runtime, maxMemory, "()J"), - NATIVE_METHOD(Runtime, nativeExit, "(IZ)V"), - NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/String;"), - NATIVE_METHOD(Runtime, totalMemory, "()J"), + NATIVE_METHOD(Runtime, freeMemory, "()J"), + NATIVE_METHOD(Runtime, gc, "()V"), + NATIVE_METHOD(Runtime, maxMemory, "()J"), + NATIVE_METHOD(Runtime, nativeExit, "(IZ)V"), + NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/String;"), + NATIVE_METHOD(Runtime, totalMemory, "()J"), }; void register_java_lang_Runtime(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/Runtime", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/Runtime"); } } // namespace art diff --git a/src/native/java_lang_String.cc b/src/native/java_lang_String.cc index 49bf234e00..f8fb4a745e 100644 --- a/src/native/java_lang_String.cc +++ 
b/src/native/java_lang_String.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "object.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - #ifdef HAVE__MEMCMP16 // "count" is in 16-bit units. extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count); @@ -108,7 +106,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_String(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/String", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/String"); } } // namespace art diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index 741b319718..b0d1eec6dc 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "object.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - /* * We make guarantees about the atomicity of accesses to primitive * variables. These guarantees also apply to elements of arrays. @@ -253,7 +251,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_System(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/System", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/System"); } } // namespace art diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index 197c1b98e3..e009157d58 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -22,8 +22,6 @@ #include "thread.h" #include "thread_list.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static jobject Thread_currentThread(JNIEnv* env, jclass) { @@ -145,7 +143,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_Thread(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/Thread", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/Thread"); } } // namespace art diff --git a/src/native/java_lang_Throwable.cc b/src/native/java_lang_Throwable.cc index 1d730273f3..625a34b067 100644 --- a/src/native/java_lang_Throwable.cc +++ b/src/native/java_lang_Throwable.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "thread.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - namespace art { static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) { @@ -39,7 +37,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_Throwable(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/Throwable", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/Throwable"); } } // namespace art diff --git a/src/native/java_lang_VMClassLoader.cc b/src/native/java_lang_VMClassLoader.cc index 3d96c472c5..a976933087 100644 --- a/src/native/java_lang_VMClassLoader.cc +++ b/src/native/java_lang_VMClassLoader.cc @@ -19,8 +19,6 @@ #include "ScopedUtfChars.h" #include "zip_archive.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) { @@ -91,7 +89,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_VMClassLoader(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/VMClassLoader", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/VMClassLoader"); } } // namespace art diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index 1c86aacf8b..ea635d3647 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -14,13 +14,11 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_linker.h" +#include "jni_internal.h" #include "object.h" #include "object_utils.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - namespace art { // Recursively create an array with multiple dimensions. Elements may be @@ -150,7 +148,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_reflect_Array(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/reflect/Array", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/reflect/Array"); } } // namespace art diff --git a/src/native/java_lang_reflect_Constructor.cc b/src/native/java_lang_reflect_Constructor.cc index 5d0434d4c7..1094d06334 100644 --- a/src/native/java_lang_reflect_Constructor.cc +++ b/src/native/java_lang_reflect_Constructor.cc @@ -14,14 +14,12 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_linker.h" +#include "jni_internal.h" #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { /* @@ -63,7 +61,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_reflect_Constructor(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/reflect/Constructor", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/reflect/Constructor"); } } // namespace art diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index 3e0c9d7377..bd33c0ebf2 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -14,14 +14,12 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_linker.h" +#include "jni_internal.h" #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - namespace art { static bool GetFieldValue(Object* o, Field* f, JValue& value, bool allow_references) { @@ -79,7 +77,7 @@ static bool CheckReceiver(JNIEnv* env, jobject javaObj, Field* f, Object*& o) { o = Decode(env, javaObj); Class* declaringClass = f->GetDeclaringClass(); - if (!VerifyObjectInClass(env, o, declaringClass)) { + if (!VerifyObjectInClass(o, declaringClass)) { return false; } return true; @@ -323,7 +321,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_reflect_Field(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/reflect/Field", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/reflect/Field"); } } // namespace art diff --git a/src/native/java_lang_reflect_Method.cc b/src/native/java_lang_reflect_Method.cc index a5a705bab7..bf5c850399 100644 --- a/src/native/java_lang_reflect_Method.cc +++ b/src/native/java_lang_reflect_Method.cc @@ -14,14 +14,12 @@ * limitations under the License. */ -#include "jni_internal.h" #include "class_linker.h" +#include "jni_internal.h" #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { @@ -60,7 +58,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_reflect_Method(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/reflect/Method", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/reflect/Method"); } } // namespace art diff --git a/src/native/java_lang_reflect_Proxy.cc b/src/native/java_lang_reflect_Proxy.cc index bac20b05da..eca6c32574 100644 --- a/src/native/java_lang_reflect_Proxy.cc +++ b/src/native/java_lang_reflect_Proxy.cc @@ -14,11 +14,9 @@ * limitations under the License. */ +#include "class_linker.h" #include "jni_internal.h" #include "object.h" -#include "class_linker.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. namespace art { @@ -40,7 +38,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_lang_reflect_Proxy(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/lang/reflect/Proxy", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/lang/reflect/Proxy"); } } // namespace art diff --git a/src/native/java_util_concurrent_atomic_AtomicLong.cc b/src/native/java_util_concurrent_atomic_AtomicLong.cc index 118b9e40ed..7caa23fc63 100644 --- a/src/native/java_util_concurrent_atomic_AtomicLong.cc +++ b/src/native/java_util_concurrent_atomic_AtomicLong.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "object.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static jboolean AtomicLong_VMSupportsCS8(JNIEnv*, jclass) { @@ -30,7 +28,7 @@ static JNINativeMethod gMethods[] = { }; void register_java_util_concurrent_atomic_AtomicLong(JNIEnv* env) { - jniRegisterNativeMethods(env, "java/util/concurrent/atomic/AtomicLong", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("java/util/concurrent/atomic/AtomicLong"); } } // namespace art diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc index ed36590401..e3c31b01eb 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc @@ -16,9 +16,7 @@ #include "debugger.h" #include "logging.h" - -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. -#include "ScopedPrimitiveArray.h" // Last to avoid problems with LOG redefinition. +#include "ScopedPrimitiveArray.h" namespace art { @@ -34,7 +32,7 @@ static JNINativeMethod gMethods[] = { }; void register_org_apache_harmony_dalvik_ddmc_DdmServer(JNIEnv* env) { - jniRegisterNativeMethods(env, "org/apache/harmony/dalvik/ddmc/DdmServer", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("org/apache/harmony/dalvik/ddmc/DdmServer"); } } // namespace art diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 88416af9b2..3766546ad6 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -15,15 +15,14 @@ */ #include "debugger.h" +#include "jni_internal.h" #include "logging.h" #include "scoped_heap_lock.h" #include "scoped_thread_list_lock.h" +#include "ScopedPrimitiveArray.h" #include "stack.h" #include "thread_list.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. -#include "ScopedPrimitiveArray.h" // Last to avoid problems with LOG redefinition. 
- namespace art { static void DdmVmInternal_enableRecentAllocations(JNIEnv*, jclass, jboolean enable) { @@ -162,7 +161,7 @@ static JNINativeMethod gMethods[] = { }; void register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(JNIEnv* env) { - jniRegisterNativeMethods(env, "org/apache/harmony/dalvik/ddmc/DdmVmInternal", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("org/apache/harmony/dalvik/ddmc/DdmVmInternal"); } } // namespace art diff --git a/src/native/sun_misc_Unsafe.cc b/src/native/sun_misc_Unsafe.cc index 8cc549acfd..360f241f19 100644 --- a/src/native/sun_misc_Unsafe.cc +++ b/src/native/sun_misc_Unsafe.cc @@ -17,8 +17,6 @@ #include "jni_internal.h" #include "object.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - namespace art { static jlong Unsafe_objectFieldOffset0(JNIEnv* env, jclass, jobject javaField) { @@ -187,7 +185,7 @@ static JNINativeMethod gMethods[] = { }; void register_sun_misc_Unsafe(JNIEnv* env) { - jniRegisterNativeMethods(env, "sun/misc/Unsafe", gMethods, NELEM(gMethods)); + REGISTER_NATIVE_METHODS("sun/misc/Unsafe"); } } // namespace art diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index 66d0e2c93b..26f61cf6dd 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -19,6 +19,7 @@ #include "reflection.h" #include "runtime_support.h" #include "thread.h" +#include "well_known_classes.h" #include "ScopedLocalRef.h" @@ -161,19 +162,10 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, param_index++; } // Get the InvocationHandler method and the field that holds it within the Proxy object - static jmethodID inv_hand_invoke_mid = NULL; - static jfieldID proxy_inv_hand_fid = NULL; - if (proxy_inv_hand_fid == NULL) { - ScopedLocalRef proxy(env, env->FindClass("java/lang/reflect/Proxy")); - proxy_inv_hand_fid = env->GetFieldID(proxy.get(), "h", "Ljava/lang/reflect/InvocationHandler;"); - ScopedLocalRef 
inv_hand_class(env, env->FindClass("java/lang/reflect/InvocationHandler")); - inv_hand_invoke_mid = env->GetMethodID(inv_hand_class.get(), "invoke", - "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;"); - } - DCHECK(env->IsInstanceOf(rcvr_jobj, env->FindClass("java/lang/reflect/Proxy"))); - jobject inv_hand = env->GetObjectField(rcvr_jobj, proxy_inv_hand_fid); + DCHECK(env->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); + jobject inv_hand = env->GetObjectField(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy_h); // Call InvocationHandler.invoke - jobject result = env->CallObjectMethodA(inv_hand, inv_hand_invoke_mid, args_jobj); + jobject result = env->CallObjectMethodA(inv_hand, WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, args_jobj); // Place result in stack args if (!self->IsExceptionPending()) { Object* result_ref = self->DecodeJObject(result); diff --git a/src/oatexec.cc b/src/oatexec.cc index d080560676..e8e2050f56 100644 --- a/src/oatexec.cc +++ b/src/oatexec.cc @@ -21,12 +21,13 @@ #include #include -#include "ScopedLocalRef.h" -#include "UniquePtr.h" #include "jni.h" #include "logging.h" -#include "toStringArray.h" #include "object.h" +#include "ScopedLocalRef.h" +#include "toStringArray.h" +#include "UniquePtr.h" +#include "well_known_classes.h" namespace art { @@ -39,17 +40,12 @@ static bool IsMethodPublic(JNIEnv* env, jclass c, jmethodID method_id) { } // We now have a Method instance. We need to call its // getModifiers() method. 
- ScopedLocalRef method(env, env->FindClass("java/lang/reflect/Method")); - if (method.get() == NULL) { - fprintf(stderr, "Failed to find class Method\n"); - return false; - } - jmethodID get_modifiers = env->GetMethodID(method.get(), "getModifiers", "()I"); - if (get_modifiers == NULL) { - fprintf(stderr, "Failed to find reflect.Method.getModifiers\n"); + jmethodID mid = env->GetMethodID(WellKnownClasses::java_lang_reflect_Method, "getModifiers", "()I"); + if (mid == NULL) { + fprintf(stderr, "Failed to find java.lang.reflect.Method.getModifiers\n"); return false; } - int modifiers = env->CallIntMethod(reflected.get(), get_modifiers); + int modifiers = env->CallIntMethod(reflected.get(), mid); if ((modifiers & kAccPublic) == 0) { return false; } diff --git a/src/reflection.cc b/src/reflection.cc index 010211edb1..2b72944297 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -21,8 +21,6 @@ #include "object.h" #include "object_utils.h" -#include "JniConstants.h" // Last to avoid problems with LOG redefinition. - namespace art { Method* gBoolean_valueOf; @@ -62,7 +60,7 @@ jobject InvokeMethod(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobj if (!m->IsStatic()) { // Check that the receiver is non-null and an instance of the field's declaring class. 
receiver = Decode(env, javaReceiver); - if (!VerifyObjectInClass(env, receiver, declaring_class)) { + if (!VerifyObjectInClass(receiver, declaring_class)) { return NULL; } @@ -117,18 +115,18 @@ jobject InvokeMethod(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobj return AddLocalReference(env, value.GetL()); } -bool VerifyObjectInClass(JNIEnv* env, Object* o, Class* c) { +bool VerifyObjectInClass(Object* o, Class* c) { const char* exception = NULL; if (o == NULL) { - exception = "java/lang/NullPointerException"; + exception = "Ljava/lang/NullPointerException;"; } else if (!o->InstanceOf(c)) { - exception = "java/lang/IllegalArgumentException"; + exception = "Ljava/lang/IllegalArgumentException;"; } if (exception != NULL) { std::string expected_class_name(PrettyDescriptor(c)); std::string actual_class_name(PrettyTypeOf(o)); - jniThrowExceptionFmt(env, exception, "expected receiver of type %s, but got %s", - expected_class_name.c_str(), actual_class_name.c_str()); + Thread::Current()->ThrowNewExceptionF(exception, "expected receiver of type %s, but got %s", + expected_class_name.c_str(), actual_class_name.c_str()); return false; } return true; diff --git a/src/reflection.h b/src/reflection.h index 50c36d7a6c..6b47440d99 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -38,7 +38,7 @@ bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, jobject InvokeMethod(JNIEnv* env, jobject method, jobject receiver, jobject args); -bool VerifyObjectInClass(JNIEnv* env, Object* o, Class* c); +bool VerifyObjectInClass(Object* o, Class* c); } // namespace art diff --git a/src/runtime.cc b/src/runtime.cc index e03305973d..226334eaea 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -43,9 +43,9 @@ #include "trace.h" #include "UniquePtr.h" #include "verifier/method_verifier.h" +#include "well_known_classes.h" -// TODO: this drags in cutil/log.h, which conflicts with our logging.h. 
-#include "JniConstants.h" +#include "JniConstants.h" // Last to avoid LOG redefinition in ics-mr1-plus-art. namespace art { @@ -516,21 +516,17 @@ void CreateSystemClassLoader() { CHECK_EQ(self->GetState(), kNative); JNIEnv* env = self->GetJniEnv(); - ScopedLocalRef ClassLoader_class(env, env->FindClass("java/lang/ClassLoader")); - CHECK(ClassLoader_class.get() != NULL); - jmethodID getSystemClassLoader = env->GetStaticMethodID(ClassLoader_class.get(), + jmethodID getSystemClassLoader = env->GetStaticMethodID(WellKnownClasses::java_lang_ClassLoader, "getSystemClassLoader", "()Ljava/lang/ClassLoader;"); CHECK(getSystemClassLoader != NULL); - ScopedLocalRef class_loader(env, env->CallStaticObjectMethod(ClassLoader_class.get(), + ScopedLocalRef class_loader(env, env->CallStaticObjectMethod(WellKnownClasses::java_lang_ClassLoader, getSystemClassLoader)); CHECK(class_loader.get() != NULL); Thread::Current()->SetClassLoaderOverride(Decode(env, class_loader.get())); - ScopedLocalRef Thread_class(env, env->FindClass("java/lang/Thread")); - CHECK(Thread_class.get() != NULL); - jfieldID contextClassLoader = env->GetFieldID(Thread_class.get(), + jfieldID contextClassLoader = env->GetFieldID(WellKnownClasses::java_lang_Thread, "contextClassLoader", "Ljava/lang/ClassLoader;"); CHECK(contextClassLoader != NULL); @@ -595,11 +591,7 @@ void Runtime::StartDaemonThreads() { CHECK_EQ(self->GetState(), kNative); JNIEnv* env = self->GetJniEnv(); - ScopedLocalRef c(env, env->FindClass("java/lang/Daemons")); - CHECK(c.get() != NULL); - jmethodID mid = env->GetStaticMethodID(c.get(), "start", "()V"); - CHECK(mid != NULL); - env->CallStaticVoidMethod(c.get(), mid); + env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_start); CHECK(!env->ExceptionCheck()); VLOG(startup) << "Runtime::StartDaemonThreads exiting"; @@ -708,6 +700,7 @@ void Runtime::InitNativeMethods() { // First set up JniConstants, which is used by both the runtime's built-in 
native // methods and libcore. JniConstants::init(env); + WellKnownClasses::Init(env); // Then set up the native methods provided by the runtime itself. RegisterRuntimeNativeMethods(env); diff --git a/src/runtime_support.cc b/src/runtime_support.cc index f8b40a258a..8d649a61df 100644 --- a/src/runtime_support.cc +++ b/src/runtime_support.cc @@ -17,6 +17,7 @@ #include "runtime_support.h" #include "ScopedLocalRef.h" +#include "well_known_classes.h" double art_l2d(int64_t l) { return (double) l; @@ -609,21 +610,15 @@ Class* ResolveVerifyAndClinit(uint32_t type_idx, const Method* referrer, Thread* } void ThrowNewUndeclaredThrowableException(Thread* self, JNIEnv* env, Throwable* exception) { - ScopedLocalRef jlr_UTE_class(env, - env->FindClass("java/lang/reflect/UndeclaredThrowableException")); - if (jlr_UTE_class.get() == NULL) { + jmethodID jlre_UTE_constructor = env->GetMethodID(WellKnownClasses::java_lang_reflect_UndeclaredThrowableException, "", + "(Ljava/lang/Throwable;)V"); + jthrowable jexception = AddLocalReference(env, exception); + ScopedLocalRef jlr_UTE(env, + reinterpret_cast(env->NewObject(WellKnownClasses::java_lang_reflect_UndeclaredThrowableException, + jlre_UTE_constructor, jexception))); + int rc = env->Throw(jlr_UTE.get()); + if (rc != JNI_OK) { LOG(ERROR) << "Couldn't throw new \"java/lang/reflect/UndeclaredThrowableException\""; - } else { - jmethodID jlre_UTE_constructor = env->GetMethodID(jlr_UTE_class.get(), "", - "(Ljava/lang/Throwable;)V"); - jthrowable jexception = AddLocalReference(env, exception); - ScopedLocalRef jlr_UTE(env, - reinterpret_cast(env->NewObject(jlr_UTE_class.get(), jlre_UTE_constructor, - jexception))); - int rc = env->Throw(jlr_UTE.get()); - if (rc != JNI_OK) { - LOG(ERROR) << "Couldn't throw new \"java/lang/reflect/UndeclaredThrowableException\""; - } } CHECK(self->IsExceptionPending()); } diff --git a/src/thread.cc b/src/thread.cc index 01e0771c35..a986ed541e 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -50,6 +50,7 
@@ #include "thread_list.h" #include "utils.h" #include "verifier/gc_map.h" +#include "well_known_classes.h" namespace art { @@ -339,15 +340,16 @@ void Thread::CreatePeer(const char* name, bool as_daemon, Object* thread_group) jint thread_priority = GetNativePriority(); jboolean thread_is_daemon = as_daemon; - ScopedLocalRef c(env, env->FindClass("java/lang/Thread")); - ScopedLocalRef peer(env, env->AllocObject(c.get())); + ScopedLocalRef peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); peer_ = DecodeJObject(peer.get()); if (peer_ == NULL) { CHECK(IsExceptionPending()); return; } - jmethodID mid = env->GetMethodID(c.get(), "", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V"); - env->CallNonvirtualVoidMethod(peer.get(), c.get(), mid, java_thread_group.get(), thread_name.get(), thread_priority, thread_is_daemon); + env->CallNonvirtualVoidMethod(peer.get(), + WellKnownClasses::java_lang_Thread, + WellKnownClasses::java_lang_Thread_init, + java_thread_group.get(), thread_name.get(), thread_priority, thread_is_daemon); CHECK(!IsExceptionPending()) << " " << PrettyTypeOf(GetException()); SetVmData(peer_, Thread::Current()); @@ -524,11 +526,10 @@ void Thread::DumpState(std::ostream& os) const { os << " | schedstat=( " << scheduler_stats << " )" << " utm=" << utime << " stm=" << stime - << " core=" << task_cpu - << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; - - os << " | stackSize=" << PrettySize(stack_size_) - << " stack=" << reinterpret_cast(stack_begin_) << "-" << reinterpret_cast(stack_end_) << "\n"; + << " core=" << task_cpu; + os << " HZ=" << sysconf(_SC_CLK_TCK) << "\n" + << " | stack=" << reinterpret_cast(stack_begin_) << "-" << reinterpret_cast(stack_end_) + << " stackSize=" << PrettySize(stack_size_) << "\n"; } #if !defined(ART_USE_LLVM_COMPILER) diff --git a/src/well_known_classes.cc b/src/well_known_classes.cc new file mode 100644 index 0000000000..7e21a66603 --- /dev/null +++ b/src/well_known_classes.cc @@ -0,0 +1,118 @@ +/* + * Copyright (C) 
2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "well_known_classes.h" + +#include + +#include "logging.h" +#include "ScopedLocalRef.h" + +namespace art { + +jclass WellKnownClasses::com_android_dex_Dex; +jclass WellKnownClasses::java_lang_ClassLoader; +jclass WellKnownClasses::java_lang_ClassNotFoundException; +jclass WellKnownClasses::java_lang_Daemons; +jclass WellKnownClasses::java_lang_Error; +jclass WellKnownClasses::java_lang_ExceptionInInitializerError; +jclass WellKnownClasses::java_lang_reflect_InvocationHandler; +jclass WellKnownClasses::java_lang_reflect_Method; +jclass WellKnownClasses::java_lang_reflect_Proxy; +jclass WellKnownClasses::java_lang_reflect_UndeclaredThrowableException; +jclass WellKnownClasses::java_lang_Thread; +jclass WellKnownClasses::java_nio_ReadWriteDirectByteBuffer; +jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk; +jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer; + +jmethodID WellKnownClasses::com_android_dex_Dex_create; +jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init; +jmethodID WellKnownClasses::java_lang_Daemons_requestHeapTrim; +jmethodID WellKnownClasses::java_lang_Daemons_start; +jmethodID WellKnownClasses::java_lang_reflect_InvocationHandler_invoke; +jmethodID WellKnownClasses::java_lang_Thread_init; +jmethodID WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_init; +jmethodID 
WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast; +jmethodID WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch; + +jfieldID WellKnownClasses::java_lang_reflect_Proxy_h; +jfieldID WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_capacity; +jfieldID WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_effectiveDirectAddress; +jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data; +jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length; +jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset; +jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type; + +static jclass CacheClass(JNIEnv* env, const char* jni_class_name) { + ScopedLocalRef c(env, env->FindClass(jni_class_name)); + if (c.get() == NULL) { + LOG(FATAL) << "Couldn't find class: " << jni_class_name; + } + return reinterpret_cast(env->NewGlobalRef(c.get())); +} + +static jfieldID CacheField(JNIEnv* env, jclass c, const char* name, const char* signature) { + jfieldID fid = env->GetFieldID(c, name, signature); + if (fid == NULL) { + LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature << "\""; + } + return fid; +} + +static jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) { + jmethodID mid = is_static ? 
env->GetStaticMethodID(c, name, signature) : env->GetMethodID(c, name, signature); + if (mid == NULL) { + LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\""; + } + return mid; +} + +void WellKnownClasses::Init(JNIEnv* env) { + com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex"); + java_lang_ClassLoader = CacheClass(env, "java/lang/ClassLoader"); + java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException"); + java_lang_Daemons = CacheClass(env, "java/lang/Daemons"); + java_lang_Error = CacheClass(env, "java/lang/Error"); + java_lang_ExceptionInInitializerError = CacheClass(env, "java/lang/ExceptionInInitializerError"); + java_lang_reflect_InvocationHandler = CacheClass(env, "java/lang/reflect/InvocationHandler"); + java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method"); + java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy"); + java_lang_reflect_UndeclaredThrowableException = CacheClass(env, "java/lang/reflect/UndeclaredThrowableException"); + java_lang_Thread = CacheClass(env, "java/lang/Thread"); + java_nio_ReadWriteDirectByteBuffer = CacheClass(env, "java/nio/ReadWriteDirectByteBuffer"); + org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk"); + org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer"); + + com_android_dex_Dex_create = CacheMethod(env, com_android_dex_Dex, true, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;"); + java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "", "(Ljava/lang/String;Ljava/lang/Throwable;)V"); + java_lang_Daemons_requestHeapTrim = CacheMethod(env, java_lang_Daemons, true, "requestHeapTrim", "()V"); + java_lang_Daemons_start = CacheMethod(env, java_lang_Daemons, true, "start", "()V"); + java_lang_reflect_InvocationHandler_invoke = CacheMethod(env, 
java_lang_reflect_InvocationHandler, false, "invoke", "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;"); + java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V"); + java_nio_ReadWriteDirectByteBuffer_init = CacheMethod(env, java_nio_ReadWriteDirectByteBuffer, false, "", "(II)V"); + org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V"); + org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;"); + + java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, "h", "Ljava/lang/reflect/InvocationHandler;"); + java_nio_ReadWriteDirectByteBuffer_capacity = CacheField(env, java_nio_ReadWriteDirectByteBuffer, "capacity", "I"); + java_nio_ReadWriteDirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_ReadWriteDirectByteBuffer, "effectiveDirectAddress", "I"); + org_apache_harmony_dalvik_ddmc_Chunk_data = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, "data", "[B"); + org_apache_harmony_dalvik_ddmc_Chunk_length = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, "length", "I"); + org_apache_harmony_dalvik_ddmc_Chunk_offset = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, "offset", "I"); + org_apache_harmony_dalvik_ddmc_Chunk_type = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, "type", "I"); +} + +} // namespace art diff --git a/src/well_known_classes.h b/src/well_known_classes.h new file mode 100644 index 0000000000..5d45938f9f --- /dev/null +++ b/src/well_known_classes.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_WELL_KNOWN_CLASSES_H_ +#define ART_SRC_WELL_KNOWN_CLASSES_H_ + +#include "jni.h" + +namespace art { + +// Various classes used in JNI. We cache them so we don't have to keep looking +// them up. Similar to libcore's JniConstants (except there's no overlap, so +// we keep them separate). + +struct WellKnownClasses { + static void Init(JNIEnv* env); + + static jclass com_android_dex_Dex; + static jclass java_lang_ClassLoader; + static jclass java_lang_ClassNotFoundException; + static jclass java_lang_Daemons; + static jclass java_lang_Error; + static jclass java_lang_ExceptionInInitializerError; + static jclass java_lang_reflect_InvocationHandler; + static jclass java_lang_reflect_Method; + static jclass java_lang_reflect_Proxy; + static jclass java_lang_reflect_UndeclaredThrowableException; + static jclass java_lang_Thread; + static jclass java_nio_ReadWriteDirectByteBuffer; + static jclass org_apache_harmony_dalvik_ddmc_Chunk; + static jclass org_apache_harmony_dalvik_ddmc_DdmServer; + + static jmethodID com_android_dex_Dex_create; + static jmethodID java_lang_ClassNotFoundException_init; + static jmethodID java_lang_Daemons_requestHeapTrim; + static jmethodID java_lang_Daemons_start; + static jmethodID java_lang_reflect_InvocationHandler_invoke; + static jmethodID java_lang_Thread_init; + static jmethodID java_nio_ReadWriteDirectByteBuffer_init; + static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_broadcast; + static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_dispatch; + + static jfieldID 
java_lang_reflect_Proxy_h; + static jfieldID java_nio_ReadWriteDirectByteBuffer_capacity; + static jfieldID java_nio_ReadWriteDirectByteBuffer_effectiveDirectAddress; + static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_data; + static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_length; + static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_offset; + static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_type; +}; + +} // namespace art + +#endif // ART_SRC_WELL_KNOWN_CLASSES_H_ -- cgit v1.2.3-59-g8ed1b From 365c10235438607541fa2259a5fec48061b90bd8 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 22 Jun 2012 15:05:28 -0700 Subject: Increase use of ScopedJniThreadState. Move the routines for changing Object* to jobject and vice-versa (AddLocalReference and Decode) to ScopedJniThreadState to enforce use of Object*s in the Runnable thread state. In the Runnable thread state suspension is necessary before GC can take place. Reduce use of const ClassLoader* as the code bottoms out in FindClass and with a field assignment where the const is cast away (ie if we're not going to enforce the const-ness we shouldn't pretend it is). Refactor the Thread::Attach API so that we're not handling raw Objects on unattached threads. Remove some unreachable code. 
Change-Id: I0fa969f49ee6a8f10752af74a6b0e04d46b4cd97 --- src/check_jni.cc | 77 ++- src/class_linker.cc | 48 +- src/class_linker.h | 24 +- src/class_linker_test.cc | 2 +- src/common_test.h | 2 +- src/compiler.cc | 34 +- src/compiler.h | 26 +- src/compiler/CompilerIR.h | 2 +- src/compiler_llvm/runtime_support_llvm.cc | 7 +- src/compiler_test.cc | 10 +- src/debugger.cc | 72 ++- src/exception_test.cc | 6 +- src/heap.cc | 10 +- src/jdwp/jdwp_main.cc | 2 +- src/jni_compiler_test.cc | 7 +- src/jni_internal.cc | 542 +++++++++------------ src/jni_internal.h | 38 +- src/jni_internal_test.cc | 4 +- src/monitor.cc | 9 +- src/monitor_android.cc | 3 +- src/native/dalvik_system_DexFile.cc | 6 +- src/native/dalvik_system_VMDebug.cc | 4 +- src/native/dalvik_system_VMRuntime.cc | 11 +- src/native/dalvik_system_VMStack.cc | 19 +- src/native/java_lang_Class.cc | 96 ++-- src/native/java_lang_Object.cc | 16 +- src/native/java_lang_Runtime.cc | 9 +- src/native/java_lang_String.cc | 15 +- src/native/java_lang_System.cc | 21 +- src/native/java_lang_Thread.cc | 33 +- src/native/java_lang_Throwable.cc | 5 +- src/native/java_lang_VMClassLoader.cc | 7 +- src/native/java_lang_reflect_Array.cc | 23 +- src/native/java_lang_reflect_Constructor.cc | 13 +- src/native/java_lang_reflect_Field.cc | 43 +- src/native/java_lang_reflect_Method.cc | 16 +- src/native/java_lang_reflect_Proxy.cc | 17 +- ...org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 4 +- src/native/sun_misc_Unsafe.cc | 79 +-- src/oat/runtime/support_proxy.cc | 14 +- src/oat/runtime/support_stubs.cc | 8 +- src/oat_compilation_unit.h | 6 +- src/oat_writer.cc | 4 +- src/oat_writer.h | 6 +- src/object.cc | 32 +- src/object.h | 2 +- src/reflection.cc | 39 +- src/reflection.h | 3 +- src/runtime.cc | 55 ++- src/runtime.h | 16 +- src/scoped_jni_thread_state.h | 146 +++++- src/signal_catcher.cc | 2 +- src/stack.cc | 13 +- src/stack.h | 3 +- src/thread.cc | 210 ++++---- src/thread.h | 31 +- src/verifier/method_verifier.cc | 12 +- 
src/verifier/method_verifier.h | 9 +- src/verifier/reg_type.cc | 2 +- src/verifier/reg_type_cache.cc | 8 +- src/verifier/reg_type_cache.h | 6 +- 61 files changed, 1018 insertions(+), 971 deletions(-) (limited to 'src/native/java_lang_System.cc') diff --git a/src/check_jni.cc b/src/check_jni.cc index 0fd5f6ed6d..47f20e19d5 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -83,11 +83,6 @@ static bool IsSirtLocalRef(JNIEnv* env, jobject localRef) { reinterpret_cast(env)->self->SirtContains(localRef); } -template -T Decode(ScopedJniThreadState& ts, jobject obj) { - return reinterpret_cast(ts.Self()->DecodeJObject(obj)); -} - // Hack to allow forcecopy to work with jniGetNonMovableArrayElements. // The code deliberately uses an invalid sequence of operations, so we // need to pass it through unmodified. Review that code before making @@ -151,14 +146,14 @@ static bool ShouldTrace(JavaVMExt* vm, const Method* method) { class ScopedCheck { public: // For JNIEnv* functions. - explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) { - Init(env, reinterpret_cast(env)->vm, flags, functionName, true); + explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) : ts_(env) { + Init(flags, functionName, true); CheckThread(flags); } // For JavaVM* functions. - explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) { - Init(NULL, vm, kFlag_Invocation, functionName, has_method); + explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) : ts_(vm) { + Init(kFlag_Invocation, functionName, has_method); } bool ForceCopy() { @@ -185,7 +180,6 @@ class ScopedCheck { * Works for both static and instance fields. 
*/ void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) { - ScopedJniThreadState ts(env_); Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -193,7 +187,7 @@ class ScopedCheck { Class* field_type = FieldHelper(f).GetType(); if (!field_type->IsPrimitive()) { if (java_object != NULL) { - Object* obj = Decode(ts, java_object); + Object* obj = ts_.Decode(java_object); // If java_object is a weak global ref whose referent has been cleared, // obj will be NULL. Otherwise, obj should always be non-NULL // and valid. @@ -231,9 +225,7 @@ class ScopedCheck { * Assumes "jobj" has already been validated. */ void CheckInstanceFieldID(jobject java_object, jfieldID fid) { - ScopedJniThreadState ts(env_); - - Object* o = Decode(ts, java_object); + Object* o = ts_.Decode(java_object); if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) { JniAbortF(function_name_, "field operation on invalid %s: %p", ToStr(GetIndirectRefKind(java_object)).c_str(), java_object); @@ -266,7 +258,6 @@ class ScopedCheck { * 'expectedType' will be "L" for all objects, including arrays. */ void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) { - ScopedJniThreadState ts(env_); Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -292,8 +283,7 @@ class ScopedCheck { * Assumes "java_class" has already been validated. */ void CheckStaticFieldID(jclass java_class, jfieldID fid) { - ScopedJniThreadState ts(env_); - Class* c = Decode(ts, java_class); + Class* c = ts_.Decode(java_class); const Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -314,12 +304,11 @@ class ScopedCheck { * Instances of "java_class" must be instances of the method's declaring class. 
*/ void CheckStaticMethod(jclass java_class, jmethodID mid) { - ScopedJniThreadState ts(env_); const Method* m = CheckMethodID(mid); if (m == NULL) { return; } - Class* c = Decode(ts, java_class); + Class* c = ts_.Decode(java_class); if (!c->IsAssignableFrom(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str()); @@ -334,12 +323,11 @@ class ScopedCheck { * will be handled automatically by the instanceof check.) */ void CheckVirtualMethod(jobject java_object, jmethodID mid) { - ScopedJniThreadState ts(env_); const Method* m = CheckMethodID(mid); if (m == NULL) { return; } - Object* o = Decode(ts, java_object); + Object* o = ts_.Decode(java_object); if (!o->InstanceOf(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str()); @@ -386,7 +374,7 @@ class ScopedCheck { va_list ap; const Method* traceMethod = NULL; - if ((!vm_->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) { + if ((!ts_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) { // We need to guard some of the invocation interface's calls: a bad caller might // use DetachCurrentThread or GetEnv on a thread that's not yet attached. 
Thread* self = Thread::Current(); @@ -395,7 +383,7 @@ class ScopedCheck { } } - if (((flags_ & kFlag_ForceTrace) != 0) || (traceMethod != NULL && ShouldTrace(vm_, traceMethod))) { + if (((flags_ & kFlag_ForceTrace) != 0) || (traceMethod != NULL && ShouldTrace(ts_.Vm(), traceMethod))) { va_start(ap, fmt0); std::string msg; for (const char* fmt = fmt0; *fmt;) { @@ -610,8 +598,7 @@ class ScopedCheck { return false; } - ScopedJniThreadState ts(env_); - Object* obj = Decode(ts, java_object); + Object* obj = ts_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) { JniAbortF(function_name_, "%s is an invalid %s: %p (%p)", what, ToStr(GetIndirectRefKind(java_object)).c_str(), java_object, obj); @@ -647,9 +634,7 @@ class ScopedCheck { // Set "has_method" to true if we have a valid thread with a method pointer. // We won't have one before attaching a thread, after detaching a thread, or // when shutting down the runtime. - void Init(JNIEnv* env, JavaVM* vm, int flags, const char* functionName, bool has_method) { - env_ = reinterpret_cast(env); - vm_ = reinterpret_cast(vm); + void Init(int flags, const char* functionName, bool has_method) { flags_ = flags; function_name_ = functionName; has_method_ = has_method; @@ -666,8 +651,7 @@ class ScopedCheck { return; } - ScopedJniThreadState ts(env_); - Array* a = Decode(ts, java_array); + Array* a = ts_.Decode(java_array); if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) { JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)", ToStr(GetIndirectRefKind(java_array)).c_str(), java_array, a); @@ -687,8 +671,8 @@ class ScopedCheck { JniAbortF(function_name_, "jfieldID was NULL"); return NULL; } - Field* f = DecodeField(fid); - if (!Runtime::Current()->GetHeap()->IsHeapAddress(f)) { + Field* f = ts_.DecodeField(fid); + if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || !f->IsField()) { JniAbortF(function_name_, "invalid jfieldID: %p", fid); return NULL; } @@ -700,8 +684,8 @@ class ScopedCheck { 
JniAbortF(function_name_, "jmethodID was NULL"); return NULL; } - Method* m = DecodeMethod(mid); - if (!Runtime::Current()->GetHeap()->IsHeapAddress(m)) { + Method* m = ts_.DecodeMethod(mid); + if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsMethod()) { JniAbortF(function_name_, "invalid jmethodID: %p", mid); return NULL; } @@ -719,9 +703,7 @@ class ScopedCheck { return; } - ScopedJniThreadState ts(env_); - - Object* o = Decode(ts, java_object); + Object* o = ts_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) { // TODO: when we remove work_around_app_jni_bugs, this should be impossible. JniAbortF(function_name_, "native code passing in reference to invalid %s: %p", @@ -751,13 +733,13 @@ class ScopedCheck { // Verify that the current thread is (a) attached and (b) associated with // this particular instance of JNIEnv. - if (env_ != threadEnv) { - if (vm_->work_around_app_jni_bugs) { + if (ts_.Env() != threadEnv) { + if (ts_.Vm()->work_around_app_jni_bugs) { // If we're keeping broken code limping along, we need to suppress the abort... - LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *env_->self; + LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *ts_.Self(); } else { JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s", - ToStr(*self).c_str(), ToStr(*env_->self).c_str()); + ToStr(*self).c_str(), ToStr(*ts_.Self()).c_str()); return; } } @@ -796,7 +778,7 @@ class ScopedCheck { // TODO: do we care any more? art always dumps pending exceptions on aborting threads. 
if (type != "java.lang.OutOfMemoryError") { JniAbortF(function_name_, "JNI %s called with pending exception: %s", - function_name_, type.c_str(), jniGetStackTrace(env_).c_str()); + function_name_, type.c_str(), jniGetStackTrace(ts_.Env()).c_str()); } else { JniAbortF(function_name_, "JNI %s called with %s pending", function_name_, type.c_str()); } @@ -873,8 +855,7 @@ class ScopedCheck { return 0; } - JNIEnvExt* env_; - JavaVMExt* vm_; + const ScopedJniThreadState ts_; const char* function_name_; int flags_; bool has_method_; @@ -1072,7 +1053,7 @@ struct GuardedCopy { static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) { ScopedJniThreadState ts(env); - Array* a = Decode(ts, java_array); + Array* a = ts.Decode(java_array); size_t component_size = a->GetClass()->GetComponentSize(); size_t byte_count = a->GetLength() * component_size; void* result = GuardedCopy::Create(a->GetRawData(component_size), byte_count, true); @@ -1092,7 +1073,7 @@ static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, } ScopedJniThreadState ts(env); - Array* a = Decode(ts, java_array); + Array* a = ts.Decode(java_array); GuardedCopy::Check(__FUNCTION__, dataBuf, true); @@ -1481,7 +1462,7 @@ CALL(void, Void, , , VOID_RETURN, "V"); const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { ScopedJniThreadState ts(env); - String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { @@ -1709,7 +1690,7 @@ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D'); const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { ScopedJniThreadState ts(env); - String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) 
GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { diff --git a/src/class_linker.cc b/src/class_linker.cc index b18b31f65d..3c0c345f52 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -45,6 +45,7 @@ #if defined(ART_USE_LLVM_COMPILER) #include "compiler_llvm/runtime_support_llvm.h" #endif +#include "scoped_jni_thread_state.h" #include "ScopedLocalRef.h" #include "space.h" #include "stack_indirect_reference_table.h" @@ -1116,7 +1117,7 @@ Class* ClassLinker::FindSystemClass(const char* descriptor) { return FindClass(descriptor, NULL); } -Class* ClassLinker::FindClass(const char* descriptor, const ClassLoader* class_loader) { +Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) { DCHECK_NE(*descriptor, '\0') << "descriptor is empty string"; Thread* self = Thread::Current(); DCHECK(self != NULL); @@ -1159,19 +1160,24 @@ Class* ClassLinker::FindClass(const char* descriptor, const ClassLoader* class_l } } else { + ScopedJniThreadState ts(self->GetJniEnv()); + ScopedLocalRef class_loader_object(ts.Env(), + ts.AddLocalReference(class_loader)); std::string class_name_string(DescriptorToDot(descriptor)); - ScopedThreadStateChange tsc(self, kNative); - JNIEnv* env = self->GetJniEnv(); - ScopedLocalRef class_name_object(env, env->NewStringUTF(class_name_string.c_str())); - if (class_name_object.get() == NULL) { - return NULL; + ScopedLocalRef result(ts.Env(), NULL); + { + ScopedThreadStateChange tsc(self, kNative); + ScopedLocalRef class_name_object(ts.Env(), + ts.Env()->NewStringUTF(class_name_string.c_str())); + if (class_name_object.get() == NULL) { + return NULL; + } + CHECK(class_loader_object.get() != NULL); + result.reset(ts.Env()->CallObjectMethod(class_loader_object.get(), + WellKnownClasses::java_lang_ClassLoader_loadClass, + class_name_object.get())); } - ScopedLocalRef class_loader_object(env, AddLocalReference(env, class_loader)); - CHECK(class_loader_object.get() != NULL); - ScopedLocalRef result(env, 
env->CallObjectMethod(class_loader_object.get(), - WellKnownClasses::java_lang_ClassLoader_loadClass, - class_name_object.get())); - if (env->ExceptionCheck()) { + if (ts.Env()->ExceptionCheck()) { // If the ClassLoader threw, pass that exception up. return NULL; } else if (result.get() == NULL) { @@ -1181,7 +1187,7 @@ Class* ClassLinker::FindClass(const char* descriptor, const ClassLoader* class_l return NULL; } else { // success, return Class* - return Decode(env, result.get()); + return ts.Decode(result.get()); } } @@ -1190,7 +1196,7 @@ Class* ClassLinker::FindClass(const char* descriptor, const ClassLoader* class_l } Class* ClassLinker::DefineClass(const StringPiece& descriptor, - const ClassLoader* class_loader, + ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) { SirtRef klass(NULL); @@ -1453,7 +1459,7 @@ static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class void ClassLinker::LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, SirtRef& klass, - const ClassLoader* class_loader) { + ClassLoader* class_loader) { CHECK(klass.get() != NULL); CHECK(klass->GetDexCache() != NULL); CHECK_EQ(Class::kStatusNotReady, klass->GetStatus()); @@ -1707,7 +1713,7 @@ Class* ClassLinker::InitializePrimitiveClass(Class* primitive_class, // array class; that always comes from the base element class. // // Returns NULL with an exception raised on failure. 
-Class* ClassLinker::CreateArrayClass(const std::string& descriptor, const ClassLoader* class_loader) { +Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) { CHECK_EQ('[', descriptor[0]); // Identify the underlying component type @@ -2657,7 +2663,7 @@ bool ClassLinker::EnsureInitialized(Class* c, bool can_run_clinit, bool can_init void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Class* c, SafeMap& field_map) { - const ClassLoader* cl = c->GetClassLoader(); + ClassLoader* cl = c->GetClassLoader(); const byte* class_data = dex_file.GetClassData(dex_class_def); ClassDataItemIterator it(dex_file, class_data); for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) { @@ -3342,7 +3348,7 @@ String* ClassLinker::ResolveString(const DexFile& dex_file, Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx, DexCache* dex_cache, - const ClassLoader* class_loader) { + ClassLoader* class_loader) { DCHECK(dex_cache != NULL); Class* resolved = dex_cache->GetResolvedType(type_idx); if (resolved == NULL) { @@ -3369,7 +3375,7 @@ Class* ClassLinker::ResolveType(const DexFile& dex_file, Method* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx, DexCache* dex_cache, - const ClassLoader* class_loader, + ClassLoader* class_loader, bool is_direct) { DCHECK(dex_cache != NULL); Method* resolved = dex_cache->GetResolvedMethod(method_idx); @@ -3419,7 +3425,7 @@ Method* ClassLinker::ResolveMethod(const DexFile& dex_file, Field* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx, DexCache* dex_cache, - const ClassLoader* class_loader, + ClassLoader* class_loader, bool is_static) { DCHECK(dex_cache != NULL); Field* resolved = dex_cache->GetResolvedField(field_idx); @@ -3459,7 +3465,7 @@ Field* ClassLinker::ResolveField(const DexFile& dex_file, Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, DexCache* 
dex_cache, - const ClassLoader* class_loader) { + ClassLoader* class_loader) { DCHECK(dex_cache != NULL); Field* resolved = dex_cache->GetResolvedField(field_idx); if (resolved != NULL) { diff --git a/src/class_linker.h b/src/class_linker.h index 01c1051864..6cf2e149f1 100644 --- a/src/class_linker.h +++ b/src/class_linker.h @@ -54,12 +54,12 @@ class ClassLinker { // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. - Class* FindClass(const char* descriptor, const ClassLoader* class_loader); + Class* FindClass(const char* descriptor, ClassLoader* class_loader); Class* FindSystemClass(const char* descriptor); // Define a new a class based on a ClassDef from a DexFile - Class* DefineClass(const StringPiece& descriptor, const ClassLoader* class_loader, + Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded @@ -119,7 +119,7 @@ class ClassLinker { if (UNLIKELY(resolved_type == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); - const ClassLoader* class_loader = declaring_class->GetClassLoader(); + ClassLoader* class_loader = declaring_class->GetClassLoader(); const DexFile& dex_file = FindDexFile(dex_cache); resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); } @@ -131,7 +131,7 @@ class ClassLinker { DexCache* dex_cache = declaring_class->GetDexCache(); Class* resolved_type = dex_cache->GetResolvedType(type_idx); if (UNLIKELY(resolved_type == NULL)) { - const ClassLoader* class_loader = declaring_class->GetClassLoader(); + ClassLoader* class_loader = declaring_class->GetClassLoader(); const DexFile& dex_file = FindDexFile(dex_cache); resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); } @@ -145,7 +145,7 @@ class ClassLinker { Class* 
ResolveType(const DexFile& dex_file, uint16_t type_idx, DexCache* dex_cache, - const ClassLoader* class_loader); + ClassLoader* class_loader); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -155,7 +155,7 @@ class ClassLinker { Method* ResolveMethod(const DexFile& dex_file, uint32_t method_idx, DexCache* dex_cache, - const ClassLoader* class_loader, + ClassLoader* class_loader, bool is_direct); Method* ResolveMethod(uint32_t method_idx, const Method* referrer, bool is_direct) { @@ -163,7 +163,7 @@ class ClassLinker { if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); - const ClassLoader* class_loader = declaring_class->GetClassLoader(); + ClassLoader* class_loader = declaring_class->GetClassLoader(); const DexFile& dex_file = FindDexFile(dex_cache); resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, is_direct); } @@ -176,7 +176,7 @@ class ClassLinker { if (UNLIKELY(resolved_field == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); - const ClassLoader* class_loader = declaring_class->GetClassLoader(); + ClassLoader* class_loader = declaring_class->GetClassLoader(); const DexFile& dex_file = FindDexFile(dex_cache); resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static); } @@ -191,7 +191,7 @@ class ClassLinker { Field* ResolveField(const DexFile& dex_file, uint32_t field_idx, DexCache* dex_cache, - const ClassLoader* class_loader, + ClassLoader* class_loader, bool is_static); // Resolve a field with a given ID from the DexFile, storing the @@ -201,7 +201,7 @@ class ClassLinker { Field* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, DexCache* dex_cache, - const ClassLoader* class_loader); + ClassLoader* 
class_loader); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, Method* referrer, uint32_t* length); @@ -323,7 +323,7 @@ class ClassLinker { Primitive::Type type); - Class* CreateArrayClass(const std::string& descriptor, const ClassLoader* class_loader); + Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader); void AppendToBootClassPath(const DexFile& dex_file); void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache); @@ -337,7 +337,7 @@ class ClassLinker { void LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, SirtRef& klass, - const ClassLoader* class_loader); + ClassLoader* class_loader); void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef& klass, SirtRef& dst); diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index f677cae351..a7f9c66c51 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -79,7 +79,7 @@ class ClassLinkerTest : public CommonTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, - const ClassLoader* class_loader) { + ClassLoader* class_loader) { Class* array = class_linker_->FindClass(array_descriptor.c_str(), class_loader); ClassHelper array_component_ch(array->GetComponentType()); EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor()); diff --git a/src/common_test.h b/src/common_test.h index fbd8b5bd0d..a9bd139804 100644 --- a/src/common_test.h +++ b/src/common_test.h @@ -468,7 +468,7 @@ class CommonTest : public testing::Test { return class_loader.get(); } - void CompileClass(const ClassLoader* class_loader, const char* class_name) { + void CompileClass(ClassLoader* class_loader, const char* class_name) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class 
not found " << class_name; diff --git a/src/compiler.cc b/src/compiler.cc index 2633b78073..fd18713e10 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -439,7 +439,7 @@ ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_se } } -void Compiler::CompileAll(const ClassLoader* class_loader, +void Compiler::CompileAll(ClassLoader* class_loader, const std::vector& dex_files) { DCHECK(!Runtime::Current()->IsStarted()); @@ -469,7 +469,7 @@ void Compiler::CompileAll(const ClassLoader* class_loader, void Compiler::CompileOne(const Method* method) { DCHECK(!Runtime::Current()->IsStarted()); - const ClassLoader* class_loader = method->GetDeclaringClass()->GetClassLoader(); + ClassLoader* class_loader = method->GetDeclaringClass()->GetClassLoader(); // Find the dex_file const DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); @@ -487,7 +487,7 @@ void Compiler::CompileOne(const Method* method) { PostCompile(class_loader, dex_files); } -void Compiler::Resolve(const ClassLoader* class_loader, +void Compiler::Resolve(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; @@ -496,7 +496,7 @@ void Compiler::Resolve(const ClassLoader* class_loader, } } -void Compiler::PreCompile(const ClassLoader* class_loader, +void Compiler::PreCompile(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings) { Resolve(class_loader, dex_files, timings); @@ -507,7 +507,7 @@ void Compiler::PreCompile(const ClassLoader* class_loader, timings.AddSplit("PreCompile.InitializeClassesWithoutClinit"); } -void Compiler::PostCompile(const ClassLoader* class_loader, +void Compiler::PostCompile(ClassLoader* class_loader, const std::vector& dex_files) { SetGcMaps(class_loader, dex_files); #if defined(ART_USE_LLVM_COMPILER) @@ -926,7 +926,7 @@ static bool SkipClass(const ClassLoader* class_loader, class CompilationContext { public: 
CompilationContext(ClassLinker* class_linker, - const ClassLoader* class_loader, + ClassLoader* class_loader, Compiler* compiler, DexCache* dex_cache, const DexFile* dex_file) @@ -940,7 +940,7 @@ class CompilationContext { CHECK(class_linker_ != NULL); return class_linker_; } - const ClassLoader* GetClassLoader() { + ClassLoader* GetClassLoader() { return class_loader_; } Compiler* GetCompiler() { @@ -958,7 +958,7 @@ class CompilationContext { private: ClassLinker* class_linker_; - const ClassLoader* class_loader_; + ClassLoader* class_loader_; Compiler* compiler_; DexCache* dex_cache_; const DexFile* dex_file_; @@ -1121,7 +1121,7 @@ static void ResolveType(CompilationContext* context, size_t type_idx) { } } -void Compiler::ResolveDexFile(const ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings) { +void Compiler::ResolveDexFile(ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(dex_file); @@ -1143,7 +1143,7 @@ void Compiler::ResolveDexFile(const ClassLoader* class_loader, const DexFile& de timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields"); } -void Compiler::Verify(const ClassLoader* class_loader, +void Compiler::Verify(ClassLoader* class_loader, const std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; @@ -1190,7 +1190,7 @@ static void VerifyClass(CompilationContext* context, size_t class_def_index) { CHECK(!Thread::Current()->IsExceptionPending()) << PrettyTypeOf(Thread::Current()->GetException()); } -void Compiler::VerifyDexFile(const ClassLoader* class_loader, const DexFile& dex_file) { +void Compiler::VerifyDexFile(ClassLoader* class_loader, const DexFile& dex_file) { dex_file.ChangePermissions(PROT_READ | PROT_WRITE); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -1200,7 +1200,7 @@ void 
Compiler::VerifyDexFile(const ClassLoader* class_loader, const DexFile& dex dex_file.ChangePermissions(PROT_READ); } -void Compiler::InitializeClassesWithoutClinit(const ClassLoader* class_loader, +void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, const std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; @@ -1209,7 +1209,7 @@ void Compiler::InitializeClassesWithoutClinit(const ClassLoader* class_loader, } } -void Compiler::InitializeClassesWithoutClinit(const ClassLoader* class_loader, const DexFile& dex_file) { +void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, const DexFile& dex_file) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs(); class_def_index++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); @@ -1390,7 +1390,7 @@ void ForClassesInAllDexFiles(CompilationContext* worker_context, STLDeleteElements(&threads); } -void Compiler::Compile(const ClassLoader* class_loader, +void Compiler::Compile(ClassLoader* class_loader, const std::vector& dex_files) { #if defined(ART_USE_LLVM_COMPILER) if (dex_files.size() <= 0) { @@ -1465,7 +1465,7 @@ void Compiler::CompileClass(CompilationContext* context, size_t class_def_index) DCHECK(!it.HasNext()); } -void Compiler::CompileDexFile(const ClassLoader* class_loader, const DexFile& dex_file) { +void Compiler::CompileDexFile(ClassLoader* class_loader, const DexFile& dex_file) { CompilationContext context(NULL, class_loader, this, NULL, &dex_file); ForAll(&context, 0, dex_file.NumClassDefs(), Compiler::CompileClass, thread_count_); } @@ -1605,7 +1605,7 @@ CompiledMethod* Compiler::GetCompiledMethod(MethodReference ref) const { return it->second; } -void Compiler::SetGcMaps(const ClassLoader* class_loader, const std::vector& dex_files) { +void Compiler::SetGcMaps(ClassLoader* class_loader, const 
std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -1613,7 +1613,7 @@ void Compiler::SetGcMaps(const ClassLoader* class_loader, const std::vectorGetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(dex_file); for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs(); class_def_index++) { diff --git a/src/compiler.h b/src/compiler.h index 8f5d5b47c0..5202967323 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -53,7 +53,7 @@ class Compiler { ~Compiler(); - void CompileAll(const ClassLoader* class_loader, + void CompileAll(ClassLoader* class_loader, const std::vector& dex_files); // Compile a single Method @@ -255,24 +255,24 @@ class Compiler { // Checks if class specified by type_idx is one of the image_classes_ bool IsImageClass(const std::string& descriptor) const; - void PreCompile(const ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); - void PostCompile(const ClassLoader* class_loader, const std::vector& dex_files); + void PreCompile(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); + void PostCompile(ClassLoader* class_loader, const std::vector& dex_files); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. 
- void Resolve(const ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); - void ResolveDexFile(const ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings); + void Resolve(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); + void ResolveDexFile(ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings); - void Verify(const ClassLoader* class_loader, const std::vector& dex_files); - void VerifyDexFile(const ClassLoader* class_loader, const DexFile& dex_file); + void Verify(ClassLoader* class_loader, const std::vector& dex_files); + void VerifyDexFile(ClassLoader* class_loader, const DexFile& dex_file); - void InitializeClassesWithoutClinit(const ClassLoader* class_loader, const std::vector& dex_files); - void InitializeClassesWithoutClinit(const ClassLoader* class_loader, const DexFile& dex_file); + void InitializeClassesWithoutClinit(ClassLoader* class_loader, const std::vector& dex_files); + void InitializeClassesWithoutClinit(ClassLoader* class_loader, const DexFile& dex_file); - void Compile(const ClassLoader* class_loader, + void Compile(ClassLoader* class_loader, const std::vector& dex_files); - void CompileDexFile(const ClassLoader* class_loader, const DexFile& dex_file); + void CompileDexFile(ClassLoader* class_loader, const DexFile& dex_file); void CompileClass(const DexFile::ClassDef& class_def, const ClassLoader* class_loader, const DexFile& dex_file); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx, @@ -280,8 +280,8 @@ class Compiler { static void CompileClass(CompilationContext* context, size_t class_def_index); - void SetGcMaps(const ClassLoader* class_loader, const std::vector& dex_files); - void SetGcMapsDexFile(const ClassLoader* class_loader, const DexFile& dex_file); + void SetGcMaps(ClassLoader* class_loader, const std::vector& dex_files); + void SetGcMapsDexFile(ClassLoader* class_loader, const DexFile& 
dex_file); void SetGcMapsMethod(const DexFile& dex_file, Method* method); void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub); diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h index ba997158ff..ef56876a4e 100644 --- a/src/compiler/CompilerIR.h +++ b/src/compiler/CompilerIR.h @@ -436,7 +436,7 @@ struct CompilationUnit { ClassLinker* class_linker; // Linker to resolve fields and methods const DexFile* dex_file; // DexFile containing the method being compiled DexCache* dex_cache; // DexFile's corresponding cache - const ClassLoader* class_loader; // compiling method's class loader + ClassLoader* class_loader; // compiling method's class loader uint32_t method_idx; // compiling method's index into method_ids of DexFile const DexFile::CodeItem* code_item; // compiling method's DexFile code_item uint32_t access_flags; // compiling method's access flags diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index 93785a60f4..cfaaea4fca 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -667,10 +667,11 @@ void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) { // Start new JNI local reference state JNIEnvExt* env = thread->GetJniEnv(); + ScopedJniThreadState ts(env); ScopedJniEnvLocalRefState env_state(env); // Create local ref. copies of the receiver - jobject rcvr_jobj = AddLocalReference(env, receiver); + jobject rcvr_jobj = ts.AddLocalReference(receiver); // Convert proxy method into expected interface method Method* interface_method = proxy_method->FindOverriddenMethod(); @@ -680,7 +681,7 @@ void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) 
{ // Set up arguments array and place in local IRT during boxing (which may allocate/GC) jvalue args_jobj[3]; args_jobj[0].l = rcvr_jobj; - args_jobj[1].l = AddLocalReference(env, interface_method); + args_jobj[1].l = ts.AddLocalReference(interface_method); // Args array, if no arguments then NULL (don't include receiver in argument count) args_jobj[2].l = NULL; ObjectArray* args = NULL; @@ -690,7 +691,7 @@ void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) { CHECK(thread->IsExceptionPending()); return; } - args_jobj[2].l = AddLocalReference(env, args); + args_jobj[2].l = ts.AddLocalReference(args); } // Get parameter types. diff --git a/src/compiler_test.cc b/src/compiler_test.cc index e3faa3bf17..088726fff5 100644 --- a/src/compiler_test.cc +++ b/src/compiler_test.cc @@ -31,13 +31,13 @@ namespace art { class CompilerTest : public CommonTest { protected: - void CompileAll(const ClassLoader* class_loader) { + void CompileAll(ClassLoader* class_loader) { compiler_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader)); MakeAllExecutable(class_loader); } - void EnsureCompiled(const ClassLoader* class_loader, - const char* class_name, const char* method, const char* signature, bool is_virtual) { + void EnsureCompiled(ClassLoader* class_loader, const char* class_name, const char* method, + const char* signature, bool is_virtual) { CompileAll(class_loader); runtime_->Start(); env_ = Thread::Current()->GetJniEnv(); @@ -51,7 +51,7 @@ class CompilerTest : public CommonTest { CHECK(mid_ != NULL) << "Method not found: " << class_name << "." 
<< method << signature; } - void MakeAllExecutable(const ClassLoader* class_loader) { + void MakeAllExecutable(ClassLoader* class_loader) { const std::vector& class_path = Runtime::Current()->GetCompileTimeClassPath(class_loader); for (size_t i = 0; i != class_path.size(); ++i) { @@ -61,7 +61,7 @@ class CompilerTest : public CommonTest { } } - void MakeDexFileExecutable(const ClassLoader* class_loader, const DexFile& dex_file) { + void MakeDexFileExecutable(ClassLoader* class_loader, const DexFile& dex_file) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (size_t i = 0; i < dex_file.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); diff --git a/src/debugger.cc b/src/debugger.cc index 7dfbd22eac..9e3ab3fce4 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -28,6 +28,7 @@ #endif #include "object_utils.h" #include "safe_map.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock.h" #include "ScopedLocalRef.h" #include "ScopedPrimitiveArray.h" @@ -220,11 +221,12 @@ static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) { } static Thread* DecodeThread(JDWP::ObjectId threadId) { + ScopedJniThreadState ts(Thread::Current()); Object* thread_peer = gRegistry->Get(threadId); if (thread_peer == NULL || thread_peer == kInvalidObject) { return NULL; } - return Thread::FromManagedThread(thread_peer); + return Thread::FromManagedThread(ts, thread_peer); } static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { @@ -1369,11 +1371,17 @@ JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId threadGroupId) { } JDWP::ObjectId Dbg::GetSystemThreadGroupId() { - return gRegistry->Add(Thread::GetSystemThreadGroup()); + ScopedJniThreadState ts(Thread::Current()); + Object* group = + ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup)->GetObject(NULL); + return gRegistry->Add(group); } JDWP::ObjectId Dbg::GetMainThreadGroupId() { - return 
gRegistry->Add(Thread::GetMainThreadGroup()); + ScopedJniThreadState ts(Thread::Current()); + Object* group = + ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup)->GetObject(NULL); + return gRegistry->Add(group); } bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) { @@ -1422,7 +1430,11 @@ bool Dbg::IsSuspended(JDWP::ObjectId threadId) { } void Dbg::GetThreadGroupThreadsImpl(Object* thread_group, JDWP::ObjectId** ppThreadIds, uint32_t* pThreadCount) { - struct ThreadListVisitor { + class ThreadListVisitor { + public: + ThreadListVisitor(const ScopedJniThreadState& ts, Object* thread_group) + : ts_(ts), thread_group_(thread_group) {} + static void Visit(Thread* t, void* arg) { reinterpret_cast(arg)->Visit(t); } @@ -1433,27 +1445,34 @@ void Dbg::GetThreadGroupThreadsImpl(Object* thread_group, JDWP::ObjectId** ppThr // query all threads, so it's easier if we just don't tell them about this thread. return; } - if (thread_group == NULL || t->GetThreadGroup() == thread_group) { - threads.push_back(gRegistry->Add(t->GetPeer())); + if (thread_group_ == NULL || t->GetThreadGroup(ts_) == thread_group_) { + threads_.push_back(gRegistry->Add(t->GetPeer())); } } - Object* thread_group; - std::vector threads; + const std::vector& GetThreads() { + return threads_; + } + + private: + const ScopedJniThreadState& ts_; + Object* const thread_group_; + std::vector threads_; }; - ThreadListVisitor tlv; - tlv.thread_group = thread_group; + ScopedJniThreadState ts(Thread::Current()); + ThreadListVisitor tlv(ts, thread_group); Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); - *pThreadCount = tlv.threads.size(); + *pThreadCount = tlv.GetThreads().size(); if (*pThreadCount == 0) { *ppThreadIds = NULL; } else { + // TODO: pass in std::vector rather than passing around pointers. 
*ppThreadIds = new JDWP::ObjectId[*pThreadCount]; for (size_t i = 0; i < *pThreadCount; ++i) { - (*ppThreadIds)[i] = tlv.threads[i]; + (*ppThreadIds)[i] = tlv.GetThreads()[i]; } } } @@ -1546,9 +1565,10 @@ void Dbg::ResumeVM() { } void Dbg::SuspendThread(JDWP::ObjectId threadId) { + ScopedJniThreadState ts(Thread::Current()); Object* peer = gRegistry->Get(threadId); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(peer); + Thread* thread = Thread::FromManagedThread(ts, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for suspend: " << peer; return; @@ -1557,9 +1577,10 @@ void Dbg::SuspendThread(JDWP::ObjectId threadId) { } void Dbg::ResumeThread(JDWP::ObjectId threadId) { + ScopedJniThreadState ts(Thread::Current()); Object* peer = gRegistry->Get(threadId); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(peer); + Thread* thread = Thread::FromManagedThread(ts, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for resume: " << peer; return; @@ -2336,14 +2357,12 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object } void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { - Thread* self = Thread::Current(); + ScopedJniThreadState ts(Thread::Current()); // We can be called while an exception is pending. We need // to preserve that across the method invocation. - SirtRef old_exception(self->GetException()); - self->ClearException(); - - ScopedThreadStateChange tsc(self, kRunnable); + SirtRef old_exception(ts.Self()->GetException()); + ts.Self()->ClearException(); // Translate the method through the vtable, unless the debugger wants to suppress it. 
Method* m = pReq->method_; @@ -2359,15 +2378,15 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { CHECK_EQ(sizeof(jvalue), sizeof(uint64_t)); - LOG(INFO) << "self=" << self << " pReq->receiver_=" << pReq->receiver_ << " m=" << m << " #" << pReq->arg_count_ << " " << pReq->arg_values_; - pReq->result_value = InvokeWithJValues(self, pReq->receiver_, m, reinterpret_cast(pReq->arg_values_)); + LOG(INFO) << "self=" << ts.Self() << " pReq->receiver_=" << pReq->receiver_ << " m=" << m << " #" << pReq->arg_count_ << " " << pReq->arg_values_; + pReq->result_value = InvokeWithJValues(ts, pReq->receiver_, m, reinterpret_cast(pReq->arg_values_)); - pReq->exception = gRegistry->Add(self->GetException()); + pReq->exception = gRegistry->Add(ts.Self()->GetException()); pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty()); if (pReq->exception != 0) { - Object* exc = self->GetException(); + Object* exc = ts.Self()->GetException(); VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc); - self->ClearException(); + ts.Self()->ClearException(); pReq->result_value.SetJ(0); } else if (pReq->result_tag == JDWP::JT_OBJECT) { /* if no exception thrown, examine object result more closely */ @@ -2390,7 +2409,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { } if (old_exception.get() != NULL) { - self->SetException(old_exception.get()); + ts.Self()->SetException(old_exception.get()); } } @@ -2549,7 +2568,8 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf); } else { CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; - SirtRef name(t->GetThreadName()); + ScopedJniThreadState ts(Thread::Current()); + SirtRef name(t->GetThreadName(ts)); size_t char_count = (name.get() != NULL) ? 
name->GetLength() : 0; const jchar* chars = name->GetCharArray()->GetData(); diff --git a/src/exception_test.cc b/src/exception_test.cc index 90bcb7c6b7..269822a627 100644 --- a/src/exception_test.cc +++ b/src/exception_test.cc @@ -19,6 +19,7 @@ #include "dex_file.h" #include "gtest/gtest.h" #include "runtime.h" +#include "scoped_jni_thread_state.h" #include "thread.h" #include "UniquePtr.h" @@ -160,12 +161,13 @@ TEST_F(ExceptionTest, StackTraceElement) { #endif JNIEnv* env = thread->GetJniEnv(); - jobject internal = thread->CreateInternalStackTrace(env); + ScopedJniThreadState ts(env); + jobject internal = thread->CreateInternalStackTrace(ts); ASSERT_TRUE(internal != NULL); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); ASSERT_TRUE(ste_array != NULL); ObjectArray* trace_array = - Decode*>(env, ste_array); + ts.Decode*>(ste_array); ASSERT_TRUE(trace_array != NULL); ASSERT_TRUE(trace_array->Get(0) != NULL); diff --git a/src/heap.cc b/src/heap.cc index 9fbfa32867..c6dfdf78a8 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -31,6 +31,7 @@ #include "object_utils.h" #include "os.h" #include "scoped_heap_lock.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock_releaser.h" #include "ScopedLocalRef.h" #include "space.h" @@ -935,20 +936,19 @@ Object* Heap::DequeuePendingReference(Object** list) { } void Heap::AddFinalizerReference(Thread* self, Object* object) { - ScopedThreadStateChange tsc(self, kRunnable); + ScopedJniThreadState ts(self); JValue args[1]; args[0].SetL(object); - DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args, NULL); + ts.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args, NULL); } void Heap::EnqueueClearedReferences(Object** cleared) { DCHECK(cleared != NULL); if (*cleared != NULL) { - Thread* self = Thread::Current(); - ScopedThreadStateChange tsc(self, kRunnable); + ScopedJniThreadState 
ts(Thread::Current()); JValue args[1]; args[0].SetL(*cleared); - DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(self, NULL, args, NULL); + ts.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(ts.Self(), NULL, args, NULL); *cleared = NULL; } } diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc index 3a4d3980b0..dfe83ff513 100644 --- a/src/jdwp/jdwp_main.cc +++ b/src/jdwp/jdwp_main.cc @@ -270,7 +270,7 @@ static void* StartJdwpThread(void* arg) { void JdwpState::Run() { Runtime* runtime = Runtime::Current(); - runtime->AttachCurrentThread("JDWP", true, Thread::GetSystemThreadGroup()); + runtime->AttachCurrentThread("JDWP", true, runtime->GetSystemThreadGroup()); VLOG(jdwp) << "JDWP: thread running"; diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc index f5e1d1e273..22b4b2cc20 100644 --- a/src/jni_compiler_test.cc +++ b/src/jni_compiler_test.cc @@ -536,10 +536,10 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { ScopedJniThreadState ts(env); // Build stack trace - jobject internal = Thread::Current()->CreateInternalStackTrace(env); + jobject internal = Thread::Current()->CreateInternalStackTrace(ts); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); ObjectArray* trace_array = - Decode*>(env, ste_array); + ts.Decode*>(ste_array); EXPECT_TRUE(trace_array != NULL); EXPECT_EQ(11, trace_array->GetLength()); @@ -591,8 +591,9 @@ TEST_F(JniCompilerTest, ReturnGlobalRef) { jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) { // Add 10 local references + ScopedJniThreadState ts(env); for (int i = 0; i < 10; i++) { - AddLocalReference(env, Decode(env, thisObj)); + ts.AddLocalReference(ts.Decode(thisObj)); } return x+1; } diff --git a/src/jni_internal.cc b/src/jni_internal.cc index fa79a016e1..74b740a229 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -72,85 +72,6 @@ void SetJniGlobalsMax(size_t max) { } } -/* - * 
Add a local reference for an object to the current stack frame. When - * the native function returns, the reference will be discarded. - * - * We need to allow the same reference to be added multiple times. - * - * This will be called on otherwise unreferenced objects. We cannot do - * GC allocations here, and it's best if we don't grab a mutex. - * - * Returns the local reference (currently just the same pointer that was - * passed in), or NULL on failure. - */ -template -T AddLocalReference(JNIEnv* public_env, const Object* const_obj) { - // The jobject type hierarchy has no notion of const, so it's not worth carrying through. - Object* obj = const_cast(const_obj); - - if (obj == NULL) { - return NULL; - } - - DCHECK_NE((reinterpret_cast(obj) & 0xffff0000), 0xebad0000); - - JNIEnvExt* env = reinterpret_cast(public_env); - IndirectReferenceTable& locals = env->locals; - - uint32_t cookie = env->local_ref_cookie; - IndirectRef ref = locals.Add(cookie, obj); - -#if 0 // TODO: fix this to understand PushLocalFrame, so we can turn it on. - if (env->check_jni) { - size_t entry_count = locals.Capacity(); - if (entry_count > 16) { - LOG(WARNING) << "Warning: more than 16 JNI local references: " - << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n" - << Dumpable(locals); - // TODO: LOG(FATAL) in a later release? - } - } -#endif - - if (env->vm->work_around_app_jni_bugs) { - // Hand out direct pointers to support broken old apps. 
- return reinterpret_cast(obj); - } - - return reinterpret_cast(ref); -} -// Explicit instantiations -template jclass AddLocalReference(JNIEnv* public_env, const Object* const_obj); -template jobject AddLocalReference(JNIEnv* public_env, const Object* const_obj); -template jobjectArray AddLocalReference(JNIEnv* public_env, const Object* const_obj); -template jstring AddLocalReference(JNIEnv* public_env, const Object* const_obj); -template jthrowable AddLocalReference(JNIEnv* public_env, const Object* const_obj); - -// For external use. -template -T Decode(JNIEnv* public_env, jobject obj) { - JNIEnvExt* env = reinterpret_cast(public_env); - return reinterpret_cast(env->self->DecodeJObject(obj)); -} -// TODO: Change to use template when Mac OS build server no longer uses GCC 4.2.*. -Object* DecodeObj(JNIEnv* public_env, jobject obj) { - JNIEnvExt* env = reinterpret_cast(public_env); - return reinterpret_cast(env->self->DecodeJObject(obj)); -} -// Explicit instantiations. -template Array* Decode(JNIEnv*, jobject); -template Class* Decode(JNIEnv*, jobject); -template ClassLoader* Decode(JNIEnv*, jobject); -template Object* Decode(JNIEnv*, jobject); -template ObjectArray* Decode*>(JNIEnv*, jobject); -template ObjectArray >* Decode >*>(JNIEnv*, jobject); -template ObjectArray* Decode*>(JNIEnv*, jobject); -template ObjectArray* Decode*>(JNIEnv*, jobject); -template ObjectArray* Decode*>(JNIEnv*, jobject); -template String* Decode(JNIEnv*, jobject); -template Throwable* Decode(JNIEnv*, jobject); - size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) { size_t num_bytes = 0; for (size_t i = 1; i < shorty_len; ++i) { @@ -186,8 +107,7 @@ class ArgArray { return arg_array_; } - void BuildArgArray(JNIEnv* public_env, va_list ap) { - JNIEnvExt* env = reinterpret_cast(public_env); + void BuildArgArray(const ScopedJniThreadState& ts, va_list ap) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -209,7 +129,7 @@ class 
ArgArray { arg_array_[offset].SetF(va_arg(ap, jdouble)); break; case 'L': - arg_array_[offset].SetL(DecodeObj(env, va_arg(ap, jobject))); + arg_array_[offset].SetL(ts.Decode(va_arg(ap, jobject))); break; case 'D': arg_array_[offset].SetD(va_arg(ap, jdouble)); @@ -221,8 +141,7 @@ class ArgArray { } } - void BuildArgArray(JNIEnv* public_env, jvalue* args) { - JNIEnvExt* env = reinterpret_cast(public_env); + void BuildArgArray(const ScopedJniThreadState& ts, jvalue* args) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -244,7 +163,7 @@ class ArgArray { arg_array_[offset].SetF(args[offset].f); break; case 'L': - arg_array_[offset].SetL(DecodeObj(env, args[offset].l)); + arg_array_[offset].SetL(ts.Decode(args[offset].l)); break; case 'D': arg_array_[offset].SetD(args[offset].d); @@ -276,12 +195,6 @@ static jweak AddWeakGlobalReference(ScopedJniThreadState& ts, Object* obj) { return reinterpret_cast(ref); } -// For internal use. -template -static T Decode(ScopedJniThreadState& ts, jobject obj) { - return reinterpret_cast(ts.Self()->DecodeJObject(obj)); -} - static void CheckMethodArguments(Method* m, JValue* args) { MethodHelper mh(m); ObjectArray* parameter_types = mh.GetParameterTypes(); @@ -306,47 +219,45 @@ static void CheckMethodArguments(Method* m, JValue* args) { } } -static JValue InvokeWithArgArray(JNIEnv* public_env, Object* receiver, Method* method, JValue* args) { - JNIEnvExt* env = reinterpret_cast(public_env); - if (UNLIKELY(env->check_jni)) { +static JValue InvokeWithArgArray(const ScopedJniThreadState& ts, Object* receiver, Method* method, + JValue* args) { + if (UNLIKELY(ts.Env()->check_jni)) { CheckMethodArguments(method, args); } JValue result; - method->Invoke(env->self, receiver, args, &result); + method->Invoke(ts.Self(), receiver, args, &result); return result; } -static JValue InvokeWithVarArgs(JNIEnv* public_env, jobject obj, jmethodID mid, va_list args) { - JNIEnvExt* env = 
reinterpret_cast(public_env); - Object* receiver = DecodeObj(env, obj); - Method* method = DecodeMethod(mid); +static JValue InvokeWithVarArgs(const ScopedJniThreadState& ts, jobject obj, jmethodID mid, + va_list args) { + Object* receiver = ts.Decode(obj); + Method* method = ts.DecodeMethod(mid); ArgArray arg_array(method); - arg_array.BuildArgArray(env, args); - return InvokeWithArgArray(env, receiver, method, arg_array.get()); + arg_array.BuildArgArray(ts, args); + return InvokeWithArgArray(ts, receiver, method, arg_array.get()); } static Method* FindVirtualMethod(Object* receiver, Method* method) { return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method); } -static JValue InvokeVirtualOrInterfaceWithJValues(JNIEnv* public_env, jobject obj, jmethodID mid, - jvalue* args) { - JNIEnvExt* env = reinterpret_cast(public_env); - Object* receiver = DecodeObj(env, obj); - Method* method = FindVirtualMethod(receiver, DecodeMethod(mid)); +static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedJniThreadState& ts, jobject obj, + jmethodID mid, jvalue* args) { + Object* receiver = ts.Decode(obj); + Method* method = FindVirtualMethod(receiver, ts.DecodeMethod(mid)); ArgArray arg_array(method); - arg_array.BuildArgArray(env, args); - return InvokeWithArgArray(env, receiver, method, arg_array.get()); + arg_array.BuildArgArray(ts, args); + return InvokeWithArgArray(ts, receiver, method, arg_array.get()); } -static JValue InvokeVirtualOrInterfaceWithVarArgs(JNIEnv* public_env, jobject obj, jmethodID mid, - va_list args) { - JNIEnvExt* env = reinterpret_cast(public_env); - Object* receiver = DecodeObj(env, obj); - Method* method = FindVirtualMethod(receiver, DecodeMethod(mid)); +static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedJniThreadState& ts, jobject obj, + jmethodID mid, va_list args) { + Object* receiver = ts.Decode(obj); + Method* method = FindVirtualMethod(receiver, ts.DecodeMethod(mid)); ArgArray arg_array(method); - 
arg_array.BuildArgArray(env, args); - return InvokeWithArgArray(env, receiver, method, arg_array.get()); + arg_array.BuildArgArray(ts, args); + return InvokeWithArgArray(ts, receiver, method, arg_array.get()); } // Section 12.3.2 of the JNI spec describes JNI class descriptors. They're @@ -379,7 +290,7 @@ static void ThrowNoSuchMethodError(ScopedJniThreadState& ts, Class* c, const cha } static jmethodID FindMethodID(ScopedJniThreadState& ts, jclass jni_class, const char* name, const char* sig, bool is_static) { - Class* c = Decode(ts, jni_class); + Class* c = ts.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -401,10 +312,10 @@ static jmethodID FindMethodID(ScopedJniThreadState& ts, jclass jni_class, const return NULL; } - return EncodeMethod(method); + return ts.EncodeMethod(method); } -static const ClassLoader* GetClassLoader(Thread* self) { +static ClassLoader* GetClassLoader(Thread* self) { Method* method = self->GetCurrentMethod(); if (method == NULL || PrettyMethod(method, false) == "java.lang.Runtime.nativeLoad") { return self->GetClassLoaderOverride(); @@ -412,8 +323,9 @@ static const ClassLoader* GetClassLoader(Thread* self) { return method->GetDeclaringClass()->GetClassLoader(); } -static jfieldID FindFieldID(ScopedJniThreadState& ts, jclass jni_class, const char* name, const char* sig, bool is_static) { - Class* c = Decode(ts, jni_class); +static jfieldID FindFieldID(const ScopedJniThreadState& ts, jclass jni_class, const char* name, + const char* sig, bool is_static) { + Class* c = ts.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -422,7 +334,7 @@ static jfieldID FindFieldID(ScopedJniThreadState& ts, jclass jni_class, const ch Class* field_type; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); if (sig[1] != '\0') { - const ClassLoader* cl = GetClassLoader(ts.Self()); + ClassLoader* cl = 
GetClassLoader(ts.Self()); field_type = class_linker->FindClass(sig, cl); } else { field_type = class_linker->FindPrimitiveClass(*sig); @@ -447,31 +359,31 @@ static jfieldID FindFieldID(ScopedJniThreadState& ts, jclass jni_class, const ch name, ClassHelper(c).GetDescriptor()); return NULL; } - return EncodeField(field); + return ts.EncodeField(field); } -static void PinPrimitiveArray(ScopedJniThreadState& ts, const Array* array) { +static void PinPrimitiveArray(const ScopedJniThreadState& ts, const Array* array) { JavaVMExt* vm = ts.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Add(array); } -static void UnpinPrimitiveArray(ScopedJniThreadState& ts, const Array* array) { +static void UnpinPrimitiveArray(const ScopedJniThreadState& ts, const Array* array) { JavaVMExt* vm = ts.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Remove(array); } template -static JniT NewPrimitiveArray(ScopedJniThreadState& ts, jsize length) { +static JniT NewPrimitiveArray(const ScopedJniThreadState& ts, jsize length) { CHECK_GE(length, 0); // TODO: ReportJniError ArtT* result = ArtT::Alloc(length); - return AddLocalReference(ts.Env(), result); + return ts.AddLocalReference(result); } template static CArrayT GetPrimitiveArray(ScopedJniThreadState& ts, ArrayT java_array, jboolean* is_copy) { - ArtArrayT* array = Decode(ts, java_array); + ArtArrayT* array = ts.Decode(java_array); PinPrimitiveArray(ts, array); if (is_copy != NULL) { *is_copy = JNI_FALSE; @@ -482,7 +394,7 @@ static CArrayT GetPrimitiveArray(ScopedJniThreadState& ts, ArrayT java_array, jb template static void ReleasePrimitiveArray(ScopedJniThreadState& ts, ArrayT java_array, jint mode) { if (mode != JNI_COMMIT) { - Array* array = Decode(ts, java_array); + Array* array = ts.Decode(java_array); UnpinPrimitiveArray(ts, array); } } @@ -501,7 +413,7 @@ static void ThrowSIOOBE(ScopedJniThreadState& ts, jsize start, jsize length, jsi template static void GetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_array, 
jsize start, jsize length, JavaT* buf) { - ArrayT* array = Decode(ts, java_array); + ArrayT* array = ts.Decode(java_array); if (start < 0 || length < 0 || start + length > array->GetLength()) { ThrowAIOOBE(ts, array, start, length, "src"); } else { @@ -512,7 +424,7 @@ static void GetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_ar template static void SetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_array, jsize start, jsize length, const JavaT* buf) { - ArrayT* array = Decode(ts, java_array); + ArrayT* array = ts.Decode(java_array); if (start < 0 || length < 0 || start + length > array->GetLength()) { ThrowAIOOBE(ts, array, start, length, "dst"); } else { @@ -548,7 +460,8 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj } jmethodID mid = env->GetMethodID(exception_class, "", signature); if (mid == NULL) { - LOG(ERROR) << "No " << signature << " in " << PrettyClass(Decode(env, exception_class)); + LOG(ERROR) << "No " << signature << " in " + << PrettyClass(ts.Decode(exception_class)); return JNI_ERR; } @@ -557,7 +470,7 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj return JNI_ERR; } - ts.Self()->SetException(Decode(ts, exception.get())); + ts.Self()->SetException(ts.Decode(exception.get())); return JNI_OK; } @@ -584,11 +497,11 @@ static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, JavaVMAttachArgs* args = static_cast(raw_args); const char* thread_name = NULL; - Object* thread_group = NULL; + jobject thread_group = NULL; if (args != NULL) { CHECK_GE(args->version, JNI_VERSION_1_2); thread_name = args->name; - thread_group = static_cast(NULL)->DecodeJObject(args->group); + thread_group = args->group; } runtime->AttachCurrentThread(thread_name, as_daemon, thread_group); @@ -754,17 +667,16 @@ class Libraries { SafeMap libraries_; }; -JValue InvokeWithJValues(JNIEnv* public_env, jobject obj, jmethodID mid, jvalue* args) { - JNIEnvExt* env 
= reinterpret_cast(public_env); - Object* receiver = Decode(env, obj); - Method* method = DecodeMethod(mid); +JValue InvokeWithJValues(const ScopedJniThreadState& ts, jobject obj, jmethodID mid, jvalue* args) { + Object* receiver = ts.Decode(obj); + Method* method = ts.DecodeMethod(mid); ArgArray arg_array(method); - arg_array.BuildArgArray(env, args); - return InvokeWithArgArray(env, receiver, method, arg_array.get()); + arg_array.BuildArgArray(ts, args); + return InvokeWithArgArray(ts, receiver, method, arg_array.get()); } -JValue InvokeWithJValues(Thread* self, Object* receiver, Method* m, JValue* args) { - return InvokeWithArgArray(self->GetJniEnv(), receiver, m, args); +JValue InvokeWithJValues(const ScopedJniThreadState& ts, Object* receiver, Method* m, JValue* args) { + return InvokeWithArgArray(ts, receiver, m, args); } class JNI { @@ -787,54 +699,54 @@ class JNI { std::string descriptor(NormalizeJniClassDescriptor(name)); Class* c = NULL; if (runtime->IsStarted()) { - const ClassLoader* cl = GetClassLoader(ts.Self()); + ClassLoader* cl = GetClassLoader(ts.Self()); c = class_linker->FindClass(descriptor.c_str(), cl); } else { c = class_linker->FindSystemClass(descriptor.c_str()); } - return AddLocalReference(env, c); + return ts.AddLocalReference(c); } static jmethodID FromReflectedMethod(JNIEnv* env, jobject java_method) { ScopedJniThreadState ts(env); - Method* method = Decode(ts, java_method); - return EncodeMethod(method); + Method* method = ts.Decode(java_method); + return ts.EncodeMethod(method); } static jfieldID FromReflectedField(JNIEnv* env, jobject java_field) { ScopedJniThreadState ts(env); - Field* field = Decode(ts, java_field); - return EncodeField(field); + Field* field = ts.Decode(java_field); + return ts.EncodeField(field); } static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) { ScopedJniThreadState ts(env); - Method* method = DecodeMethod(mid); - return AddLocalReference(env, method); + Method* method = 
ts.DecodeMethod(mid); + return ts.AddLocalReference(method); } static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) { ScopedJniThreadState ts(env); - Field* field = DecodeField(fid); - return AddLocalReference(env, field); + Field* field = ts.DecodeField(fid); + return ts.AddLocalReference(field); } static jclass GetObjectClass(JNIEnv* env, jobject java_object) { ScopedJniThreadState ts(env); - Object* o = Decode(ts, java_object); - return AddLocalReference(env, o->GetClass()); + Object* o = ts.Decode(java_object); + return ts.AddLocalReference(o->GetClass()); } static jclass GetSuperclass(JNIEnv* env, jclass java_class) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); - return AddLocalReference(env, c->GetSuperClass()); + Class* c = ts.Decode(java_class); + return ts.AddLocalReference(c->GetSuperClass()); } static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) { ScopedJniThreadState ts(env); - Class* c1 = Decode(ts, java_class1); - Class* c2 = Decode(ts, java_class2); + Class* c1 = ts.Decode(java_class1); + Class* c2 = ts.Decode(java_class2); return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE; } @@ -845,15 +757,15 @@ class JNI { // Note: JNI is different from regular Java instanceof in this respect return JNI_TRUE; } else { - Object* obj = Decode(ts, jobj); - Class* c = Decode(ts, java_class); + Object* obj = ts.Decode(jobj); + Class* c = ts.Decode(java_class); return obj->InstanceOf(c) ? 
JNI_TRUE : JNI_FALSE; } } static jint Throw(JNIEnv* env, jthrowable java_exception) { ScopedJniThreadState ts(env); - Throwable* exception = Decode(ts, java_exception); + Throwable* exception = ts.Decode(java_exception); if (exception == NULL) { return JNI_ERR; } @@ -882,7 +794,7 @@ class JNI { Throwable* original_exception = self->GetException(); self->ClearException(); - ScopedLocalRef exception(env, AddLocalReference(env, original_exception)); + ScopedLocalRef exception(env, ts.AddLocalReference(original_exception)); ScopedLocalRef exception_class(env, env->GetObjectClass(exception.get())); jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V"); if (mid == NULL) { @@ -903,7 +815,7 @@ class JNI { static jthrowable ExceptionOccurred(JNIEnv* env) { ScopedJniThreadState ts(env); Object* exception = ts.Self()->GetException(); - return (exception != NULL) ? AddLocalReference(env, exception) : NULL; + return ts.AddLocalReference(exception); } static void FatalError(JNIEnv* env, const char* msg) { @@ -922,9 +834,9 @@ class JNI { static jobject PopLocalFrame(JNIEnv* env, jobject java_survivor) { ScopedJniThreadState ts(env); - Object* survivor = Decode(ts, java_survivor); + Object* survivor = ts.Decode(java_survivor); ts.Env()->PopFrame(); - return AddLocalReference(env, survivor); + return ts.AddLocalReference(survivor); } static jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity) { @@ -932,7 +844,7 @@ class JNI { return EnsureLocalCapacity(ts, desired_capacity, "EnsureLocalCapacity"); } - static jint EnsureLocalCapacity(ScopedJniThreadState& ts, jint desired_capacity, const char* caller) { + static jint EnsureLocalCapacity(const ScopedJniThreadState& ts, jint desired_capacity, const char* caller) { // TODO: we should try to expand the table if necessary. 
if (desired_capacity < 1 || desired_capacity > static_cast(kLocalsMax)) { LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; @@ -956,7 +868,7 @@ class JNI { JavaVMExt* vm = ts.Vm(); IndirectReferenceTable& globals = vm->globals; MutexLock mu(vm->globals_lock); - IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, Decode(ts, obj)); + IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, ts.Decode(obj)); return reinterpret_cast(ref); } @@ -978,7 +890,7 @@ class JNI { static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) { ScopedJniThreadState ts(env); - return AddWeakGlobalReference(ts, Decode(ts, obj)); + return AddWeakGlobalReference(ts, ts.Decode(obj)); } static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) { @@ -1006,7 +918,7 @@ class JNI { IndirectReferenceTable& locals = ts.Env()->locals; uint32_t cookie = ts.Env()->local_ref_cookie; - IndirectRef ref = locals.Add(cookie, Decode(ts, obj)); + IndirectRef ref = locals.Add(cookie, ts.Decode(obj)); return reinterpret_cast(ref); } @@ -1032,17 +944,17 @@ class JNI { static jboolean IsSameObject(JNIEnv* env, jobject obj1, jobject obj2) { ScopedJniThreadState ts(env); - return (Decode(ts, obj1) == Decode(ts, obj2)) + return (ts.Decode(obj1) == ts.Decode(obj2)) ? JNI_TRUE : JNI_FALSE; } static jobject AllocObject(JNIEnv* env, jclass java_class) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); + Class* c = ts.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } - return AddLocalReference(env, c->AllocObject()); + return ts.AddLocalReference(c->AllocObject()); } static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) 
{ @@ -1056,7 +968,7 @@ class JNI { static jobject NewObjectV(JNIEnv* env, jclass java_class, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); + Class* c = ts.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -1064,7 +976,7 @@ class JNI { if (result == NULL) { return NULL; } - jobject local_result = AddLocalReference(env, result); + jobject local_result = ts.AddLocalReference(result); CallNonvirtualVoidMethodV(env, local_result, java_class, mid, args); if (!ts.Self()->IsExceptionPending()) { return local_result; @@ -1075,7 +987,7 @@ class JNI { static jobject NewObjectA(JNIEnv* env, jclass java_class, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); + Class* c = ts.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -1083,7 +995,7 @@ class JNI { if (result == NULL) { return NULL; } - jobject local_result = AddLocalReference(env, result); + jobject local_result = ts.AddLocalReference(result); CallNonvirtualVoidMethodA(env, local_result, java_class, mid, args); if (!ts.Self()->IsExceptionPending()) { return local_result; @@ -1106,199 +1018,199 @@ class JNI { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); - return AddLocalReference(env, result.GetL()); + return ts.AddLocalReference(result.GetL()); } static jobject CallObjectMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jobject 
CallObjectMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - JValue result(InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jboolean CallBooleanMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetZ(); } static jboolean CallBooleanMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetZ(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetZ(); } static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetZ(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetZ(); } static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetB(); } static jbyte CallByteMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetB(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetB(); } static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetB(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetB(); } static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetC(); } static jchar CallCharMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetC(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetC(); } static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetC(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetC(); } static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetD(); } static jdouble CallDoubleMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetD(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetD(); } static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetD(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetD(); } static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetF(); } static jfloat CallFloatMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetF(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetF(); } static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetF(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetF(); } static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetI(); } static jint CallIntMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetI(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetI(); } static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetI(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetI(); } static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetJ(); } static jlong CallLongMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetJ(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetJ(); } static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetJ(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetJ(); } static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetS(); } static jshort CallShortMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args).GetS(); + return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetS(); } static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args).GetS(); + return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetS(); } static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); va_end(ap); } static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - InvokeVirtualOrInterfaceWithVarArgs(env, obj, mid, args); + InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args); } static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - InvokeVirtualOrInterfaceWithJValues(env, obj, mid, args); + InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args); } static jobject CallNonvirtualObjectMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); - jobject local_result = AddLocalReference(env, result.GetL()); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + jobject local_result = ts.AddLocalReference(result.GetL()); va_end(ap); return local_result; } @@ -1306,15 +1218,15 @@ class JNI { static jobject CallNonvirtualObjectMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - JValue result(InvokeWithVarArgs(env, obj, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeWithVarArgs(ts, obj, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jobject CallNonvirtualObjectMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - JValue result(InvokeWithJValues(env, obj, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeWithJValues(ts, obj, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jboolean CallNonvirtualBooleanMethod(JNIEnv* env, @@ -1322,7 +1234,7 @@ class JNI { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetZ(); } @@ -1330,20 +1242,20 @@ class JNI { static jboolean CallNonvirtualBooleanMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetZ(); + return InvokeWithVarArgs(ts, obj, mid, args).GetZ(); } static jboolean CallNonvirtualBooleanMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetZ(); + return InvokeWithJValues(ts, obj, mid, args).GetZ(); } static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetB(); } @@ -1351,20 +1263,20 @@ class JNI { static jbyte CallNonvirtualByteMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetB(); + return InvokeWithVarArgs(ts, obj, mid, args).GetB(); } static jbyte CallNonvirtualByteMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetB(); + return InvokeWithJValues(ts, obj, mid, args).GetB(); } static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetC(); } @@ -1372,20 +1284,20 @@ class JNI { static jchar CallNonvirtualCharMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetC(); + return InvokeWithVarArgs(ts, obj, mid, args).GetC(); } static jchar CallNonvirtualCharMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetC(); + return InvokeWithJValues(ts, obj, mid, args).GetC(); } static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetS(); } @@ -1393,20 +1305,20 @@ class JNI { static jshort CallNonvirtualShortMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetS(); + return InvokeWithVarArgs(ts, obj, mid, args).GetS(); } static jshort CallNonvirtualShortMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetS(); + return InvokeWithJValues(ts, obj, mid, args).GetS(); } static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetI(); } @@ -1414,20 +1326,20 @@ class JNI { static jint CallNonvirtualIntMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetI(); + return InvokeWithVarArgs(ts, obj, mid, args).GetI(); } static jint CallNonvirtualIntMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetI(); + return InvokeWithJValues(ts, obj, mid, args).GetI(); } static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetJ(); } @@ -1435,20 +1347,20 @@ class JNI { static jlong CallNonvirtualLongMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetJ(); + return InvokeWithVarArgs(ts, obj, mid, args).GetJ(); } static jlong CallNonvirtualLongMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetJ(); + return InvokeWithJValues(ts, obj, mid, args).GetJ(); } static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetF(); } @@ -1456,20 +1368,20 @@ class JNI { static jfloat CallNonvirtualFloatMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetF(); + return InvokeWithVarArgs(ts, obj, mid, args).GetF(); } static jfloat CallNonvirtualFloatMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetF(); + return InvokeWithJValues(ts, obj, mid, args).GetF(); } static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, obj, mid, ap)); + JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); va_end(ap); return result.GetD(); } @@ -1477,33 +1389,33 @@ class JNI { static jdouble CallNonvirtualDoubleMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, obj, mid, args).GetD(); + return InvokeWithVarArgs(ts, obj, mid, args).GetD(); } static jdouble CallNonvirtualDoubleMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, obj, mid, args).GetD(); + return InvokeWithJValues(ts, obj, mid, args).GetD(); } static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - InvokeWithVarArgs(env, obj, mid, ap); + InvokeWithVarArgs(ts, obj, mid, ap); va_end(ap); } static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - InvokeWithVarArgs(env, obj, mid, args); + InvokeWithVarArgs(ts, obj, mid, args); } static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - InvokeWithJValues(env, obj, mid, args); + InvokeWithJValues(ts, obj, mid, args); } static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) { @@ -1519,42 +1431,42 @@ class JNI { static jobject GetObjectField(JNIEnv* env, jobject obj, jfieldID fid) { ScopedJniThreadState ts(env); - Object* o = Decode(ts, obj); - Field* f = DecodeField(fid); - return AddLocalReference(env, f->GetObject(o)); + Object* o = ts.Decode(obj); + Field* f = ts.DecodeField(fid); + return ts.AddLocalReference(f->GetObject(o)); } static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) { ScopedJniThreadState ts(env); - Field* f = 
DecodeField(fid); - return AddLocalReference(env, f->GetObject(NULL)); + Field* f = ts.DecodeField(fid); + return ts.AddLocalReference(f->GetObject(NULL)); } static void SetObjectField(JNIEnv* env, jobject java_object, jfieldID fid, jobject java_value) { ScopedJniThreadState ts(env); - Object* o = Decode(ts, java_object); - Object* v = Decode(ts, java_value); - Field* f = DecodeField(fid); + Object* o = ts.Decode(java_object); + Object* v = ts.Decode(java_value); + Field* f = ts.DecodeField(fid); f->SetObject(o, v); } static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) { ScopedJniThreadState ts(env); - Object* v = Decode(ts, java_value); - Field* f = DecodeField(fid); + Object* v = ts.Decode(java_value); + Field* f = ts.DecodeField(fid); f->SetObject(NULL, v); } #define GET_PRIMITIVE_FIELD(fn, instance) \ ScopedJniThreadState ts(env); \ - Object* o = Decode(ts, instance); \ - Field* f = DecodeField(fid); \ + Object* o = ts.Decode(instance); \ + Field* f = ts.DecodeField(fid); \ return f->fn(o) #define SET_PRIMITIVE_FIELD(fn, instance, value) \ ScopedJniThreadState ts(env); \ - Object* o = Decode(ts, instance); \ - Field* f = DecodeField(fid); \ + Object* o = ts.Decode(instance); \ + Field* f = ts.DecodeField(fid); \ f->fn(o, value) static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) { @@ -1689,222 +1601,222 @@ class JNI { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); - jobject local_result = AddLocalReference(env, result.GetL()); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + jobject local_result = ts.AddLocalReference(result.GetL()); va_end(ap); return local_result; } static jobject CallStaticObjectMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - JValue result(InvokeWithVarArgs(env, NULL, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeWithVarArgs(ts, 
NULL, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jobject CallStaticObjectMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - JValue result(InvokeWithJValues(env, NULL, mid, args)); - return AddLocalReference(env, result.GetL()); + JValue result(InvokeWithJValues(ts, NULL, mid, args)); + return ts.AddLocalReference(result.GetL()); } static jboolean CallStaticBooleanMethod(JNIEnv* env, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetZ(); } static jboolean CallStaticBooleanMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetZ(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetZ(); } static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetZ(); + return InvokeWithJValues(ts, NULL, mid, args).GetZ(); } static jbyte CallStaticByteMethod(JNIEnv* env, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetB(); } static jbyte CallStaticByteMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetB(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetB(); } static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetB(); + return InvokeWithJValues(ts, NULL, mid, args).GetB(); } static jchar CallStaticCharMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetC(); } static jchar CallStaticCharMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetC(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetC(); } static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetC(); + return InvokeWithJValues(ts, NULL, mid, args).GetC(); } static jshort CallStaticShortMethod(JNIEnv* env, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetS(); } static jshort CallStaticShortMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetS(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetS(); } static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetS(); + return InvokeWithJValues(ts, NULL, mid, args).GetS(); } static jint CallStaticIntMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetI(); } static jint CallStaticIntMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetI(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetI(); } static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetI(); + return InvokeWithJValues(ts, NULL, mid, args).GetI(); } static jlong CallStaticLongMethod(JNIEnv* env, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetJ(); } static jlong CallStaticLongMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetJ(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetJ(); } static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetJ(); + return InvokeWithJValues(ts, NULL, mid, args).GetJ(); } static jfloat CallStaticFloatMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetF(); } static jfloat CallStaticFloatMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetF(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetF(); } static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetF(); + return InvokeWithJValues(ts, NULL, mid, args).GetF(); } static jdouble CallStaticDoubleMethod(JNIEnv* env, jclass, jmethodID mid, ...) { ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(env, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); va_end(ap); return result.GetD(); } static jdouble CallStaticDoubleMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - return InvokeWithVarArgs(env, NULL, mid, args).GetD(); + return InvokeWithVarArgs(ts, NULL, mid, args).GetD(); } static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - return InvokeWithJValues(env, NULL, mid, args).GetD(); + return InvokeWithJValues(ts, NULL, mid, args).GetD(); } static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ ScopedJniThreadState ts(env); va_list ap; va_start(ap, mid); - InvokeWithVarArgs(env, NULL, mid, ap); + InvokeWithVarArgs(ts, NULL, mid, ap); va_end(ap); } static void CallStaticVoidMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { ScopedJniThreadState ts(env); - InvokeWithVarArgs(env, NULL, mid, args); + InvokeWithVarArgs(ts, NULL, mid, args); } static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { ScopedJniThreadState ts(env); - InvokeWithJValues(env, NULL, mid, args); + InvokeWithJValues(ts, NULL, mid, args); } static jstring NewString(JNIEnv* env, const jchar* chars, jsize char_count) { ScopedJniThreadState ts(env); String* result = String::AllocFromUtf16(char_count, chars); - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static jstring NewStringUTF(JNIEnv* env, const char* utf) { - ScopedJniThreadState ts(env); if (utf == NULL) { return NULL; } + ScopedJniThreadState ts(env); String* result = String::AllocFromModifiedUtf8(utf); - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static jsize GetStringLength(JNIEnv* env, jstring java_string) { ScopedJniThreadState ts(env); - return Decode(ts, java_string)->GetLength(); + return ts.Decode(java_string)->GetLength(); } static jsize GetStringUTFLength(JNIEnv* env, jstring java_string) { ScopedJniThreadState ts(env); - return Decode(ts, java_string)->GetUtfLength(); + return ts.Decode(java_string)->GetUtfLength(); } static void GetStringRegion(JNIEnv* env, jstring java_string, jsize start, jsize length, jchar* buf) { ScopedJniThreadState ts(env); - String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); if (start < 0 || length < 0 || start + length > s->GetLength()) { ThrowSIOOBE(ts, start, length, s->GetLength()); } else { @@ -1915,7 +1827,7 @@ class JNI { static void GetStringUTFRegion(JNIEnv* env, jstring java_string, jsize start, jsize length, char* buf) { ScopedJniThreadState ts(env); - 
String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); if (start < 0 || length < 0 || start + length > s->GetLength()) { ThrowSIOOBE(ts, start, length, s->GetLength()); } else { @@ -1926,7 +1838,7 @@ class JNI { static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* is_copy) { ScopedJniThreadState ts(env); - String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); const CharArray* chars = s->GetCharArray(); PinPrimitiveArray(ts, chars); if (is_copy != NULL) { @@ -1937,7 +1849,7 @@ class JNI { static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar*) { ScopedJniThreadState ts(env); - UnpinPrimitiveArray(ts, Decode(ts, java_string)->GetCharArray()); + UnpinPrimitiveArray(ts, ts.Decode(java_string)->GetCharArray()); } static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) { @@ -1958,7 +1870,7 @@ class JNI { if (is_copy != NULL) { *is_copy = JNI_TRUE; } - String* s = Decode(ts, java_string); + String* s = ts.Decode(java_string); size_t byte_count = s->GetUtfLength(); char* bytes = new char[byte_count + 1]; CHECK(bytes != NULL); // bionic aborts anyway. 
@@ -1975,7 +1887,7 @@ class JNI { static jsize GetArrayLength(JNIEnv* env, jarray java_array) { ScopedJniThreadState ts(env); - Object* obj = Decode(ts, java_array); + Object* obj = ts.Decode(java_array); CHECK(obj->IsArrayInstance()); // TODO: ReportJniError Array* array = obj->AsArray(); return array->GetLength(); @@ -1983,15 +1895,15 @@ class JNI { static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index) { ScopedJniThreadState ts(env); - ObjectArray* array = Decode*>(ts, java_array); - return AddLocalReference(env, array->Get(index)); + ObjectArray* array = ts.Decode*>(java_array); + return ts.AddLocalReference(array->Get(index)); } static void SetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index, jobject java_value) { ScopedJniThreadState ts(env); - ObjectArray* array = Decode*>(ts, java_array); - Object* value = Decode(ts, java_value); + ObjectArray* array = ts.Decode*>(java_array); + Object* value = ts.Decode(java_value); array->Set(index, value); } @@ -2035,7 +1947,7 @@ class JNI { CHECK_GE(length, 0); // TODO: ReportJniError // Compute the array class corresponding to the given element class. - Class* element_class = Decode(ts, element_jclass); + Class* element_class = ts.Decode(element_jclass); std::string descriptor; descriptor += "["; descriptor += ClassHelper(element_class).GetDescriptor(); @@ -2047,15 +1959,15 @@ class JNI { } // Allocate and initialize if necessary. 
- Class* array_class = Decode(ts, java_array_class.get()); + Class* array_class = ts.Decode(java_array_class.get()); ObjectArray* result = ObjectArray::Alloc(array_class, length); if (initial_element != NULL) { - Object* initial_object = Decode(ts, initial_element); + Object* initial_object = ts.Decode(initial_element); for (jsize i = 0; i < length; ++i) { result->Set(i, initial_object); } } - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static jshortArray NewShortArray(JNIEnv* env, jsize length) { @@ -2065,7 +1977,7 @@ class JNI { static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray java_array, jboolean* is_copy) { ScopedJniThreadState ts(env); - Array* array = Decode(ts, java_array); + Array* array = ts.Decode(java_array); PinPrimitiveArray(ts, array); if (is_copy != NULL) { *is_copy = JNI_FALSE; @@ -2240,7 +2152,7 @@ class JNI { static jint RegisterNatives(JNIEnv* env, jclass java_class, const JNINativeMethod* methods, jint method_count) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); + Class* c = ts.Decode(java_class); for (int i = 0; i < method_count; i++) { const char* name = methods[i].name; @@ -2274,7 +2186,7 @@ class JNI { static jint UnregisterNatives(JNIEnv* env, jclass java_class) { ScopedJniThreadState ts(env); - Class* c = Decode(ts, java_class); + Class* c = ts.Decode(java_class); VLOG(jni) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]"; @@ -2296,7 +2208,7 @@ class JNI { static jint MonitorEnter(JNIEnv* env, jobject java_object) { ScopedJniThreadState ts(env); - Object* o = Decode(ts, java_object); + Object* o = ts.Decode(java_object); o->MonitorEnter(ts.Self()); if (ts.Self()->IsExceptionPending()) { return JNI_ERR; @@ -2307,7 +2219,7 @@ class JNI { static jint MonitorExit(JNIEnv* env, jobject java_object) { ScopedJniThreadState ts(env); - Object* o = Decode(ts, java_object); + Object* o = ts.Decode(java_object); o->MonitorExit(ts.Self()); if 
(ts.Self()->IsExceptionPending()) { return JNI_ERR; @@ -2386,7 +2298,7 @@ class JNI { // If we're handing out direct pointers, check whether it's a direct pointer // to a local reference. - if (Decode(ts, java_object) == reinterpret_cast(java_object)) { + if (ts.Decode(java_object) == reinterpret_cast(java_object)) { if (ts.Env()->locals.ContainsDirectPointer(reinterpret_cast(java_object))) { return JNILocalRefType; } @@ -2950,7 +2862,7 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_lo // the comments in the JNI FindClass function.) typedef int (*JNI_OnLoadFn)(JavaVM*, void*); JNI_OnLoadFn jni_on_load = reinterpret_cast(sym); - const ClassLoader* old_class_loader = self->GetClassLoaderOverride(); + ClassLoader* old_class_loader = self->GetClassLoaderOverride(); self->SetClassLoaderOverride(class_loader); int version = 0; diff --git a/src/jni_internal.h b/src/jni_internal.h index be5bca00f6..b96a4d769b 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -43,6 +43,7 @@ class Field; union JValue; class Libraries; class Method; +class ScopedJniThreadState; class Thread; void SetJniGlobalsMax(size_t max); @@ -50,42 +51,9 @@ void JniAbortF(const char* jni_function_name, const char* fmt, ...); void* FindNativeMethod(Thread* thread); void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count); -template T Decode(JNIEnv*, jobject); -template T AddLocalReference(JNIEnv*, const Object*); - -inline Field* DecodeField(jfieldID fid) { -#ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we should make these unique weak globals if Field instances can ever move. 
- UNIMPLEMENTED(WARNING); -#endif - return reinterpret_cast(fid); -} - -inline jfieldID EncodeField(Field* field) { -#ifdef MOVING_GARBAGE_COLLECTOR - UNIMPLEMENTED(WARNING); -#endif - return reinterpret_cast(field); -} - -inline Method* DecodeMethod(jmethodID mid) { -#ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we should make these unique weak globals if Method instances can ever move. - UNIMPLEMENTED(WARNING); -#endif - return reinterpret_cast(mid); -} - -inline jmethodID EncodeMethod(Method* method) { -#ifdef MOVING_GARBAGE_COLLECTOR - UNIMPLEMENTED(WARNING); -#endif - return reinterpret_cast(method); -} - size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len); -JValue InvokeWithJValues(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args); -JValue InvokeWithJValues(Thread* self, Object* receiver, Method* m, JValue* args); +JValue InvokeWithJValues(const ScopedJniThreadState&, jobject obj, jmethodID mid, jvalue* args); +JValue InvokeWithJValues(const ScopedJniThreadState&, Object* receiver, Method* m, JValue* args); int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc index 00397a3fb0..daca1b540c 100644 --- a/src/jni_internal_test.cc +++ b/src/jni_internal_test.cc @@ -20,6 +20,7 @@ #include "common_test.h" #include "ScopedLocalRef.h" +#include "scoped_jni_thread_state.h" namespace art { @@ -1245,6 +1246,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { jobject outer; jobject inner1, inner2; + ScopedJniThreadState ts(env_); Object* inner2_direct_pointer; { env_->PushLocalFrame(4); @@ -1254,7 +1256,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { env_->PushLocalFrame(4); inner1 = env_->NewLocalRef(outer); inner2 = env_->NewStringUTF("survivor"); - inner2_direct_pointer = Decode(env_, inner2); + inner2_direct_pointer = ts.Decode(inner2); env_->PopLocalFrame(inner2); } diff --git a/src/monitor.cc b/src/monitor.cc index 
dde67ea9da..de08b88447 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -28,6 +28,7 @@ #include "mutex.h" #include "object.h" #include "object_utils.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock.h" #include "stl_util.h" #include "thread.h" @@ -825,15 +826,17 @@ uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) { } static uint32_t LockOwnerFromThreadLock(Object* thread_lock) { - if (thread_lock == NULL || thread_lock->GetClass() != WellKnownClasses::ToClass(WellKnownClasses::java_lang_ThreadLock)) { + ScopedJniThreadState ts(Thread::Current()); + if (thread_lock == NULL || + thread_lock->GetClass() != ts.Decode(WellKnownClasses::java_lang_ThreadLock)) { return ThreadList::kInvalidId; } - Field* thread_field = DecodeField(WellKnownClasses::java_lang_ThreadLock_thread); + Field* thread_field = ts.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread); Object* managed_thread = thread_field->GetObject(thread_lock); if (managed_thread == NULL) { return ThreadList::kInvalidId; } - Field* vmData_field = DecodeField(WellKnownClasses::java_lang_Thread_vmData); + Field* vmData_field = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); uintptr_t vmData = static_cast(vmData_field->GetInt(managed_thread)); Thread* thread = reinterpret_cast(vmData); if (thread == NULL) { diff --git a/src/monitor_android.cc b/src/monitor_android.cc index dc77b6d424..94f86e8085 100644 --- a/src/monitor_android.cc +++ b/src/monitor_android.cc @@ -69,7 +69,8 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample cp = EventLogWriteInt(cp, Monitor::IsSensitiveThread()); // Emit self thread name string, <= 37 bytes. - std::string thread_name(self->GetThreadName()->ToModifiedUtf8()); + std::string thread_name; + self->GetThreadName(thread_name); cp = EventLogWriteString(cp, thread_name.c_str(), thread_name.size()); // Emit the wait time, 5 bytes. 
diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 89d7130645..3bf0ea562f 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -126,7 +126,7 @@ static void DexFile_closeDexFile(JNIEnv*, jclass, jint cookie) { static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader, jint cookie) { - ScopedJniThreadState tsc(env); + ScopedJniThreadState ts(env); const DexFile* dex_file = toDexFile(cookie); if (dex_file == NULL) { return NULL; @@ -142,10 +142,10 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); class_linker->RegisterDexFile(*dex_file); - Object* class_loader_object = Decode(env, javaLoader); + Object* class_loader_object = ts.Decode(javaLoader); ClassLoader* class_loader = down_cast(class_loader_object); Class* result = class_linker->DefineClass(descriptor, class_loader, *dex_file, *dex_class_def); - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jint cookie) { diff --git a/src/native/dalvik_system_VMDebug.cc b/src/native/dalvik_system_VMDebug.cc index 9b10cdadf6..70067fe016 100644 --- a/src/native/dalvik_system_VMDebug.cc +++ b/src/native/dalvik_system_VMDebug.cc @@ -22,6 +22,7 @@ #include "hprof/hprof.h" #include "jni_internal.h" #include "ScopedUtfChars.h" +#include "scoped_jni_thread_state.h" #include "toStringArray.h" #include "trace.h" @@ -204,7 +205,8 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) { } static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass, jboolean countAssignable) { - Class* c = Decode(env, javaClass); + ScopedJniThreadState ts(env); + Class* c = ts.Decode(javaClass); if (c == NULL) { return 0; } diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc 
index 09ca251ff3..417ae5b29f 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -22,6 +22,7 @@ #include "object.h" #include "object_utils.h" #include "scoped_heap_lock.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock.h" #include "space.h" #include "thread.h" @@ -48,7 +49,7 @@ static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) { } static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaElementClass, jint length) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedJniThreadState ts(env); #ifdef MOVING_GARBAGE_COLLECTOR // TODO: right now, we don't have a copying collector, so there's no need // to do anything special here, but we ought to pass the non-movability @@ -56,7 +57,7 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle UNIMPLEMENTED(FATAL); #endif - Class* element_class = Decode(env, javaElementClass); + Class* element_class = ts.Decode(javaElementClass); if (element_class == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "element class == null"); return NULL; @@ -75,15 +76,15 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle if (result == NULL) { return NULL; } - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) { if (javaArray == NULL) { // Most likely allocation failed return 0; } - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Array* array = Decode(env, javaArray); + ScopedJniThreadState ts(env); + Array* array = ts.Decode(javaArray); if (!array->IsArrayInstance()) { Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;", "not an array"); return 0; diff --git a/src/native/dalvik_system_VMStack.cc b/src/native/dalvik_system_VMStack.cc index e3ecbd985e..933a5d5b10 100644 --- 
a/src/native/dalvik_system_VMStack.cc +++ b/src/native/dalvik_system_VMStack.cc @@ -26,10 +26,11 @@ namespace art { static jobject GetThreadStack(JNIEnv* env, jobject javaThread) { + ScopedJniThreadState ts(env); ScopedHeapLock heap_lock; ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, javaThread); - return (thread != NULL) ? GetThreadStack(env, thread) : NULL; + Thread* thread = Thread::FromManagedThread(ts, javaThread); + return (thread != NULL) ? GetThreadStack(ts, thread) : NULL; } static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThread, jobjectArray javaSteArray) { @@ -44,10 +45,10 @@ static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThre // Returns the defining class loader of the caller's caller. static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env, kNative); // Not a state change out of native. + ScopedJniThreadState ts(env); NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 2); visitor.WalkStack(); - return AddLocalReference(env, visitor.caller->GetDeclaringClass()->GetClassLoader()); + return ts.AddLocalReference(visitor.caller->GetDeclaringClass()->GetClassLoader()); } static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap, jobject javaSystem) { @@ -72,20 +73,20 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject ja Object* class_loader; }; ScopedJniThreadState ts(env); - Object* bootstrap = Decode(env, javaBootstrap); - Object* system = Decode(env, javaSystem); + Object* bootstrap = ts.Decode(javaBootstrap); + Object* system = ts.Decode(javaSystem); ClosestUserClassLoaderVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), bootstrap, system); visitor.WalkStack(); - return AddLocalReference(env, visitor.class_loader); + return ts.AddLocalReference(visitor.class_loader); } // Returns the class of the 
caller's caller's caller. static jclass VMStack_getStackClass2(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env, kNative); // Not a state change out of native. + ScopedJniThreadState ts(env); NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 3); visitor.WalkStack(); - return AddLocalReference(env, visitor.caller->GetDeclaringClass()); + return ts.AddLocalReference(visitor.caller->GetDeclaringClass()); } static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) { diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index 99e3a26d46..ecab777b17 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -27,8 +27,8 @@ namespace art { -static Class* DecodeClass(JNIEnv* env, jobject java_class) { - Class* c = Decode(env, java_class); +static Class* DecodeClass(const ScopedJniThreadState& ts, jobject java_class) { + Class* c = ts.Decode(java_class); DCHECK(c != NULL); DCHECK(c->IsClass()); // TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke . @@ -39,7 +39,7 @@ static Class* DecodeClass(JNIEnv* env, jobject java_class) { // "name" is in "binary name" format, e.g. "dalvik.system.Debug$1". 
static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean initialize, jobject javaLoader) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedJniThreadState ts(env); ScopedUtfChars name(env, javaName); if (name.c_str() == NULL) { return NULL; @@ -55,8 +55,7 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean } std::string descriptor(DotToDescriptor(name.c_str())); - Object* loader = Decode(env, javaLoader); - ClassLoader* class_loader = down_cast(loader); + ClassLoader* class_loader = ts.Decode(javaLoader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Class* c = class_linker->FindClass(descriptor.c_str(), class_loader); if (c == NULL) { @@ -71,11 +70,12 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean if (initialize) { class_linker->EnsureInitialized(c, true, true); } - return AddLocalReference(env, c); + return ts.AddLocalReference(c); } static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { - Class* c = DecodeClass(env, javaClass); + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); if (c->IsPrimitive() || c->IsArrayClass() || c->IsProxyClass()) { return 0; // primitive, array and proxy classes don't have class definitions } @@ -88,12 +88,13 @@ static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { } template -static jobjectArray ToArray(JNIEnv* env, const char* array_class_name, const std::vector& objects) { - ScopedLocalRef array_class(env, env->FindClass(array_class_name)); - jobjectArray result = env->NewObjectArray(objects.size(), array_class.get(), NULL); +static jobjectArray ToArray(const ScopedJniThreadState& ts, const char* array_class_name, + const std::vector& objects) { + ScopedLocalRef array_class(ts.Env(), ts.Env()->FindClass(array_class_name)); + jobjectArray result = ts.Env()->NewObjectArray(objects.size(), array_class.get(), NULL); for (size_t i = 
0; i < objects.size(); ++i) { - ScopedLocalRef object(env, AddLocalReference(env, objects[i])); - env->SetObjectArrayElement(result, i, object.get()); + ScopedLocalRef object(ts.Env(), ts.AddLocalReference(objects[i])); + ts.Env()->SetObjectArrayElement(result, i, object.get()); } return result; } @@ -109,11 +110,8 @@ static bool IsVisibleConstructor(Method* m, bool public_only) { } static jobjectArray Class_getDeclaredConstructors(JNIEnv* env, jclass javaClass, jboolean publicOnly) { - Class* c = DecodeClass(env, javaClass); - if (c == NULL) { - return NULL; - } - + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); std::vector constructors; for (size_t i = 0; i < c->NumDirectMethods(); ++i) { Method* m = c->GetDirectMethod(i); @@ -122,7 +120,7 @@ static jobjectArray Class_getDeclaredConstructors(JNIEnv* env, jclass javaClass, } } - return ToArray(env, "java/lang/reflect/Constructor", constructors); + return ToArray(ts, "java/lang/reflect/Constructor", constructors); } static bool IsVisibleField(Field* f, bool public_only) { @@ -133,12 +131,8 @@ static bool IsVisibleField(Field* f, bool public_only) { } static jobjectArray Class_getDeclaredFields(JNIEnv* env, jclass javaClass, jboolean publicOnly) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Class* c = DecodeClass(env, javaClass); - if (c == NULL) { - return NULL; - } - + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); std::vector fields; FieldHelper fh; for (size_t i = 0; i < c->NumInstanceFields(); ++i) { @@ -170,7 +164,7 @@ static jobjectArray Class_getDeclaredFields(JNIEnv* env, jclass javaClass, jbool } } - return ToArray(env, "java/lang/reflect/Field", fields); + return ToArray(ts, "java/lang/reflect/Field", fields); } static bool IsVisibleMethod(Method* m, bool public_only) { @@ -187,8 +181,8 @@ static bool IsVisibleMethod(Method* m, bool public_only) { } static jobjectArray Class_getDeclaredMethods(JNIEnv* env, jclass javaClass, jboolean 
publicOnly) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Class* c = DecodeClass(env, javaClass); + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); if (c == NULL) { return NULL; } @@ -224,11 +218,12 @@ static jobjectArray Class_getDeclaredMethods(JNIEnv* env, jclass javaClass, jboo } } - return ToArray(env, "java/lang/reflect/Method", methods); + return ToArray(ts, "java/lang/reflect/Method", methods); } static jobject Class_getDex(JNIEnv* env, jobject javaClass) { - Class* c = DecodeClass(env, javaClass); + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); DexCache* dex_cache = c->GetDexCache(); if (dex_cache == NULL) { @@ -287,13 +282,10 @@ static Method* FindConstructorOrMethodInArray(ObjectArray* methods, cons static jobject Class_getDeclaredConstructorOrMethod(JNIEnv* env, jclass javaClass, jstring javaName, jobjectArray javaArgs) { - Class* c = DecodeClass(env, javaClass); - if (c == NULL) { - return NULL; - } - - std::string name(Decode(env, javaName)->ToModifiedUtf8()); - ObjectArray* arg_array = Decode*>(env, javaArgs); + ScopedJniThreadState ts(env); + Class* c = DecodeClass(ts, javaClass); + std::string name(ts.Decode(javaName)->ToModifiedUtf8()); + ObjectArray* arg_array = ts.Decode*>(javaArgs); Method* m = FindConstructorOrMethodInArray(c->GetDirectMethods(), name, arg_array); if (m == NULL) { @@ -301,7 +293,7 @@ static jobject Class_getDeclaredConstructorOrMethod(JNIEnv* env, jclass javaClas } if (m != NULL) { - return AddLocalReference(env, m); + return ts.AddLocalReference(m); } else { return NULL; } @@ -309,12 +301,8 @@ static jobject Class_getDeclaredConstructorOrMethod(JNIEnv* env, jclass javaClas static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobject jname) { ScopedJniThreadState ts(env); - Class* c = DecodeClass(env, java_class); - if (c == NULL) { - return NULL; - } - - String* name = Decode(env, jname); + Class* c = DecodeClass(ts, java_class); + 
String* name = ts.Decode(jname); DCHECK(name->GetClass()->IsStringClass()); FieldHelper fh; @@ -326,7 +314,7 @@ static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobj DCHECK(env->ExceptionOccurred()); return NULL; } - return AddLocalReference(env, f); + return ts.AddLocalReference(f); } } for (size_t i = 0; i < c->NumStaticFields(); ++i) { @@ -337,7 +325,7 @@ static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobj DCHECK(env->ExceptionOccurred()); return NULL; } - return AddLocalReference(env, f); + return ts.AddLocalReference(f); } } return NULL; @@ -345,20 +333,20 @@ static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobj static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { ScopedJniThreadState ts(env); - Class* c = DecodeClass(env, javaThis); - return AddLocalReference(env, c->ComputeName()); + Class* c = DecodeClass(ts, javaThis); + return ts.AddLocalReference(c->ComputeName()); } static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { ScopedJniThreadState ts(env); - SynthesizedProxyClass* c = down_cast(DecodeClass(env, javaThis)); - return AddLocalReference(env, c->GetInterfaces()->Clone()); + SynthesizedProxyClass* c = down_cast(DecodeClass(ts, javaThis)); + return ts.AddLocalReference(c->GetInterfaces()->Clone()); } static jboolean Class_isAssignableFrom(JNIEnv* env, jobject javaLhs, jclass javaRhs) { ScopedJniThreadState ts(env); - Class* lhs = DecodeClass(env, javaLhs); - Class* rhs = Decode(env, javaRhs); // Can be null. + Class* lhs = DecodeClass(ts, javaLhs); + Class* rhs = ts.Decode(javaRhs); // Can be null. 
if (rhs == NULL) { ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "class == null"); return JNI_FALSE; @@ -397,7 +385,7 @@ static bool CheckMemberAccess(const Class* access_from, Class* access_to, uint32 static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { ScopedJniThreadState ts(env); - Class* c = DecodeClass(env, javaThis); + Class* c = DecodeClass(ts, javaThis); if (c->IsPrimitive() || c->IsInterface() || c->IsArrayClass() || c->IsAbstract()) { ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Class %s can not be instantiated", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str()); @@ -451,8 +439,8 @@ static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { } // invoke constructor; unlike reflection calls, we don't wrap exceptions - jclass java_class = AddLocalReference(env, c); - jmethodID mid = EncodeMethod(init); + jclass java_class = ts.AddLocalReference(c); + jmethodID mid = ts.EncodeMethod(init); return env->NewObject(java_class, mid); } diff --git a/src/native/java_lang_Object.cc b/src/native/java_lang_Object.cc index 51e4581de0..d6b1bd6cf8 100644 --- a/src/native/java_lang_Object.cc +++ b/src/native/java_lang_Object.cc @@ -16,27 +16,31 @@ #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" namespace art { static jobject Object_internalClone(JNIEnv* env, jobject javaThis) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Object* o = Decode(env, javaThis); - return AddLocalReference(env, o->Clone()); + ScopedJniThreadState ts(env); + Object* o = ts.Decode(javaThis); + return ts.AddLocalReference(o->Clone()); } static void Object_notify(JNIEnv* env, jobject javaThis) { - Object* o = Decode(env, javaThis); + ScopedJniThreadState ts(env); + Object* o = ts.Decode(javaThis); o->Notify(); } static void Object_notifyAll(JNIEnv* env, jobject javaThis) { - Object* o = Decode(env, javaThis); + ScopedJniThreadState ts(env); + Object* o = 
ts.Decode(javaThis); o->NotifyAll(); } static void Object_wait(JNIEnv* env, jobject javaThis, jlong ms, jint ns) { - Object* o = Decode(env, javaThis); + ScopedJniThreadState ts(env); + Object* o = ts.Decode(javaThis); o->Wait(ms, ns); } diff --git a/src/native/java_lang_Runtime.cc b/src/native/java_lang_Runtime.cc index 3019e95d9d..1b657b1801 100644 --- a/src/native/java_lang_Runtime.cc +++ b/src/native/java_lang_Runtime.cc @@ -17,16 +17,18 @@ #include #include +#include "class_loader.h" #include "heap.h" #include "jni_internal.h" #include "object.h" #include "runtime.h" +#include "scoped_jni_thread_state.h" #include "ScopedUtfChars.h" namespace art { -static void Runtime_gc(JNIEnv*, jclass) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); +static void Runtime_gc(JNIEnv* env, jclass) { + ScopedJniThreadState ts(env); Runtime::Current()->GetHeap()->CollectGarbage(false); } @@ -43,12 +45,13 @@ static void Runtime_nativeExit(JNIEnv*, jclass, jint status) { * message on failure. */ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader) { + ScopedJniThreadState ts(env); ScopedUtfChars filename(env, javaFilename); if (filename.c_str() == NULL) { return NULL; } - ClassLoader* classLoader = Decode(env, javaLoader); + ClassLoader* classLoader = ts.Decode(javaLoader); std::string detail; JavaVMExt* vm = Runtime::Current()->GetJavaVM(); bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, detail); diff --git a/src/native/java_lang_String.cc b/src/native/java_lang_String.cc index f8fb4a745e..96fcf96287 100644 --- a/src/native/java_lang_String.cc +++ b/src/native/java_lang_String.cc @@ -16,6 +16,7 @@ #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" #ifdef HAVE__MEMCMP16 // "count" is in 16-bit units. 
@@ -35,9 +36,9 @@ uint32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t count) { namespace art { static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - String* lhs = Decode(env, javaThis); - String* rhs = Decode(env, javaRhs); + ScopedJniThreadState ts(env); + String* lhs = ts.Decode(javaThis); + String* rhs = ts.Decode(javaRhs); if (rhs == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "rhs == null"); @@ -69,10 +70,11 @@ static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { } static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint start) { + ScopedJniThreadState ts(env); // This method does not handle supplementary characters. They're dealt with in managed code. DCHECK_LE(ch, 0xffff); - String* s = Decode(env, java_this); + String* s = ts.Decode(java_this); jint count = s->GetLength(); if (start < 0) { @@ -94,9 +96,10 @@ static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint sta } static jstring String_intern(JNIEnv* env, jobject javaThis) { - String* s = Decode(env, javaThis); + ScopedJniThreadState ts(env); + String* s = ts.Decode(javaThis); String* result = s->Intern(); - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index b0d1eec6dc..76ac670fbd 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -16,6 +16,7 @@ #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" /* * We make guarantees about the atomicity of accesses to primitive @@ -107,22 +108,21 @@ static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* } static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) { - 
ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Thread* self = Thread::Current(); + ScopedJniThreadState ts(env); // Null pointer checks. if (javaSrc == NULL) { - self->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); + ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); return; } if (javaDst == NULL) { - self->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); + ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); return; } // Make sure source and destination are both arrays. - Object* srcObject = Decode(env, javaSrc); - Object* dstObject = Decode(env, javaDst); + Object* srcObject = ts.Decode(javaSrc); + Object* dstObject = ts.Decode(javaDst); if (!srcObject->IsArrayInstance()) { ThrowArrayStoreException_NotAnArray("source", srcObject); return; @@ -138,7 +138,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, // Bounds checking. if (srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length) { - self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); return; @@ -150,7 +150,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, if (srcComponentType->IsPrimitive() != dstComponentType->IsPrimitive() || srcComponentType != dstComponentType) { std::string srcType(PrettyTypeOf(srcArray)); std::string dstType(PrettyTypeOf(dstArray)); - self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "Incompatible types: src=%s, dst=%s", srcType.c_str(), dstType.c_str()); return; } @@ -233,7 +233,7 @@ static void System_arraycopy(JNIEnv* env, jclass, 
jobject javaSrc, jint srcPos, if (i != length) { std::string actualSrcType(PrettyTypeOf(o)); std::string dstType(PrettyTypeOf(dstArray)); - self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "source[%d] of type %s cannot be stored in destination array of type %s", srcPos + i, actualSrcType.c_str(), dstType.c_str()); return; @@ -241,7 +241,8 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, } static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) { - Object* o = Decode(env, javaObject); + ScopedJniThreadState ts(env); + Object* o = ts.Decode(javaObject); return static_cast(reinterpret_cast(o)); } diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index ed95a6cf18..86b3a204f5 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -17,6 +17,7 @@ #include "debugger.h" #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock.h" #include "ScopedUtfChars.h" #include "thread.h" @@ -25,22 +26,24 @@ namespace art { static jobject Thread_currentThread(JNIEnv* env, jclass) { - return AddLocalReference(env, Thread::Current()->GetPeer()); + ScopedJniThreadState ts(env); + return ts.AddLocalReference(ts.Self()->GetPeer()); } -static jboolean Thread_interrupted(JNIEnv*, jclass) { - return Thread::Current()->Interrupted(); +static jboolean Thread_interrupted(JNIEnv* env, jclass) { + ScopedJniThreadState ts(env, kNative); // Doesn't touch objects, so keep in native state. + return ts.Self()->Interrupted(); } static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) { + ScopedJniThreadState ts(env); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); return (thread != NULL) ? 
thread->IsInterrupted() : JNI_FALSE; } static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size) { - Object* managedThread = Decode(env, java_thread); - Thread::CreateNativeThread(managedThread, stack_size); + Thread::CreateNativeThread(env, java_thread, stack_size); } static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean has_been_started) { @@ -52,9 +55,10 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha const jint kJavaTimedWaiting = 4; const jint kJavaTerminated = 5; + ScopedJniThreadState ts(env); ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); if (thread != NULL) { internal_thread_state = thread->GetState(); } @@ -74,28 +78,30 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha } static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) { - Object* object = Decode(env, java_object); + ScopedJniThreadState ts(env); + Object* object = ts.Decode(java_object); if (object == NULL) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); return JNI_FALSE; } ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); return thread->HoldsLock(object); } static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) { + ScopedJniThreadState ts(env); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); if (thread != NULL) { thread->Interrupt(); } } static void Thread_nativeSetName(JNIEnv* env, 
jobject java_thread, jstring java_name) { + ScopedJniThreadState ts(env); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); if (thread == NULL) { return; } @@ -112,8 +118,9 @@ static void Thread_nativeSetName(JNIEnv* env, jobject java_thread, jstring java_ * threads at Thread.NORM_PRIORITY (5). */ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_priority) { + ScopedJniThreadState ts(env); ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(env, java_thread); + Thread* thread = Thread::FromManagedThread(ts, java_thread); if (thread != NULL) { thread->SetNativePriority(new_priority); } diff --git a/src/native/java_lang_Throwable.cc b/src/native/java_lang_Throwable.cc index 625a34b067..1c59a34e51 100644 --- a/src/native/java_lang_Throwable.cc +++ b/src/native/java_lang_Throwable.cc @@ -15,13 +15,14 @@ */ #include "jni_internal.h" +#include "scoped_jni_thread_state.h" #include "thread.h" namespace art { static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) { - JNIEnvExt* env_ext = reinterpret_cast(env); - return env_ext->self->CreateInternalStackTrace(env); + ScopedJniThreadState ts(env); + return ts.Self()->CreateInternalStackTrace(ts); } static jobjectArray Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) { diff --git a/src/native/java_lang_VMClassLoader.cc b/src/native/java_lang_VMClassLoader.cc index a976933087..0689f74986 100644 --- a/src/native/java_lang_VMClassLoader.cc +++ b/src/native/java_lang_VMClassLoader.cc @@ -15,14 +15,17 @@ */ #include "class_linker.h" +#include "class_loader.h" #include "jni_internal.h" +#include "scoped_jni_thread_state.h" #include "ScopedUtfChars.h" #include "zip_archive.h" namespace art { static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) { - ClassLoader* loader = 
Decode(env, javaLoader); + ScopedJniThreadState ts(env); + ClassLoader* loader = ts.Decode(javaLoader); ScopedUtfChars name(env, javaName); if (name.c_str() == NULL) { return NULL; @@ -31,7 +34,7 @@ static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoa std::string descriptor(DotToDescriptor(name.c_str())); Class* c = Runtime::Current()->GetClassLinker()->LookupClass(descriptor.c_str(), loader); if (c != NULL && c->IsResolved()) { - return AddLocalReference(env, c); + return ts.AddLocalReference(c); } else { // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into // the regular loadClass code. diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index ea635d3647..729312ed94 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -18,6 +18,7 @@ #include "jni_internal.h" #include "object.h" #include "object_utils.h" +#include "scoped_jni_thread_state.h" namespace art { @@ -68,12 +69,12 @@ static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArr // subtract pieces off. Besides, we want to start with the outermost // piece and work our way in. 
static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedJniThreadState ts(env); DCHECK(javaElementClass != NULL); - Class* element_class = Decode(env, javaElementClass); + Class* element_class = ts.Decode(javaElementClass); DCHECK(element_class->IsClass()); DCHECK(javaDimArray != NULL); - Object* dimensions_obj = Decode(env, javaDimArray); + Object* dimensions_obj = ts.Decode(javaDimArray); DCHECK(dimensions_obj->IsArrayInstance()); DCHECK_STREQ(ClassHelper(dimensions_obj->GetClass()).GetDescriptor(), "[I"); IntArray* dimensions_array = down_cast(dimensions_obj); @@ -89,7 +90,7 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla for (int i = 0; i < num_dimensions; i++) { int dimension = dimensions_array->Get(i); if (dimension < 0) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "Dimension %d: %d", i, dimension); return NULL; } @@ -112,15 +113,15 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla CHECK(Thread::Current()->IsExceptionPending()); return NULL; } - return AddLocalReference(env, new_array); + return ts.AddLocalReference(new_array); } static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedJniThreadState ts(env); DCHECK(javaElementClass != NULL); - Class* element_class = Decode(env, javaElementClass); + Class* element_class = ts.Decode(javaElementClass); if (length < 0) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); + ts.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); return NULL; } std::string descriptor; @@ -130,16 +131,16 @@ static jobject 
Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); if (array_class == NULL) { - CHECK(Thread::Current()->IsExceptionPending()); + CHECK(ts.Self()->IsExceptionPending()); return NULL; } DCHECK(array_class->IsArrayClass()); Array* new_array = Array::Alloc(array_class, length); if (new_array == NULL) { - CHECK(Thread::Current()->IsExceptionPending()); + CHECK(ts.Self()->IsExceptionPending()); return NULL; } - return AddLocalReference(env, new_array); + return ts.AddLocalReference(new_array); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_reflect_Constructor.cc b/src/native/java_lang_reflect_Constructor.cc index 1094d06334..564d6dbf9b 100644 --- a/src/native/java_lang_reflect_Constructor.cc +++ b/src/native/java_lang_reflect_Constructor.cc @@ -19,6 +19,7 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" +#include "scoped_jni_thread_state.h" namespace art { @@ -30,17 +31,17 @@ namespace art { * with an interface, array, or primitive class. 
*/ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Method* m = Decode(env, javaMethod)->AsMethod(); + ScopedJniThreadState ts(env); + Method* m = ts.Decode(javaMethod)->AsMethod(); Class* c = m->GetDeclaringClass(); if (c->IsAbstract()) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Can't instantiate abstract class %s", PrettyDescriptor(c).c_str()); return NULL; } if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { - DCHECK(Thread::Current()->IsExceptionPending()); + DCHECK(ts.Self()->IsExceptionPending()); return NULL; } @@ -49,8 +50,8 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA return NULL; } - jobject javaReceiver = AddLocalReference(env, receiver); - InvokeMethod(env, javaMethod, javaReceiver, javaArgs); + jobject javaReceiver = ts.AddLocalReference(receiver); + InvokeMethod(ts, javaMethod, javaReceiver, javaArgs); // Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod. 
return javaReceiver; diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index bd33c0ebf2..b2ede634b7 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -19,12 +19,13 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" +#include "scoped_jni_thread_state.h" namespace art { -static bool GetFieldValue(Object* o, Field* f, JValue& value, bool allow_references) { +static bool GetFieldValue(const ScopedJniThreadState& ts, Object* o, Field* f, JValue& value, + bool allow_references) { DCHECK_EQ(value.GetJ(), 0LL); - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { return false; } @@ -64,18 +65,18 @@ static bool GetFieldValue(Object* o, Field* f, JValue& value, bool allow_referen // Never okay. break; } - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "Not a primitive field: %s", PrettyField(f).c_str()); return false; } -static bool CheckReceiver(JNIEnv* env, jobject javaObj, Field* f, Object*& o) { +static bool CheckReceiver(const ScopedJniThreadState& ts, jobject javaObj, Field* f, Object*& o) { if (f->IsStatic()) { o = NULL; return true; } - o = Decode(env, javaObj); + o = ts.Decode(javaObj); Class* declaringClass = f->GetDeclaringClass(); if (!VerifyObjectInClass(o, declaringClass)) { return false; @@ -84,32 +85,34 @@ static bool CheckReceiver(JNIEnv* env, jobject javaObj, Field* f, Object*& o) { } static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) { - Field* f = DecodeField(env->FromReflectedField(javaField)); + ScopedJniThreadState ts(env); + Field* f = ts.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(env, javaObj, f, o)) { + if (!CheckReceiver(ts, javaObj, f, o)) { return NULL; } // Get 
the field's value, boxing if necessary. JValue value; - if (!GetFieldValue(o, f, value, true)) { + if (!GetFieldValue(ts, o, f, value, true)) { return NULL; } BoxPrimitive(FieldHelper(f).GetTypeAsPrimitiveType(), value); - return AddLocalReference(env, value.GetL()); + return ts.AddLocalReference(value.GetL()); } static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char dst_descriptor) { - Field* f = DecodeField(env->FromReflectedField(javaField)); + ScopedJniThreadState ts(env); + Field* f = ts.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(env, javaObj, f, o)) { + if (!CheckReceiver(ts, javaObj, f, o)) { return JValue(); } // Read the value. JValue field_value; - if (!GetFieldValue(o, f, field_value, false)) { + if (!GetFieldValue(ts, o, f, field_value, false)) { return JValue(); } @@ -205,11 +208,11 @@ static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool all } static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Field* f = DecodeField(env->FromReflectedField(javaField)); + ScopedJniThreadState ts(env); + Field* f = ts.DecodeField(env->FromReflectedField(javaField)); // Unbox the value, if necessary. - Object* boxed_value = Decode(env, javaValue); + Object* boxed_value = ts.Decode(javaValue); JValue unboxed_value; if (!UnboxPrimitiveForField(boxed_value, FieldHelper(f).GetType(), unboxed_value, f)) { return; @@ -217,7 +220,7 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j // Check that the receiver is non-null and an instance of the field's declaring class. 
Object* o = NULL; - if (!CheckReceiver(env, javaObj, f, o)) { + if (!CheckReceiver(ts, javaObj, f, o)) { return; } @@ -226,15 +229,15 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char src_descriptor, const JValue& new_value) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Field* f = DecodeField(env->FromReflectedField(javaField)); + ScopedJniThreadState ts(env); + Field* f = ts.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(env, javaObj, f, o)) { + if (!CheckReceiver(ts, javaObj, f, o)) { return; } FieldHelper fh(f); if (!fh.IsPrimitiveType()) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "Not a primitive field: %s", PrettyField(f).c_str()); return; } diff --git a/src/native/java_lang_reflect_Method.cc b/src/native/java_lang_reflect_Method.cc index bf5c850399..269582269e 100644 --- a/src/native/java_lang_reflect_Method.cc +++ b/src/native/java_lang_reflect_Method.cc @@ -19,15 +19,18 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" +#include "scoped_jni_thread_state.h" namespace art { static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { - return InvokeMethod(env, javaMethod, javaReceiver, javaArgs); + ScopedJniThreadState ts(env); + return InvokeMethod(ts, javaMethod, javaReceiver, javaArgs); } static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { - Method* proxy_method = Decode(env, javaMethod)->AsMethod(); + ScopedJniThreadState ts(env); + Method* proxy_method = ts.Decode(javaMethod)->AsMethod(); CHECK(proxy_method->GetDeclaringClass()->IsProxyClass()); SynthesizedProxyClass* proxy_class = down_cast(proxy_method->GetDeclaringClass()); @@ -41,14 +44,13 @@ static jobject 
Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { } CHECK_NE(throws_index, -1); ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - // Change thread state for allocation - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - return AddLocalReference(env, declared_exceptions->Clone()); + return ts.AddLocalReference(declared_exceptions->Clone()); } static jobject Method_findOverriddenMethodNative(JNIEnv* env, jobject javaMethod) { - Method* method = Decode(env, javaMethod)->AsMethod(); - return AddLocalReference(env, method->FindOverriddenMethod()); + ScopedJniThreadState ts(env); + Method* method = ts.Decode(javaMethod)->AsMethod(); + return ts.AddLocalReference(method->FindOverriddenMethod()); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_reflect_Proxy.cc b/src/native/java_lang_reflect_Proxy.cc index eca6c32574..a1337a60d4 100644 --- a/src/native/java_lang_reflect_Proxy.cc +++ b/src/native/java_lang_reflect_Proxy.cc @@ -15,22 +15,23 @@ */ #include "class_linker.h" +#include "class_loader.h" #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" namespace art { static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring javaName, jobjectArray javaInterfaces, jobject javaLoader, jobjectArray javaMethods, jobjectArray javaThrows) { - // Allocates Class so transition thread state to runnable - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - String* name = Decode(env, javaName); - ObjectArray* interfaces = Decode*>(env, javaInterfaces); - ClassLoader* loader = Decode(env, javaLoader); - ObjectArray* methods = Decode*>(env, javaMethods); - ObjectArray >* throws = Decode >*>(env, javaThrows); + ScopedJniThreadState ts(env); + String* name = ts.Decode(javaName); + ObjectArray* interfaces = ts.Decode*>(javaInterfaces); + ClassLoader* loader = ts.Decode(javaLoader); + ObjectArray* methods = ts.Decode*>(javaMethods); + ObjectArray >* throws = 
ts.Decode >*>(javaThrows); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Class* result = class_linker->CreateProxyClass(name, interfaces, loader, methods, throws); - return AddLocalReference(env, result); + return ts.AddLocalReference(result); } static JNINativeMethod gMethods[] = { diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 3766546ad6..87d2b22663 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -18,6 +18,7 @@ #include "jni_internal.h" #include "logging.h" #include "scoped_heap_lock.h" +#include "scoped_jni_thread_state.h" #include "scoped_thread_list_lock.h" #include "ScopedPrimitiveArray.h" #include "stack.h" @@ -68,7 +69,8 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th if (thread == NULL) { return NULL; } - jobject stack = GetThreadStack(env, thread); + ScopedJniThreadState ts(env); + jobject stack = GetThreadStack(ts, thread); return (stack != NULL) ? 
Thread::InternalStackTraceToStackTraceElementArray(env, stack) : NULL; } diff --git a/src/native/sun_misc_Unsafe.cc b/src/native/sun_misc_Unsafe.cc index 360f241f19..dfddd86b66 100644 --- a/src/native/sun_misc_Unsafe.cc +++ b/src/native/sun_misc_Unsafe.cc @@ -16,30 +16,34 @@ #include "jni_internal.h" #include "object.h" +#include "scoped_jni_thread_state.h" namespace art { static jlong Unsafe_objectFieldOffset0(JNIEnv* env, jclass, jobject javaField) { // TODO: move to Java code jfieldID fid = env->FromReflectedField(javaField); - Field* field = DecodeField(fid); + ScopedJniThreadState ts(env); + Field* field = ts.DecodeField(fid); return field->GetOffset().Int32Value(); } static jint Unsafe_arrayBaseOffset0(JNIEnv* env, jclass, jclass javaArrayClass) { // TODO: move to Java code - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Class* array_class = Decode(env, javaArrayClass); + ScopedJniThreadState ts(env); + Class* array_class = ts.Decode(javaArrayClass); return Array::DataOffset(array_class->GetComponentSize()).Int32Value(); } static jint Unsafe_arrayIndexScale0(JNIEnv* env, jclass, jclass javaClass) { - Class* c = Decode(env, javaClass); + ScopedJniThreadState ts(env); + Class* c = ts.Decode(javaClass); return c->GetComponentSize(); } static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint expectedValue, jint newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_release_cas() returns 0 on success, not failure. 
@@ -48,7 +52,8 @@ static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, } static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong expectedValue, jlong newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int64_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. @@ -57,9 +62,10 @@ static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, } static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaExpectedValue, jobject javaNewValue) { - Object* obj = Decode(env, javaObj); - Object* expectedValue = Decode(env, javaExpectedValue); - Object* newValue = Decode(env, javaNewValue); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); + Object* expectedValue = ts.Decode(javaExpectedValue); + Object* newValue = ts.Decode(javaNewValue); byte* raw_addr = reinterpret_cast(obj) + offset; int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. 
@@ -72,90 +78,105 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb } static jint Unsafe_getInt(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); return obj->GetField32(MemberOffset(offset), false); } static jint Unsafe_getIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); return android_atomic_acquire_load(address); } static void Unsafe_putInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); obj->SetField32(MemberOffset(offset), newValue, false); } static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); android_atomic_release_store(newValue, address); } static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); ANDROID_MEMBAR_STORE(); obj->SetField32(MemberOffset(offset), newValue, false); } static jlong Unsafe_getLong(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; int64_t* address = reinterpret_cast(raw_addr); return *address; } static jlong Unsafe_getLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong 
offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); return obj->GetField64(MemberOffset(offset), true); } static void Unsafe_putLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, false); } static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); ANDROID_MEMBAR_STORE(); obj->SetField64(MemberOffset(offset), newValue, false); } static jobject Unsafe_getObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); Object* value = obj->GetFieldObject(MemberOffset(offset), true); - return AddLocalReference(env, value); + return ts.AddLocalReference(value); } static jobject Unsafe_getObject(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - Object* obj = Decode(env, javaObj); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); Object* value = obj->GetFieldObject(MemberOffset(offset), false); - return AddLocalReference(env, value); + return ts.AddLocalReference(value); } static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - Object* obj = Decode(env, javaObj); - Object* newValue = Decode(env, javaNewValue); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); + Object* newValue = ts.Decode(javaNewValue); 
obj->SetFieldObject(MemberOffset(offset), newValue, false); } static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - Object* obj = Decode(env, javaObj); - Object* newValue = Decode(env, javaNewValue); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); + Object* newValue = ts.Decode(javaNewValue); obj->SetFieldObject(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - Object* obj = Decode(env, javaObj); - Object* newValue = Decode(env, javaNewValue); + ScopedJniThreadState ts(env); + Object* obj = ts.Decode(javaObj); + Object* newValue = ts.Decode(javaNewValue); ANDROID_MEMBAR_STORE(); obj->SetFieldObject(MemberOffset(offset), newValue, false); } diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index 37cacb47d3..83d2265975 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -18,6 +18,7 @@ #include "object_utils.h" #include "reflection.h" #include "runtime_support.h" +#include "scoped_jni_thread_state.h" #include "thread.h" #include "well_known_classes.h" @@ -50,10 +51,11 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), FRAME_SIZE_IN_BYTES); // Start new JNI local reference state JNIEnvExt* env = self->GetJniEnv(); + ScopedJniThreadState ts(env); ScopedJniEnvLocalRefState env_state(env); // Create local ref. 
copies of proxy method and the receiver - jobject rcvr_jobj = AddLocalReference(env, receiver); - jobject proxy_method_jobj = AddLocalReference(env, proxy_method); + jobject rcvr_jobj = ts.AddLocalReference(receiver); + jobject proxy_method_jobj = ts.AddLocalReference(proxy_method); // Placing into local references incoming arguments from the caller's register arguments, // replacing original Object* with jobject @@ -72,7 +74,7 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, while (cur_arg < args_in_regs && param_index < num_params) { if (proxy_mh.IsParamAReference(param_index)) { Object* obj = *reinterpret_cast(stack_args + (cur_arg * kPointerSize)); - jobject jobj = AddLocalReference(env, obj); + jobject jobj = ts.AddLocalReference(obj); *reinterpret_cast(stack_args + (cur_arg * kPointerSize)) = jobj; } cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1); @@ -83,7 +85,7 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, while (param_index < num_params) { if (proxy_mh.IsParamAReference(param_index)) { Object* obj = *reinterpret_cast(stack_args + (cur_arg * kPointerSize)); - jobject jobj = AddLocalReference(env, obj); + jobject jobj = ts.AddLocalReference(obj); *reinterpret_cast(stack_args + (cur_arg * kPointerSize)) = jobj; } cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 
2 : 1); @@ -102,13 +104,13 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, CHECK(self->IsExceptionPending()); return; } - args_jobj[2].l = AddLocalReference(env, args); + args_jobj[2].l = ts.AddLocalReference(args); } // Convert proxy method into expected interface method Method* interface_method = proxy_method->FindOverriddenMethod(); DCHECK(interface_method != NULL); DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - args_jobj[1].l = AddLocalReference(env, interface_method); + args_jobj[1].l = ts.AddLocalReference(interface_method); // Box arguments cur_arg = 0; // reset stack location to read to start // reset index, will index into param type array which doesn't include the receiver diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index 522ccf2019..3f6bc8f2ae 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -23,6 +23,7 @@ #if defined(ART_USE_LLVM_COMPILER) #include "nth_caller_visitor.h" #endif +#include "scoped_jni_thread_state.h" // Architecture specific assembler helper to deliver exception. 
extern "C" void art_deliver_exception_from_code(void*); @@ -81,6 +82,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); // Start new JNI local reference state JNIEnvExt* env = thread->GetJniEnv(); + ScopedJniThreadState ts(env); ScopedJniEnvLocalRefState env_state(env); // Compute details about the called method (avoid GCs) @@ -145,7 +147,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp // If we thought we had fewer than 3 arguments in registers, account for the receiver args_in_regs++; } - AddLocalReference(env, obj); + ts.AddLocalReference(obj); } size_t shorty_index = 1; // skip return value // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip @@ -155,7 +157,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp shorty_index++; if (c == 'L') { Object* obj = reinterpret_cast(regs[cur_arg]); - AddLocalReference(env, obj); + ts.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); } @@ -166,7 +168,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp shorty_index++; if (c == 'L') { Object* obj = reinterpret_cast(regs[cur_arg]); - AddLocalReference(env, obj); + ts.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 
2 : 1); } diff --git a/src/oat_compilation_unit.h b/src/oat_compilation_unit.h index 0000f216b6..41c1847fbc 100644 --- a/src/oat_compilation_unit.h +++ b/src/oat_compilation_unit.h @@ -30,7 +30,7 @@ class DexCache; class OatCompilationUnit { public: - OatCompilationUnit(const ClassLoader* class_loader, ClassLinker* class_linker, + OatCompilationUnit(ClassLoader* class_loader, ClassLinker* class_linker, const DexFile& dex_file, DexCache& dex_cache, const DexFile::CodeItem* code_item, uint32_t method_idx, uint32_t access_flags) @@ -46,7 +46,7 @@ class OatCompilationUnit { callee_access_flags); } - const ClassLoader* GetClassLoader() const { + ClassLoader* GetClassLoader() const { return class_loader_; } @@ -85,7 +85,7 @@ class OatCompilationUnit { } public: - const ClassLoader* class_loader_; + ClassLoader* class_loader_; ClassLinker* class_linker_; const DexFile* dex_file_; diff --git a/src/oat_writer.cc b/src/oat_writer.cc index 852320de9a..ec25ae9c3b 100644 --- a/src/oat_writer.cc +++ b/src/oat_writer.cc @@ -30,7 +30,7 @@ namespace art { bool OatWriter::Create(File* file, - const ClassLoader* class_loader, + ClassLoader* class_loader, const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, @@ -46,7 +46,7 @@ bool OatWriter::Create(File* file, OatWriter::OatWriter(const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, - const ClassLoader* class_loader, + ClassLoader* class_loader, const Compiler& compiler) { compiler_ = &compiler; class_loader_ = class_loader; diff --git a/src/oat_writer.h b/src/oat_writer.h index fe0bd67687..29072abd8a 100644 --- a/src/oat_writer.h +++ b/src/oat_writer.h @@ -74,7 +74,7 @@ class OatWriter { public: // Write an oat file. Returns true on success, false on failure. 
static bool Create(File* file, - const ClassLoader* class_loader, + ClassLoader* class_loader, const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, @@ -84,7 +84,7 @@ class OatWriter { OatWriter(const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, - const ClassLoader* class_loader, + ClassLoader* class_loader, const Compiler& compiler); ~OatWriter(); @@ -177,7 +177,7 @@ class OatWriter { const Compiler* compiler_; // TODO: remove the ClassLoader when the code storage moves out of Method - const ClassLoader* class_loader_; + ClassLoader* class_loader_; // note OatFile does not take ownership of the DexFiles const std::vector* dex_files_; diff --git a/src/object.cc b/src/object.cc index b728e285d2..94e1759611 100644 --- a/src/object.cc +++ b/src/object.cc @@ -322,35 +322,6 @@ void Method::ResetClasses() { java_lang_reflect_Method_ = NULL; } -Class* ExtractNextClassFromSignature(ClassLinker* class_linker, const ClassLoader* cl, const char*& p) { - if (*p == '[') { - // Something like "[[[Ljava/lang/String;". - const char* start = p; - while (*p == '[') { - ++p; - } - if (*p == 'L') { - while (*p != ';') { - ++p; - } - } - ++p; // Either the ';' or the primitive type. 
- - std::string descriptor(start, (p - start)); - return class_linker->FindClass(descriptor.c_str(), cl); - } else if (*p == 'L') { - const char* start = p; - while (*p != ';') { - ++p; - } - ++p; - std::string descriptor(start, (p - start)); - return class_linker->FindClass(descriptor.c_str(), cl); - } else { - return class_linker->FindPrimitiveClass(*p++); - } -} - ObjectArray* Method::GetDexCacheStrings() const { return GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Method, dex_cache_strings_), false); @@ -937,8 +908,7 @@ ClassLoader* Class::GetClassLoader() const { return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false); } -void Class::SetClassLoader(const ClassLoader* new_cl) { - ClassLoader* new_class_loader = const_cast(new_cl); +void Class::SetClassLoader(ClassLoader* new_class_loader) { SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false); } diff --git a/src/object.h b/src/object.h index eeac3ea0f1..6334f8e9bd 100644 --- a/src/object.h +++ b/src/object.h @@ -1513,7 +1513,7 @@ class MANAGED Class : public StaticStorageBase { ClassLoader* GetClassLoader() const; - void SetClassLoader(const ClassLoader* new_cl); + void SetClassLoader(ClassLoader* new_cl); static MemberOffset DexCacheOffset() { return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_)); diff --git a/src/reflection.cc b/src/reflection.cc index 2b72944297..7726998d51 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -20,6 +20,7 @@ #include "jni_internal.h" #include "object.h" #include "object_utils.h" +#include "scoped_jni_thread_state.h" namespace art { @@ -44,12 +45,10 @@ void InitBoxingMethods() { gShort_valueOf = class_linker->FindSystemClass("Ljava/lang/Short;")->FindDeclaredDirectMethod("valueOf", "(S)Ljava/lang/Short;"); } -jobject InvokeMethod(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { - Thread* self = Thread::Current(); - ScopedThreadStateChange tsc(self, kRunnable); - - jmethodID mid = 
env->FromReflectedMethod(javaMethod); - Method* m = reinterpret_cast(mid); +jobject InvokeMethod(const ScopedJniThreadState& ts, jobject javaMethod, jobject javaReceiver, + jobject javaArgs) { + jmethodID mid = ts.Env()->FromReflectedMethod(javaMethod); + Method* m = ts.DecodeMethod(mid); Class* declaring_class = m->GetDeclaringClass(); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaring_class, true, true)) { @@ -59,24 +58,24 @@ jobject InvokeMethod(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobj Object* receiver = NULL; if (!m->IsStatic()) { // Check that the receiver is non-null and an instance of the field's declaring class. - receiver = Decode(env, javaReceiver); + receiver = ts.Decode(javaReceiver); if (!VerifyObjectInClass(receiver, declaring_class)) { return NULL; } // Find the actual implementation of the virtual method. m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m); - mid = reinterpret_cast(m); + mid = ts.EncodeMethod(m); } // Get our arrays of arguments and their types, and check they're the same size. - ObjectArray* objects = Decode*>(env, javaArgs); + ObjectArray* objects = ts.Decode*>(javaArgs); MethodHelper mh(m); const DexFile::TypeList* classes = mh.GetParameterTypeList(); uint32_t classes_size = classes == NULL ? 0 : classes->Size(); uint32_t arg_count = (objects != NULL) ? objects->GetLength() : 0; if (arg_count != classes_size) { - self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "wrong number of arguments; expected %d, got %d", classes_size, arg_count); return NULL; @@ -92,27 +91,27 @@ jobject InvokeMethod(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobj return NULL; } if (!dst_class->IsPrimitive()) { - args[i].l = AddLocalReference(env, arg); + args[i].l = ts.AddLocalReference(arg); } } // Invoke the method. 
- JValue value(InvokeWithJValues(env, javaReceiver, mid, args.get())); + JValue value(InvokeWithJValues(ts, javaReceiver, mid, args.get())); // Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early. - if (self->IsExceptionPending()) { - jthrowable th = env->ExceptionOccurred(); - env->ExceptionClear(); - jclass exception_class = env->FindClass("java/lang/reflect/InvocationTargetException"); - jmethodID mid = env->GetMethodID(exception_class, "", "(Ljava/lang/Throwable;)V"); - jobject exception_instance = env->NewObject(exception_class, mid, th); - env->Throw(reinterpret_cast(exception_instance)); + if (ts.Self()->IsExceptionPending()) { + jthrowable th = ts.Env()->ExceptionOccurred(); + ts.Env()->ExceptionClear(); + jclass exception_class = ts.Env()->FindClass("java/lang/reflect/InvocationTargetException"); + jmethodID mid = ts.Env()->GetMethodID(exception_class, "", "(Ljava/lang/Throwable;)V"); + jobject exception_instance = ts.Env()->NewObject(exception_class, mid, th); + ts.Env()->Throw(reinterpret_cast(exception_instance)); return NULL; } // Box if necessary and return. 
BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(), value); - return AddLocalReference(env, value.GetL()); + return ts.AddLocalReference(value.GetL()); } bool VerifyObjectInClass(Object* o, Class* c) { diff --git a/src/reflection.h b/src/reflection.h index 6b47440d99..03847f8a8e 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -27,6 +27,7 @@ class Field; union JValue; class Method; class Object; +class ScopedJniThreadState; void InitBoxingMethods(); void BoxPrimitive(Primitive::Type src_class, JValue& value); @@ -36,7 +37,7 @@ bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue& dst); -jobject InvokeMethod(JNIEnv* env, jobject method, jobject receiver, jobject args); +jobject InvokeMethod(const ScopedJniThreadState& ts, jobject method, jobject receiver, jobject args); bool VerifyObjectInClass(Object* o, Class* c); diff --git a/src/runtime.cc b/src/runtime.cc index b071ef4a7d..5f20a4ba3b 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -36,6 +36,7 @@ #include "monitor.h" #include "oat_file.h" #include "scoped_heap_lock.h" +#include "scoped_jni_thread_state.h" #include "ScopedLocalRef.h" #include "signal_catcher.h" #include "signal_set.h" @@ -80,7 +81,9 @@ Runtime::Runtime() method_trace_(0), method_trace_file_size_(0), tracer_(NULL), - use_compile_time_class_path_(false) { + use_compile_time_class_path_(false), + main_thread_group_(NULL), + system_thread_group_(NULL) { for (int i = 0; i < Runtime::kLastTrampolineMethodType; i++) { resolution_stub_array_[i] = NULL; } @@ -534,33 +537,33 @@ Runtime* Runtime::Create(const Options& options, bool ignore_unrecognized) { return instance_; } -void CreateSystemClassLoader() { +static void CreateSystemClassLoader() { if (Runtime::Current()->UseCompileTimeClassPath()) { return; } - Thread* self = Thread::Current(); + ScopedJniThreadState ts(Thread::Current()); - // Must be in the 
kNative state for calling native methods. - CHECK_EQ(self->GetState(), kNative); + Class* class_loader_class = ts.Decode(WellKnownClasses::java_lang_ClassLoader); + CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(class_loader_class, true, true)); - JNIEnv* env = self->GetJniEnv(); - jmethodID getSystemClassLoader = env->GetStaticMethodID(WellKnownClasses::java_lang_ClassLoader, - "getSystemClassLoader", - "()Ljava/lang/ClassLoader;"); + Method* getSystemClassLoader = class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;"); CHECK(getSystemClassLoader != NULL); - ScopedLocalRef class_loader(env, env->CallStaticObjectMethod(WellKnownClasses::java_lang_ClassLoader, - getSystemClassLoader)); - CHECK(class_loader.get() != NULL); - Thread::Current()->SetClassLoaderOverride(Decode(env, class_loader.get())); + ClassLoader* class_loader = + down_cast(InvokeWithJValues(ts, NULL, getSystemClassLoader, NULL).GetL()); + CHECK(class_loader != NULL); + + ts.Self()->SetClassLoaderOverride(class_loader); - jfieldID contextClassLoader = env->GetFieldID(WellKnownClasses::java_lang_Thread, - "contextClassLoader", - "Ljava/lang/ClassLoader;"); + Class* thread_class = ts.Decode(WellKnownClasses::java_lang_Thread); + CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(thread_class, true, true)); + + Field* contextClassLoader = thread_class->FindDeclaredInstanceField("contextClassLoader", + "Ljava/lang/ClassLoader;"); CHECK(contextClassLoader != NULL); - ScopedLocalRef self_jobject(env, AddLocalReference(env, self->GetPeer())); - env->SetObjectField(self_jobject.get(), contextClassLoader, class_loader.get()); + + contextClassLoader->SetObject(ts.Self()->GetPeer(), class_loader); } void Runtime::Start() { @@ -587,6 +590,9 @@ void Runtime::Start() { // it touches will have methods linked to the oat file if necessary. InitNativeMethods(); + // Initialize well known thread group values that may be accessed threads while attaching. 
+ InitThreadGroups(self); + Thread::FinishStartup(); if (!is_zygote_) { @@ -739,6 +745,17 @@ void Runtime::InitNativeMethods() { VLOG(startup) << "Runtime::InitNativeMethods exiting"; } +void Runtime::InitThreadGroups(Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + ScopedJniEnvLocalRefState env_state(env); + main_thread_group_ = + env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup, + WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup)); + system_thread_group_ = + env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup, + WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup)); +} + void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { #define REGISTER(FN) extern void FN(JNIEnv*); FN(env) // Register Throwable first so that registration of other native methods can throw exceptions @@ -850,7 +867,7 @@ void Runtime::BlockSignals() { signals.Block(); } -void Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, Object* thread_group) { +void Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group) { Thread::Attach(thread_name, as_daemon, thread_group); if (thread_name == NULL) { LOG(WARNING) << *Thread::Current() << " attached without supplying a name"; diff --git a/src/runtime.h b/src/runtime.h index e3e0caf3de..a6cebe74bb 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -136,8 +136,18 @@ class Runtime { // that the native stack trace we get may point at the wrong call site. static void Abort(); + // Returns the "main" ThreadGroup, used when attaching user threads. + jobject GetMainThreadGroup() const { + return main_thread_group_; + } + + // Returns the "system" ThreadGroup, used when attaching our internal threads. + jobject GetSystemThreadGroup() const { + return system_thread_group_; + } + // Attaches the calling native thread to the runtime. 
- void AttachCurrentThread(const char* thread_name, bool as_daemon, Object* thread_group); + void AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group); void CallExitHook(jint status); @@ -323,6 +333,7 @@ class Runtime { bool Init(const Options& options, bool ignore_unrecognized); void InitNativeMethods(); + void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); void StartDaemonThreads(); @@ -409,6 +420,9 @@ class Runtime { CompileTimeClassPaths compile_time_class_paths_; bool use_compile_time_class_path_; + jobject main_thread_group_; + jobject system_thread_group_; + DISALLOW_COPY_AND_ASSIGN(Runtime); }; diff --git a/src/scoped_jni_thread_state.h b/src/scoped_jni_thread_state.h index 9b3e63c557..42ed19c258 100644 --- a/src/scoped_jni_thread_state.h +++ b/src/scoped_jni_thread_state.h @@ -14,38 +14,147 @@ * limitations under the License. */ +#include "casts.h" #include "jni_internal.h" +#include "thread.h" namespace art { -// Entry/exit processing for all JNI calls. +// Entry/exit processing for transitions from Native to Runnable (ie within JNI functions). // -// This performs the necessary thread state switching, lets us amortize the -// cost of working out the current thread, and lets us check (and repair) apps -// that are using a JNIEnv on the wrong thread. +// This class performs the necessary thread state switching to and from Runnable and lets us +// amortize the cost of working out the current thread. Additionally it lets us check (and repair) +// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects +// into jobjects via methods of this class. Performing this here enforces the Runnable thread state +// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code +// is also manipulating the Object. +// +// The destructor transitions back to the previous thread state, typically Native. 
In this case +// GC and thread suspension may occur. class ScopedJniThreadState { public: explicit ScopedJniThreadState(JNIEnv* env, ThreadState new_state = kRunnable) - : env_(reinterpret_cast(env)) { - self_ = ThreadForEnv(env); - old_thread_state_ = self_->SetState(new_state); + : env_(reinterpret_cast(env)), vm_(env_->vm), self_(ThreadForEnv(env)), + old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) { + self_->VerifyStack(); + } + + explicit ScopedJniThreadState(Thread* self, ThreadState new_state = kRunnable) + : env_(reinterpret_cast(self->GetJniEnv())), vm_(env_->vm), self_(self), + old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) { self_->VerifyStack(); } + // Used when we want a scoped jni thread state but have no thread/JNIEnv. + explicit ScopedJniThreadState(JavaVM* vm) + : env_(NULL), vm_(reinterpret_cast(vm)), self_(NULL), + old_thread_state_(kTerminated), thread_state_(kTerminated) { + } + ~ScopedJniThreadState() { - self_->SetState(old_thread_state_); + if (self_ != NULL) { + self_->SetState(old_thread_state_); + } } - JNIEnvExt* Env() { + JNIEnvExt* Env() const { return env_; } - Thread* Self() { + Thread* Self() const { return self_; } - JavaVMExt* Vm() { - return env_->vm; + JavaVMExt* Vm() const { + return vm_; + } + + /* + * Add a local reference for an object to the indirect reference table associated with the + * current stack frame. When the native function returns, the reference will be discarded. + * Part of the ScopedJniThreadState as native code shouldn't be working on raw Object* without + * having transitioned its state. + * + * We need to allow the same reference to be added multiple times. + * + * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and + * it's best if we don't grab a mutex. + * + * Returns the local reference (currently just the same pointer that was + * passed in), or NULL on failure. 
+ */ + template + T AddLocalReference(Object* obj) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + if (obj == NULL) { + return NULL; + } + + DCHECK_NE((reinterpret_cast(obj) & 0xffff0000), 0xebad0000); + + IndirectReferenceTable& locals = Env()->locals; + + uint32_t cookie = Env()->local_ref_cookie; + IndirectRef ref = locals.Add(cookie, obj); + + #if 0 // TODO: fix this to understand PushLocalFrame, so we can turn it on. + if (Env()->check_jni) { + size_t entry_count = locals.Capacity(); + if (entry_count > 16) { + LOG(WARNING) << "Warning: more than 16 JNI local references: " + << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n" + << Dumpable(locals); + // TODO: LOG(FATAL) in a later release? + } + } + #endif + + if (Vm()->work_around_app_jni_bugs) { + // Hand out direct pointers to support broken old apps. + return reinterpret_cast(obj); + } + + return reinterpret_cast(ref); + } + + template + T Decode(jobject obj) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + return down_cast(Self()->DecodeJObject(obj)); + } + + Field* DecodeField(jfieldID fid) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + #ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we should make these unique weak globals if Field instances can ever move. + UNIMPLEMENTED(WARNING); + #endif + return reinterpret_cast(fid); + } + + jfieldID EncodeField(Field* field) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + #ifdef MOVING_GARBAGE_COLLECTOR + UNIMPLEMENTED(WARNING); + #endif + return reinterpret_cast(field); + } + + Method* DecodeMethod(jmethodID mid) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. 
+ #ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we should make these unique weak globals if Method instances can ever move. + UNIMPLEMENTED(WARNING); + #endif + return reinterpret_cast(mid); + } + + jmethodID EncodeMethod(Method* method) const { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + #ifdef MOVING_GARBAGE_COLLECTOR + UNIMPLEMENTED(WARNING); + #endif + return reinterpret_cast(method); } private: @@ -62,9 +171,16 @@ class ScopedJniThreadState { return self; } - JNIEnvExt* env_; - Thread* self_; - ThreadState old_thread_state_; + // The full JNIEnv. + JNIEnvExt* const env_; + // The full JavaVM. + JavaVMExt* const vm_; + // Cached current thread derived from the JNIEnv. + Thread* const self_; + // Previous thread state, most likely kNative. + const ThreadState old_thread_state_; + // Local cache of thread state to enable quick sanity checks. + const ThreadState thread_state_; DISALLOW_COPY_AND_ASSIGN(ScopedJniThreadState); }; diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc index d3c799c212..919923eb84 100644 --- a/src/signal_catcher.cc +++ b/src/signal_catcher.cc @@ -180,7 +180,7 @@ void* SignalCatcher::Run(void* arg) { CHECK(signal_catcher != NULL); Runtime* runtime = Runtime::Current(); - runtime->AttachCurrentThread("Signal Catcher", true, Thread::GetSystemThreadGroup()); + runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup()); Thread* self = Thread::Current(); self->SetState(kRunnable); diff --git a/src/stack.cc b/src/stack.cc index 336f8adfd1..07a1cb1fb1 100644 --- a/src/stack.cc +++ b/src/stack.cc @@ -26,7 +26,8 @@ namespace art { class StackGetter { public: - StackGetter(JNIEnv* env, Thread* thread) : env_(env), thread_(thread), trace_(NULL) { + StackGetter(const ScopedJniThreadState& ts, Thread* thread) + : ts_(ts), thread_(thread), trace_(NULL) { } static void Callback(void* arg) { @@ -39,17 +40,17 @@ class StackGetter { private: void Callback() { - trace_ = 
thread_->CreateInternalStackTrace(env_); + trace_ = thread_->CreateInternalStackTrace(ts_); } - JNIEnv* env_; - Thread* thread_; + const ScopedJniThreadState& ts_; + Thread* const thread_; jobject trace_; }; -jobject GetThreadStack(JNIEnv* env, Thread* thread) { +jobject GetThreadStack(const ScopedJniThreadState& ts, Thread* thread) { ThreadList* thread_list = Runtime::Current()->GetThreadList(); - StackGetter stack_getter(env, thread); + StackGetter stack_getter(ts, thread); thread_list->RunWhileSuspended(thread, StackGetter::Callback, &stack_getter); return stack_getter.GetTrace(); } diff --git a/src/stack.h b/src/stack.h index bd0aee6613..243ca280ca 100644 --- a/src/stack.h +++ b/src/stack.h @@ -31,9 +31,10 @@ namespace art { class Method; class Object; class ShadowFrame; +class ScopedJniThreadState; class Thread; -jobject GetThreadStack(JNIEnv*, Thread*); +jobject GetThreadStack(const ScopedJniThreadState&, Thread*); class ShadowFrame { public: diff --git a/src/thread.cc b/src/thread.cc index a8ba701c5a..ba2919a200 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -107,19 +107,21 @@ void* Thread::CreateCallback(void* arg) { runtime->GetThreadList()->WaitForGo(); { - CHECK_EQ(self->GetState(), kRunnable); - SirtRef thread_name(self->GetThreadName()); - self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); - } + ScopedJniThreadState ts(self); + { + SirtRef thread_name(self->GetThreadName(ts)); + self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); + } - Dbg::PostThreadStart(self); + Dbg::PostThreadStart(self); - // Invoke the 'run' method of our java.lang.Thread. - CHECK(self->peer_ != NULL); - Object* receiver = self->peer_; - jmethodID mid = WellKnownClasses::java_lang_Thread_run; - Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(DecodeMethod(mid)); - m->Invoke(self, receiver, NULL, NULL); + // Invoke the 'run' method of our java.lang.Thread. 
+ CHECK(self->peer_ != NULL); + Object* receiver = self->peer_; + jmethodID mid = WellKnownClasses::java_lang_Thread_run; + Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); + m->Invoke(self, receiver, NULL, NULL); + } // Detach. runtime->GetThreadList()->Unregister(); @@ -127,18 +129,19 @@ void* Thread::CreateCallback(void* arg) { return NULL; } -static void SetVmData(Object* managed_thread, Thread* native_thread) { - Field* f = DecodeField(WellKnownClasses::java_lang_Thread_vmData); +static void SetVmData(const ScopedJniThreadState& ts, Object* managed_thread, + Thread* native_thread) { + Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); f->SetInt(managed_thread, reinterpret_cast(native_thread)); } -Thread* Thread::FromManagedThread(Object* thread_peer) { - Field* f = DecodeField(WellKnownClasses::java_lang_Thread_vmData); +Thread* Thread::FromManagedThread(const ScopedJniThreadState& ts, Object* thread_peer) { + Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); return reinterpret_cast(static_cast(f->GetInt(thread_peer))); } -Thread* Thread::FromManagedThread(JNIEnv* env, jobject java_thread) { - return FromManagedThread(Decode(env, java_thread)); +Thread* Thread::FromManagedThread(const ScopedJniThreadState& ts, jobject java_thread) { + return FromManagedThread(ts, ts.Decode(java_thread)); } static size_t FixStackSize(size_t stack_size) { @@ -204,41 +207,43 @@ static void TearDownAlternateSignalStack() { delete[] allocated_signal_stack; } -void Thread::CreateNativeThread(Object* peer, size_t stack_size) { - CHECK(peer != NULL); +void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size) { + Thread* native_thread = new Thread; + { + ScopedJniThreadState ts(env); + Object* peer = ts.Decode(java_peer); + CHECK(peer != NULL); + native_thread->peer_ = peer; - stack_size = FixStackSize(stack_size); + stack_size = FixStackSize(stack_size); - Thread* 
native_thread = new Thread; - native_thread->peer_ = peer; + // Thread.start is synchronized, so we know that vmData is 0, + // and know that we're not racing to assign it. + SetVmData(ts, peer, native_thread); - // Thread.start is synchronized, so we know that vmData is 0, - // and know that we're not racing to assign it. - SetVmData(peer, native_thread); + int pthread_create_result = 0; + { + ScopedThreadStateChange tsc(Thread::Current(), kVmWait); + pthread_t new_pthread; + pthread_attr_t attr; + CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); + CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); + CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); + pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, native_thread); + CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); + } - int pthread_create_result = 0; - { - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); - pthread_t new_pthread; - pthread_attr_t attr; - CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); - CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); - CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); - pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, native_thread); - CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); - } - - if (pthread_create_result != 0) { - // pthread_create(3) failed, so clean up. - SetVmData(peer, 0); - delete native_thread; - - std::string msg(StringPrintf("pthread_create (%s stack) failed: %s", - PrettySize(stack_size).c_str(), strerror(pthread_create_result))); - Thread::Current()->ThrowOutOfMemoryError(msg.c_str()); - return; - } + if (pthread_create_result != 0) { + // pthread_create(3) failed, so clean up. 
+ SetVmData(ts, peer, 0); + delete native_thread; + std::string msg(StringPrintf("pthread_create (%s stack) failed: %s", + PrettySize(stack_size).c_str(), strerror(pthread_create_result))); + Thread::Current()->ThrowOutOfMemoryError(msg.c_str()); + return; + } + } // Let the child know when it's safe to start running. Runtime::Current()->GetThreadList()->SignalGo(native_thread); } @@ -271,7 +276,7 @@ void Thread::Init() { runtime->GetThreadList()->Register(); } -Thread* Thread::Attach(const char* thread_name, bool as_daemon, Object* thread_group) { +Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group) { Thread* self = new Thread; self->Init(); @@ -295,30 +300,14 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, Object* thread_g return self; } -static Object* GetWellKnownThreadGroup(jfieldID which) { - Class* c = WellKnownClasses::ToClass(WellKnownClasses::java_lang_ThreadGroup); - if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { - return NULL; - } - return DecodeField(which)->GetObject(NULL); -} - -Object* Thread::GetMainThreadGroup() { - return GetWellKnownThreadGroup(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup); -} - -Object* Thread::GetSystemThreadGroup() { - return GetWellKnownThreadGroup(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); -} - -void Thread::CreatePeer(const char* name, bool as_daemon, Object* thread_group) { - CHECK(Runtime::Current()->IsStarted()); +void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) { + Runtime* runtime = Runtime::Current(); + CHECK(runtime->IsStarted()); JNIEnv* env = jni_env_; if (thread_group == NULL) { - thread_group = Thread::GetMainThreadGroup(); + thread_group = runtime->GetMainThreadGroup(); } - ScopedLocalRef java_thread_group(env, AddLocalReference(env, thread_group)); ScopedLocalRef thread_name(env, env->NewStringUTF(name)); jint thread_priority = GetNativePriority(); jboolean 
thread_is_daemon = as_daemon; @@ -332,21 +321,22 @@ void Thread::CreatePeer(const char* name, bool as_daemon, Object* thread_group) env->CallNonvirtualVoidMethod(peer.get(), WellKnownClasses::java_lang_Thread, WellKnownClasses::java_lang_Thread_init, - java_thread_group.get(), thread_name.get(), thread_priority, thread_is_daemon); + thread_group, thread_name.get(), thread_priority, thread_is_daemon); CHECK(!IsExceptionPending()) << " " << PrettyTypeOf(GetException()); - SetVmData(peer_, Thread::Current()); - SirtRef peer_thread_name(GetThreadName()); + ScopedJniThreadState ts(this); + SetVmData(ts, peer_, Thread::Current()); + SirtRef peer_thread_name(GetThreadName(ts)); if (peer_thread_name.get() == NULL) { // The Thread constructor should have set the Thread.name to a // non-null value. However, because we can run without code // available (in the compiler, in tests), we manually assign the // fields the constructor should have set. - DecodeField(WellKnownClasses::java_lang_Thread_daemon)->SetBoolean(peer_, thread_is_daemon); - DecodeField(WellKnownClasses::java_lang_Thread_group)->SetObject(peer_, thread_group); - DecodeField(WellKnownClasses::java_lang_Thread_name)->SetObject(peer_, Decode(env, thread_name.get())); - DecodeField(WellKnownClasses::java_lang_Thread_priority)->SetInt(peer_, thread_priority); - peer_thread_name.reset(GetThreadName()); + ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->SetBoolean(peer_, thread_is_daemon); + ts.DecodeField(WellKnownClasses::java_lang_Thread_group)->SetObject(peer_, ts.Decode(thread_group)); + ts.DecodeField(WellKnownClasses::java_lang_Thread_name)->SetObject(peer_, ts.Decode(thread_name.get())); + ts.DecodeField(WellKnownClasses::java_lang_Thread_priority)->SetInt(peer_, thread_priority); + peer_thread_name.reset(GetThreadName(ts)); } // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. 
if (peer_thread_name.get() != NULL) { @@ -432,8 +422,8 @@ void Thread::Dump(std::ostream& os, bool full) const { } } -String* Thread::GetThreadName() const { - Field* f = DecodeField(WellKnownClasses::java_lang_Thread_name); +String* Thread::GetThreadName(const ScopedJniThreadState& ts) const { + Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_name); return (peer_ != NULL) ? reinterpret_cast(f->GetObject(peer_)) : NULL; } @@ -447,12 +437,13 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { bool is_daemon = false; if (thread != NULL && thread->peer_ != NULL) { - priority = DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->peer_); - is_daemon = DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->peer_); + ScopedJniThreadState ts(Thread::Current()); + priority = ts.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->peer_); + is_daemon = ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->peer_); - Object* thread_group = thread->GetThreadGroup(); + Object* thread_group = thread->GetThreadGroup(ts); if (thread_group != NULL) { - Field* group_name_field = DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); + Field* group_name_field = ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); String* group_name_string = reinterpret_cast(group_name_field->GetObject(thread_group)); group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : ""; } @@ -750,12 +741,13 @@ void Thread::Startup() { } void Thread::FinishStartup() { - CHECK(Runtime::Current()->IsStarted()); + Runtime* runtime = Runtime::Current(); + CHECK(runtime->IsStarted()); Thread* self = Thread::Current(); // Finish attaching the main thread. 
ScopedThreadStateChange tsc(self, kRunnable); - Thread::Current()->CreatePeer("main", false, Thread::GetMainThreadGroup()); + Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); InitBoxingMethods(); Runtime::Current()->GetClassLinker()->RunRootClinits(); @@ -826,19 +818,19 @@ void Thread::Destroy() { Thread* self = this; // We may need to call user-supplied managed code. - SetState(kRunnable); + ScopedJniThreadState ts(this); - HandleUncaughtExceptions(); - RemoveFromThreadGroup(); + HandleUncaughtExceptions(ts); + RemoveFromThreadGroup(ts); // this.vmData = 0; - SetVmData(peer_, NULL); + SetVmData(ts, peer_, NULL); Dbg::PostThreadDeath(self); // Thread.join() is implemented as an Object.wait() on the Thread.lock // object. Signal anyone who is waiting. - Object* lock = DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(peer_); + Object* lock = ts.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(peer_); // (This conditional is only needed for tests, where Thread.lock won't have been set.) if (lock != NULL) { lock->MonitorEnter(self); @@ -868,25 +860,25 @@ Thread::~Thread() { TearDownAlternateSignalStack(); } -void Thread::HandleUncaughtExceptions() { +void Thread::HandleUncaughtExceptions(const ScopedJniThreadState& ts) { if (!IsExceptionPending()) { return; } - // Get and clear the exception. Object* exception = GetException(); ClearException(); // If the thread has its own handler, use that. - Object* handler = DecodeField(WellKnownClasses::java_lang_Thread_uncaughtHandler)->GetObject(peer_); + Object* handler = + ts.DecodeField(WellKnownClasses::java_lang_Thread_uncaughtHandler)->GetObject(peer_); if (handler == NULL) { // Otherwise use the thread group's default handler. - handler = GetThreadGroup(); + handler = GetThreadGroup(ts); } // Call the handler. 
jmethodID mid = WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException; - Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(DecodeMethod(mid)); + Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); JValue args[2]; args[0].SetL(peer_); args[1].SetL(exception); @@ -896,17 +888,17 @@ void Thread::HandleUncaughtExceptions() { ClearException(); } -Object* Thread::GetThreadGroup() const { - return DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer_); +Object* Thread::GetThreadGroup(const ScopedJniThreadState& ts) const { + return ts.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer_); } -void Thread::RemoveFromThreadGroup() { +void Thread::RemoveFromThreadGroup(const ScopedJniThreadState& ts) { // this.group.removeThread(this); // group can be null if we're in the compiler or a test. - Object* group = GetThreadGroup(); + Object* group = GetThreadGroup(ts); if (group != NULL) { jmethodID mid = WellKnownClasses::java_lang_ThreadGroup_removeThread; - Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(DecodeMethod(mid)); + Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); JValue args[1]; args[0].SetL(peer_); m->Invoke(this, group, args, NULL); @@ -1051,7 +1043,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { StackVisitor(stack, trace_stack), skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {} - bool Init(int depth, ScopedJniThreadState& ts) { + bool Init(int depth, const ScopedJniThreadState& ts) { // Allocate method trace with an extra slot that will hold the PC trace SirtRef > method_trace(Runtime::Current()->GetClassLinker()->AllocObjectArray(depth + 1)); @@ -1121,16 +1113,13 @@ StackIndirectReferenceTable* Thread::PopSirt() { return sirt; } -jobject Thread::CreateInternalStackTrace(JNIEnv* env) const { +jobject 
Thread::CreateInternalStackTrace(const ScopedJniThreadState& ts) const { // Compute depth of stack CountStackDepthVisitor count_visitor(GetManagedStack(), GetTraceStack()); count_visitor.WalkStack(); int32_t depth = count_visitor.GetDepth(); int32_t skip_depth = count_visitor.GetSkipDepth(); - // Transition into runnable state to work on Object*/Array* - ScopedJniThreadState ts(env); - // Build internal stack trace BuildInternalStackTraceVisitor build_trace_visitor(GetManagedStack(), GetTraceStack(), skip_depth); @@ -1138,7 +1127,7 @@ jobject Thread::CreateInternalStackTrace(JNIEnv* env) const { return NULL; // Allocation failed } build_trace_visitor.WalkStack(); - return AddLocalReference(ts.Env(), build_trace_visitor.GetInternalStackTrace()); + return ts.AddLocalReference(build_trace_visitor.GetInternalStackTrace()); } jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, @@ -1146,8 +1135,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job // Transition into runnable state to work on Object*/Array* ScopedJniThreadState ts(env); // Decode the internal stack trace into the depth, method trace and PC trace - ObjectArray* method_trace = - down_cast*>(Decode(ts.Env(), internal)); + ObjectArray* method_trace = ts.Decode*>(internal); int32_t depth = method_trace->GetLength() - 1; IntArray* pc_trace = down_cast(method_trace->Get(depth)); @@ -1158,8 +1146,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job if (output_array != NULL) { // Reuse the array we were given. result = output_array; - java_traces = reinterpret_cast*>(Decode(env, - output_array)); + java_traces = ts.Decode*>(output_array); // ...adjusting the number of frames we'll write to not exceed the array length. 
depth = std::min(depth, java_traces->GetLength()); } else { @@ -1168,7 +1155,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job if (java_traces == NULL) { return NULL; } - result = AddLocalReference(ts.Env(), java_traces); + result = ts.AddLocalReference(java_traces); } if (stack_depth != NULL) { @@ -1602,7 +1589,8 @@ bool Thread::HoldsLock(Object* object) { } bool Thread::IsDaemon() { - return DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(peer_); + ScopedJniThreadState ts(this); + return ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(peer_); } class ReferenceMapVisitor : public StackVisitor { diff --git a/src/thread.h b/src/thread.h index 5ff0414ab1..7cd55a357a 100644 --- a/src/thread.h +++ b/src/thread.h @@ -94,11 +94,11 @@ class PACKED Thread { // Creates a new native thread corresponding to the given managed peer. // Used to implement Thread.start. - static void CreateNativeThread(Object* peer, size_t stack_size); + static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size); // Attaches the calling native thread to the runtime, returning the new native peer. // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls. - static Thread* Attach(const char* thread_name, bool as_daemon, Object* thread_group); + static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group); // Reset internal state of child thread after fork. void InitAfterFork(); @@ -110,8 +110,8 @@ class PACKED Thread { return reinterpret_cast(thread); } - static Thread* FromManagedThread(Object* thread_peer); - static Thread* FromManagedThread(JNIEnv* env, jobject thread); + static Thread* FromManagedThread(const ScopedJniThreadState& ts, Object* thread_peer); + static Thread* FromManagedThread(const ScopedJniThreadState& ts, jobject thread); // Translates 172 to pAllocArrayFromCode and so on. 
static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers); @@ -179,11 +179,6 @@ class PACKED Thread { */ static int GetNativePriority(); - // Returns the "main" ThreadGroup, used when attaching user threads. - static Object* GetMainThreadGroup(); - // Returns the "system" ThreadGroup, used when attaching our internal threads. - static Object* GetSystemThreadGroup(); - uint32_t GetThinLockId() const { return thin_lock_id_; } @@ -193,7 +188,7 @@ class PACKED Thread { } // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer. - String* GetThreadName() const; + String* GetThreadName(const ScopedJniThreadState& ts) const; // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. @@ -206,7 +201,7 @@ class PACKED Thread { return peer_; } - Object* GetThreadGroup() const; + Object* GetThreadGroup(const ScopedJniThreadState& ts) const; RuntimeStats* GetStats() { return &stats_; @@ -322,19 +317,19 @@ class PACKED Thread { NotifyLocked(); } - const ClassLoader* GetClassLoaderOverride() { + ClassLoader* GetClassLoaderOverride() { // TODO: need to place the class_loader_override_ in a handle // DCHECK(CanAccessDirectReferences()); return class_loader_override_; } - void SetClassLoaderOverride(const ClassLoader* class_loader_override) { + void SetClassLoaderOverride(ClassLoader* class_loader_override) { class_loader_override_ = class_loader_override; } // Create the internal representation of a stack trace, that is more time // and space efficient to compute than the StackTraceElement[] - jobject CreateInternalStackTrace(JNIEnv* env) const; + jobject CreateInternalStackTrace(const ScopedJniThreadState& ts) const; // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a // StackTraceElement[]. 
If output_array is NULL, a new array is created, otherwise as many @@ -504,7 +499,7 @@ class PACKED Thread { void Destroy(); friend class ThreadList; // For ~Thread and Destroy. - void CreatePeer(const char* name, bool as_daemon, Object* thread_group); + void CreatePeer(const char* name, bool as_daemon, jobject thread_group); friend class Runtime; // For CreatePeer. void DumpState(std::ostream& os) const; @@ -516,8 +511,8 @@ class PACKED Thread { static void* CreateCallback(void* arg); - void HandleUncaughtExceptions(); - void RemoveFromThreadGroup(); + void HandleUncaughtExceptions(const ScopedJniThreadState& ts); + void RemoveFromThreadGroup(const ScopedJniThreadState& ts); void Init(); void InitCardTable(); @@ -609,7 +604,7 @@ class PACKED Thread { // Needed to get the right ClassLoader in JNI_OnLoad, but also // useful for testing. - const ClassLoader* class_loader_override_; + ClassLoader* class_loader_override_; // Thread local, lazily allocated, long jump context. Used to deliver exceptions. 
Context* long_jump_context_; diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc index f6a2ddb349..ccc83da9fd 100644 --- a/src/verifier/method_verifier.cc +++ b/src/verifier/method_verifier.cc @@ -204,8 +204,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const Class* klass, std: return VerifyClass(&dex_file, kh.GetDexCache(), klass->GetClassLoader(), class_def_idx, error); } -MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, DexCache* dex_cache, - const ClassLoader* class_loader, uint32_t class_def_idx, std::string& error) { +MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, + DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, std::string& error) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); const byte* class_data = dex_file->GetClassData(class_def); if (class_data == NULL) { @@ -277,7 +277,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, } MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx, const DexFile* dex_file, - DexCache* dex_cache, const ClassLoader* class_loader, uint32_t class_def_idx, + DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, Method* method, uint32_t method_access_flags) { MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item, method_idx, method, method_access_flags); @@ -317,7 +317,7 @@ void MethodVerifier::VerifyMethodAndDump(Method* method) { } MethodVerifier::MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, - const ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, + ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, uint32_t method_idx, Method* method, uint32_t method_access_flags) : work_insn_idx_(-1), method_idx_(method_idx), @@ -2900,7 +2900,7 @@ void 
MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn, field = GetInstanceField(object_type, field_idx); } const char* descriptor; - const ClassLoader* loader; + ClassLoader* loader; if (field != NULL) { descriptor = FieldHelper(field).GetTypeDescriptor(); loader = field->GetDeclaringClass()->GetClassLoader(); @@ -2949,7 +2949,7 @@ void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn, field = GetInstanceField(object_type, field_idx); } const char* descriptor; - const ClassLoader* loader; + ClassLoader* loader; if (field != NULL) { descriptor = FieldHelper(field).GetTypeDescriptor(); loader = field->GetDeclaringClass()->GetClassLoader(); diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h index 5f72678559..64a723ef2e 100644 --- a/src/verifier/method_verifier.h +++ b/src/verifier/method_verifier.h @@ -171,7 +171,8 @@ class MethodVerifier { /* Verify a class. Returns "kNoFailure" on success. */ static FailureKind VerifyClass(const Class* klass, std::string& error); static FailureKind VerifyClass(const DexFile* dex_file, DexCache* dex_cache, - const ClassLoader* class_loader, uint32_t class_def_idx, std::string& error); + ClassLoader* class_loader, uint32_t class_def_idx, + std::string& error); uint8_t EncodePcToReferenceMapData() const; @@ -212,7 +213,7 @@ class MethodVerifier { private: explicit MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, - const ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, + ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, uint32_t method_idx, Method* method, uint32_t access_flags); // Adds the given string to the beginning of the last failure message. @@ -233,7 +234,7 @@ class MethodVerifier { * for code flow problems. 
*/ static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, DexCache* dex_cache, - const ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, + ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, Method* method, uint32_t method_access_flags); static void VerifyMethodAndDump(Method* method); @@ -611,7 +612,7 @@ class MethodVerifier { uint32_t method_access_flags_; // Method's access flags. const DexFile* dex_file_; // The dex file containing the method. DexCache* dex_cache_; // The dex_cache for the declaring class of the method. - const ClassLoader* class_loader_; // The class loader for the declaring class of the method. + ClassLoader* class_loader_; // The class loader for the declaring class of the method. uint32_t class_def_idx_; // The class def index of the declaring class of the method. const DexFile::CodeItem* code_item_; // The code item containing the code for the method. UniquePtr insn_flags_; // Instruction widths and flags, one entry per code unit. 
diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc index 217084ff55..dd54b5fc38 100644 --- a/src/verifier/reg_type.cc +++ b/src/verifier/reg_type.cc @@ -294,7 +294,7 @@ Class* RegType::ClassJoin(Class* s, Class* t) { } Class* common_elem = ClassJoin(s_ct, t_ct); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - const ClassLoader* class_loader = s->GetClassLoader(); + ClassLoader* class_loader = s->GetClassLoader(); std::string descriptor("["); descriptor += ClassHelper(common_elem).GetDescriptor(); Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader); diff --git a/src/verifier/reg_type_cache.cc b/src/verifier/reg_type_cache.cc index c860bd72ba..bb05e7e103 100644 --- a/src/verifier/reg_type_cache.cc +++ b/src/verifier/reg_type_cache.cc @@ -57,13 +57,11 @@ static RegType::Type RegTypeFromDescriptor(const std::string& descriptor) { } } -const RegType& RegTypeCache::FromDescriptor(const ClassLoader* loader, - const char* descriptor) { +const RegType& RegTypeCache::FromDescriptor(ClassLoader* loader, const char* descriptor) { return From(RegTypeFromDescriptor(descriptor), loader, descriptor); } -const RegType& RegTypeCache::From(RegType::Type type, const ClassLoader* loader, - const char* descriptor) { +const RegType& RegTypeCache::From(RegType::Type type, ClassLoader* loader, const char* descriptor) { if (type <= RegType::kRegTypeLastFixedLocation) { // entries should be sized greater than primitive types DCHECK_GT(entries_.size(), static_cast(type)); @@ -258,7 +256,7 @@ const RegType& RegTypeCache::FromCat1Const(int32_t value) { return *entry; } -const RegType& RegTypeCache::GetComponentType(const RegType& array, const ClassLoader* loader) { +const RegType& RegTypeCache::GetComponentType(const RegType& array, ClassLoader* loader) { CHECK(array.IsArrayTypes()); if (array.IsUnresolvedTypes()) { std::string descriptor(array.GetDescriptor()->ToModifiedUtf8()); diff --git a/src/verifier/reg_type_cache.h 
b/src/verifier/reg_type_cache.h index 91a2933565..765809c733 100644 --- a/src/verifier/reg_type_cache.h +++ b/src/verifier/reg_type_cache.h @@ -40,10 +40,10 @@ class RegTypeCache { return *result; } - const RegType& From(RegType::Type type, const ClassLoader* loader, const char* descriptor); + const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor); const RegType& FromClass(Class* klass); const RegType& FromCat1Const(int32_t value); - const RegType& FromDescriptor(const ClassLoader* loader, const char* descriptor); + const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor); const RegType& FromType(RegType::Type); const RegType& Boolean() { return FromType(RegType::kRegTypeBoolean); } @@ -77,7 +77,7 @@ class RegTypeCache { const RegType& ShortConstant() { return FromCat1Const(std::numeric_limits::min()); } const RegType& IntConstant() { return FromCat1Const(std::numeric_limits::max()); } - const RegType& GetComponentType(const RegType& array, const ClassLoader* loader); + const RegType& GetComponentType(const RegType& array, ClassLoader* loader); private: // The allocated entries -- cgit v1.2.3-59-g8ed1b From 00f7d0eaa6bd93d33bf0c1429bf4ba0b3f28abac Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 19 Jul 2012 15:28:27 -0700 Subject: Global lock levels. Introduce the notion of the mutators/GC being a shared-exclusive (aka reader-writer) lock. Introduce globally ordered locks, analysable by annotalysis, statically at compile time. Add locking attributes to methods. More subtly, remove the heap_lock_ and split between various locks that are held for smaller periods (where work doesn't get blocked). Remove buggy Dalvik style thread transitions. Make GC use CMS in all cases when concurrent is enabled. Fix bug where suspend counts rather than debug suspend counts were sent to JDWP. Move the PathClassLoader to WellKnownClasses. In debugger refactor calls to send request and possibly suspend. 
Break apart different VmWait thread states. Move identity hash code to a shared method. Change-Id: Icdbfc3ce3fcccd14341860ac7305d8e97b51f5c6 --- build/Android.common.mk | 4 +- src/card_table.h | 4 +- src/check_jni.cc | 112 +- src/class_linker.cc | 185 +-- src/class_linker.h | 337 +++-- src/class_linker_test.cc | 80 +- src/class_loader.cc | 46 - src/class_loader.h | 24 - src/common_test.h | 41 +- src/compiler.cc | 442 ++++--- src/compiler.h | 149 ++- src/compiler/Compiler.h | 2 +- src/compiler/CompilerIR.h | 4 +- src/compiler/Dataflow.cc | 2 +- src/compiler/Frontend.cc | 5 +- src/compiler/codegen/CodegenUtil.cc | 4 +- src/compiler/codegen/GenCommon.cc | 25 +- src/compiler/codegen/MethodCodegenDriver.cc | 2 +- src/compiler_llvm/runtime_support_llvm.cc | 4 +- src/compiler_test.cc | 39 +- src/debugger.cc | 426 ++++-- src/debugger.h | 267 ++-- src/dex2oat.cc | 100 +- src/dex_cache.h | 60 +- src/dex_cache_test.cc | 1 + src/dex_file.cc | 27 +- src/dex_file.h | 13 +- src/exception_test.cc | 16 +- src/heap.cc | 724 +++++++---- src/heap.h | 147 ++- src/heap_bitmap.h | 15 +- src/heap_test.cc | 18 +- src/hprof/hprof.cc | 45 +- src/image.h | 3 +- src/image_test.cc | 36 +- src/image_writer.cc | 47 +- src/image_writer.h | 68 +- src/indirect_reference_table.cc | 13 +- src/indirect_reference_table.h | 5 +- src/indirect_reference_table_test.cc | 1 + src/intern_table.h | 23 +- src/intern_table_test.cc | 9 +- src/jdwp/jdwp.h | 81 +- src/jdwp/jdwp_event.cc | 99 +- src/jdwp/jdwp_handler.cc | 270 ++-- src/jdwp/jdwp_main.cc | 140 +- src/jni_compiler_test.cc | 256 ++-- src/jni_internal.cc | 1360 ++++++++++---------- src/jni_internal.h | 31 +- src/jni_internal_test.cc | 88 +- src/jobject_comparator.cc | 55 + src/jobject_comparator.h | 30 + src/logging.cc | 10 +- src/logging.h | 31 +- src/mark_sweep.cc | 21 +- src/mark_sweep.h | 164 ++- src/mod_union_table.cc | 13 +- src/mod_union_table.h | 16 +- src/monitor.cc | 196 +-- src/monitor.h | 96 +- src/monitor_android.cc | 6 +- src/mutex.cc | 358 
++++-- src/mutex.h | 394 +++++- src/mutex_test.cc | 42 +- src/native/dalvik_system_DexFile.cc | 35 +- src/native/dalvik_system_VMDebug.cc | 48 +- src/native/dalvik_system_VMRuntime.cc | 18 +- src/native/dalvik_system_VMStack.cc | 55 +- src/native/java_lang_Class.cc | 122 +- src/native/java_lang_Object.cc | 20 +- src/native/java_lang_Runtime.cc | 9 +- src/native/java_lang_String.cc | 18 +- src/native/java_lang_System.cc | 27 +- src/native/java_lang_Thread.cc | 75 +- src/native/java_lang_Throwable.cc | 6 +- src/native/java_lang_VMClassLoader.cc | 8 +- src/native/java_lang_reflect_Array.cc | 27 +- src/native/java_lang_reflect_Constructor.cc | 14 +- src/native/java_lang_reflect_Field.cc | 58 +- src/native/java_lang_reflect_Method.cc | 18 +- src/native/java_lang_reflect_Proxy.cc | 16 +- .../org_apache_harmony_dalvik_ddmc_DdmServer.cc | 2 + ...org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 58 +- src/native/sun_misc_Unsafe.cc | 100 +- src/oat/jni/arm/calling_convention_arm.cc | 65 +- src/oat/jni/arm/calling_convention_arm.h | 10 +- src/oat/jni/calling_convention.h | 10 +- src/oat/jni/jni_compiler.cc | 596 ++++----- src/oat/jni/x86/calling_convention_x86.cc | 16 +- src/oat/jni/x86/calling_convention_x86.h | 2 +- src/oat/runtime/arm/oat_support_entrypoints_arm.cc | 18 +- src/oat/runtime/arm/stub_arm.cc | 1 + src/oat/runtime/callee_save_frame.h | 8 +- src/oat/runtime/oat_support_entrypoints.h | 8 +- src/oat/runtime/stub.h | 44 + src/oat/runtime/support_alloc.cc | 18 +- src/oat/runtime/support_cast.cc | 9 +- src/oat/runtime/support_debug.cc | 6 +- src/oat/runtime/support_dexcache.cc | 12 +- src/oat/runtime/support_field.cc | 36 +- src/oat/runtime/support_fillarray.cc | 3 +- src/oat/runtime/support_invoke.cc | 21 +- src/oat/runtime/support_jni.cc | 116 +- src/oat/runtime/support_locks.cc | 6 +- src/oat/runtime/support_proxy.cc | 19 +- src/oat/runtime/support_stubs.cc | 16 +- src/oat/runtime/support_thread.cc | 13 +- src/oat/runtime/support_throw.cc | 25 +- 
src/oat/runtime/x86/oat_support_entrypoints_x86.cc | 18 +- src/oat/runtime/x86/stub_x86.cc | 1 + src/oat/utils/arm/assembler_arm.cc | 21 +- src/oat/utils/arm/assembler_arm.h | 7 +- src/oat/utils/assembler.h | 2 +- src/oat/utils/x86/assembler_x86.cc | 7 +- src/oat/utils/x86/assembler_x86.h | 6 +- src/oat_compilation_unit.h | 35 +- src/oat_test.cc | 14 +- src/oat_writer.cc | 11 +- src/oat_writer.h | 23 +- src/oatdump.cc | 39 +- src/object.cc | 13 +- src/object.h | 381 ++++-- src/object_test.cc | 43 +- src/object_utils.h | 150 ++- src/reference_table.cc | 17 +- src/reference_table.h | 5 +- src/reference_table_test.cc | 1 + src/reflection.cc | 92 +- src/reflection.h | 34 +- src/runtime.cc | 93 +- src/runtime.h | 39 +- src/runtime_linux.cc | 3 +- src/runtime_support.cc | 3 +- src/runtime_support.h | 65 +- src/scoped_heap_lock.h | 42 - src/scoped_jni_thread_state.h | 195 --- src/scoped_thread_list_lock.cc | 50 - src/scoped_thread_list_lock.h | 35 - src/scoped_thread_list_lock_releaser.cc | 42 - src/scoped_thread_list_lock_releaser.h | 38 - src/scoped_thread_state_change.h | 315 +++++ src/signal_catcher.cc | 39 +- src/signal_catcher.h | 5 +- src/space.cc | 24 +- src/space.h | 14 +- src/space_bitmap.cc | 6 +- src/space_bitmap.h | 6 +- src/stack.cc | 33 +- src/stack.h | 28 +- src/thread.cc | 682 +++++----- src/thread.h | 283 ++-- src/thread_list.cc | 533 ++++---- src/thread_list.h | 104 +- src/trace.cc | 23 +- src/trace.h | 10 +- src/utf.h | 4 +- src/utils.h | 24 +- src/utils_test.cc | 7 + src/verifier/method_verifier.cc | 25 +- src/verifier/method_verifier.h | 102 +- src/verifier/method_verifier_test.cc | 12 +- src/verifier/reg_type.cc | 3 +- src/verifier/reg_type.h | 28 +- src/verifier/reg_type_cache.h | 83 +- src/verifier/reg_type_test.cc | 1 + src/verifier/register_line.cc | 3 +- src/verifier/register_line.h | 44 +- src/well_known_classes.cc | 33 +- src/well_known_classes.h | 14 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 11 +- test/StackWalk/stack_walk_jni.cc | 6 
+- 171 files changed, 7816 insertions(+), 5358 deletions(-) delete mode 100644 src/class_loader.cc create mode 100644 src/jobject_comparator.cc create mode 100644 src/jobject_comparator.h create mode 100644 src/oat/runtime/stub.h delete mode 100644 src/scoped_heap_lock.h delete mode 100644 src/scoped_jni_thread_state.h delete mode 100644 src/scoped_thread_list_lock.cc delete mode 100644 src/scoped_thread_list_lock.h delete mode 100644 src/scoped_thread_list_lock_releaser.cc delete mode 100644 src/scoped_thread_list_lock_releaser.h create mode 100644 src/scoped_thread_state_change.h (limited to 'src/native/java_lang_System.cc') diff --git a/build/Android.common.mk b/build/Android.common.mk index 34e8627b81..c5a6407695 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -147,7 +147,6 @@ LIBART_COMMON_SRC_FILES := \ src/card_table.cc \ src/check_jni.cc \ src/class_linker.cc \ - src/class_loader.cc \ src/compiled_method.cc \ src/compiler.cc \ src/debugger.cc \ @@ -175,6 +174,7 @@ LIBART_COMMON_SRC_FILES := \ src/jdwp/jdwp_main.cc \ src/jdwp/jdwp_socket.cc \ src/jni_internal.cc \ + src/jobject_comparator.cc \ src/logging.cc \ src/mark_stack.cc \ src/mark_sweep.cc \ @@ -223,8 +223,6 @@ LIBART_COMMON_SRC_FILES := \ src/reflection.cc \ src/runtime.cc \ src/runtime_support.cc \ - src/scoped_thread_list_lock.cc \ - src/scoped_thread_list_lock_releaser.cc \ src/signal_catcher.cc \ src/space.cc \ src/space_bitmap.cc \ diff --git a/src/card_table.h b/src/card_table.h index d065bed8e8..e1d0646391 100644 --- a/src/card_table.h +++ b/src/card_table.h @@ -74,7 +74,9 @@ class CardTable { // For every dirty card between begin and end invoke the visitor with the specified argument. 
template - void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor) const { + void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor) const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(bitmap->HasAddress(scan_begin)); DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan. byte* card_cur = CardFromAddr(scan_begin); diff --git a/src/check_jni.cc b/src/check_jni.cc index 47f20e19d5..b387f5f610 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -22,7 +22,7 @@ #include "class_linker.h" #include "logging.h" #include "object_utils.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "thread.h" #include "runtime.h" @@ -35,6 +35,7 @@ namespace art { static void JniAbort(const char* jni_function_name, const char* msg) { Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); Method* current_method = self->GetCurrentMethod(); std::ostringstream os; @@ -54,7 +55,11 @@ static void JniAbort(const char* jni_function_name, const char* msg) { if (vm->check_jni_abort_hook != NULL) { vm->check_jni_abort_hook(vm->check_jni_abort_hook_data, os.str()); } else { - self->SetState(kNative); // Ensure that we get a native stack trace for this thread. + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(self->GetState(), kRunnable); + self->SetState(kNative); // Ensure that we get a native stack trace for this thread. 
+ } LOG(FATAL) << os.str(); } } @@ -120,7 +125,8 @@ static const char* gBuiltInPrefixes[] = { NULL }; -static bool ShouldTrace(JavaVMExt* vm, const Method* method) { +static bool ShouldTrace(JavaVMExt* vm, const Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages // when a native method that matches the -Xjnitrace argument calls a JNI function // such as NewByteArray. @@ -146,16 +152,27 @@ static bool ShouldTrace(JavaVMExt* vm, const Method* method) { class ScopedCheck { public: // For JNIEnv* functions. - explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) : ts_(env) { + explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + : soa_(env) { Init(flags, functionName, true); CheckThread(flags); } // For JavaVM* functions. - explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) : ts_(vm) { + // TODO: it's not correct that this is a lock function, but making it so aids annotalysis. + explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + : soa_(vm) { Init(kFlag_Invocation, functionName, has_method); } + ~ScopedCheck() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {} + + const ScopedObjectAccess& soa() { + return soa_; + } + bool ForceCopy() { return Runtime::Current()->GetJavaVM()->force_copy; } @@ -179,7 +196,8 @@ class ScopedCheck { * * Works for both static and instance fields. 
*/ - void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) { + void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -187,7 +205,7 @@ class ScopedCheck { Class* field_type = FieldHelper(f).GetType(); if (!field_type->IsPrimitive()) { if (java_object != NULL) { - Object* obj = ts_.Decode(java_object); + Object* obj = soa_.Decode(java_object); // If java_object is a weak global ref whose referent has been cleared, // obj will be NULL. Otherwise, obj should always be non-NULL // and valid. @@ -224,8 +242,9 @@ class ScopedCheck { * * Assumes "jobj" has already been validated. */ - void CheckInstanceFieldID(jobject java_object, jfieldID fid) { - Object* o = ts_.Decode(java_object); + void CheckInstanceFieldID(jobject java_object, jfieldID fid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Object* o = soa_.Decode(java_object); if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) { JniAbortF(function_name_, "field operation on invalid %s: %p", ToStr(GetIndirectRefKind(java_object)).c_str(), java_object); @@ -257,7 +276,8 @@ class ScopedCheck { * Verify that the method's return type matches the type of call. * 'expectedType' will be "L" for all objects, including arrays. */ - void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) { + void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -282,8 +302,9 @@ class ScopedCheck { * * Assumes "java_class" has already been validated. 
*/ - void CheckStaticFieldID(jclass java_class, jfieldID fid) { - Class* c = ts_.Decode(java_class); + void CheckStaticFieldID(jclass java_class, jfieldID fid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* c = soa_.Decode(java_class); const Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -303,12 +324,13 @@ class ScopedCheck { * * Instances of "java_class" must be instances of the method's declaring class. */ - void CheckStaticMethod(jclass java_class, jmethodID mid) { + void CheckStaticMethod(jclass java_class, jmethodID mid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; } - Class* c = ts_.Decode(java_class); + Class* c = soa_.Decode(java_class); if (!c->IsAssignableFrom(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str()); @@ -322,12 +344,13 @@ class ScopedCheck { * (Note the mid might point to a declaration in an interface; this * will be handled automatically by the instanceof check.) */ - void CheckVirtualMethod(jobject java_object, jmethodID mid) { + void CheckVirtualMethod(jobject java_object, jmethodID mid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; } - Object* o = ts_.Decode(java_object); + Object* o = soa_.Decode(java_object); if (!o->InstanceOf(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str()); @@ -370,11 +393,12 @@ class ScopedCheck { * * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ - void Check(bool entry, const char* fmt0, ...) { + void Check(bool entry, const char* fmt0, ...) 
+ SHARED_LOCKS_REQUIRED (GlobalSynchronization::mutator_lock_) { va_list ap; const Method* traceMethod = NULL; - if ((!ts_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) { + if ((!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) { // We need to guard some of the invocation interface's calls: a bad caller might // use DetachCurrentThread or GetEnv on a thread that's not yet attached. Thread* self = Thread::Current(); @@ -383,7 +407,7 @@ class ScopedCheck { } } - if (((flags_ & kFlag_ForceTrace) != 0) || (traceMethod != NULL && ShouldTrace(ts_.Vm(), traceMethod))) { + if (((flags_ & kFlag_ForceTrace) != 0) || (traceMethod != NULL && ShouldTrace(soa_.Vm(), traceMethod))) { va_start(ap, fmt0); std::string msg; for (const char* fmt = fmt0; *fmt;) { @@ -571,7 +595,8 @@ class ScopedCheck { * Because we're looking at an object on the GC heap, we have to switch * to "running" mode before doing the checks. */ - bool CheckInstance(InstanceKind kind, jobject java_object) { + bool CheckInstance(InstanceKind kind, jobject java_object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const char* what = NULL; switch (kind) { case kClass: @@ -598,7 +623,7 @@ class ScopedCheck { return false; } - Object* obj = ts_.Decode(java_object); + Object* obj = soa_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) { JniAbortF(function_name_, "%s is an invalid %s: %p (%p)", what, ToStr(GetIndirectRefKind(java_object)).c_str(), java_object, obj); @@ -645,13 +670,13 @@ class ScopedCheck { * * Since we're dealing with objects, switch to "running" mode. 
*/ - void CheckArray(jarray java_array) { + void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (java_array == NULL) { JniAbortF(function_name_, "jarray was NULL"); return; } - Array* a = ts_.Decode(java_array); + Array* a = soa_.Decode(java_array); if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) { JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)", ToStr(GetIndirectRefKind(java_array)).c_str(), java_array, a); @@ -666,12 +691,12 @@ class ScopedCheck { } } - Field* CheckFieldID(jfieldID fid) { + Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (fid == NULL) { JniAbortF(function_name_, "jfieldID was NULL"); return NULL; } - Field* f = ts_.DecodeField(fid); + Field* f = soa_.DecodeField(fid); if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || !f->IsField()) { JniAbortF(function_name_, "invalid jfieldID: %p", fid); return NULL; @@ -679,12 +704,12 @@ class ScopedCheck { return f; } - Method* CheckMethodID(jmethodID mid) { + Method* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (mid == NULL) { JniAbortF(function_name_, "jmethodID was NULL"); return NULL; } - Method* m = ts_.DecodeMethod(mid); + Method* m = soa_.DecodeMethod(mid); if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsMethod()) { JniAbortF(function_name_, "invalid jmethodID: %p", mid); return NULL; @@ -698,12 +723,13 @@ class ScopedCheck { * * Switches to "running" mode before performing checks. */ - void CheckObject(jobject java_object) { + void CheckObject(jobject java_object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (java_object == NULL) { return; } - Object* o = ts_.Decode(java_object); + Object* o = soa_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) { // TODO: when we remove work_around_app_jni_bugs, this should be impossible. 
JniAbortF(function_name_, "native code passing in reference to invalid %s: %p", @@ -721,7 +747,7 @@ class ScopedCheck { } } - void CheckThread(int flags) { + void CheckThread(int flags) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Thread* self = Thread::Current(); if (self == NULL) { JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid()); @@ -733,13 +759,13 @@ class ScopedCheck { // Verify that the current thread is (a) attached and (b) associated with // this particular instance of JNIEnv. - if (ts_.Env() != threadEnv) { - if (ts_.Vm()->work_around_app_jni_bugs) { + if (soa_.Env() != threadEnv) { + if (soa_.Vm()->work_around_app_jni_bugs) { // If we're keeping broken code limping along, we need to suppress the abort... - LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *ts_.Self(); + LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *soa_.Self(); } else { JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s", - ToStr(*self).c_str(), ToStr(*ts_.Self()).c_str()); + ToStr(*self).c_str(), ToStr(*soa_.Self()).c_str()); return; } } @@ -778,7 +804,7 @@ class ScopedCheck { // TODO: do we care any more? art always dumps pending exceptions on aborting threads. if (type != "java.lang.OutOfMemoryError") { JniAbortF(function_name_, "JNI %s called with pending exception: %s", - function_name_, type.c_str(), jniGetStackTrace(ts_.Env()).c_str()); + function_name_, type.c_str(), jniGetStackTrace(soa_.Env()).c_str()); } else { JniAbortF(function_name_, "JNI %s called with %s pending", function_name_, type.c_str()); } @@ -855,7 +881,7 @@ class ScopedCheck { return 0; } - const ScopedJniThreadState ts_; + const ScopedObjectAccess soa_; const char* function_name_; int flags_; bool has_method_; @@ -1051,9 +1077,9 @@ struct GuardedCopy { * data are allowed. Returns a pointer to the copied data. 
*/ static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); - Array* a = ts.Decode(java_array); + Array* a = soa.Decode(java_array); size_t component_size = a->GetClass()->GetComponentSize(); size_t byte_count = a->GetLength() * component_size; void* result = GuardedCopy::Create(a->GetRawData(component_size), byte_count, true); @@ -1072,8 +1098,8 @@ static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, return; } - ScopedJniThreadState ts(env); - Array* a = ts.Decode(java_array); + ScopedObjectAccess soa(env); + Array* a = soa.Decode(java_array); GuardedCopy::Check(__FUNCTION__, dataBuf, true); @@ -1461,8 +1487,7 @@ CALL(void, Void, , , VOID_RETURN, "V"); CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy); const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(java_string); + String* s = sc.soa().Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { @@ -1689,8 +1714,7 @@ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D'); CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy); const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(java_string); + String* s = sc.soa().Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { diff --git a/src/class_linker.cc b/src/class_linker.cc index df14a4105e..35146127e9 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -45,8 +45,8 @@ #if defined(ART_USE_LLVM_COMPILER) #include "compiler_llvm/runtime_support_llvm.h" #endif -#include 
"scoped_jni_thread_state.h" #include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "space_bitmap.h" #include "stack_indirect_reference_table.h" @@ -58,7 +58,9 @@ namespace art { -static void ThrowNoClassDefFoundError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))); +static void ThrowNoClassDefFoundError(const char* fmt, ...) + __attribute__((__format__(__printf__, 1, 2))) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -66,7 +68,9 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_end(args); } -static void ThrowClassFormatError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))); +static void ThrowClassFormatError(const char* fmt, ...) + __attribute__((__format__(__printf__, 1, 2))) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void ThrowClassFormatError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -74,7 +78,9 @@ static void ThrowClassFormatError(const char* fmt, ...) { va_end(args); } -static void ThrowLinkageError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))); +static void ThrowLinkageError(const char* fmt, ...) + __attribute__((__format__(__printf__, 1, 2))) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void ThrowLinkageError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -83,7 +89,8 @@ static void ThrowLinkageError(const char* fmt, ...) { } static void ThrowNoSuchMethodError(bool is_direct, Class* c, const StringPiece& name, - const StringPiece& signature) { + const StringPiece& signature) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassHelper kh(c); std::ostringstream msg; msg << "no " << (is_direct ? 
"direct" : "virtual") << " method " << name << signature @@ -96,7 +103,8 @@ static void ThrowNoSuchMethodError(bool is_direct, Class* c, const StringPiece& } static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const StringPiece& type, - const StringPiece& name) { + const StringPiece& name) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassHelper kh(c); std::ostringstream msg; msg << "no " << scope << "field " << name << " of type " << type @@ -108,7 +116,9 @@ static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const Stri Thread::Current()->ThrowNewException("Ljava/lang/NoSuchFieldError;", msg.str().c_str()); } -static void ThrowNullPointerException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))); +static void ThrowNullPointerException(const char* fmt, ...) + __attribute__((__format__(__printf__, 1, 2))) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void ThrowNullPointerException(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -116,7 +126,8 @@ static void ThrowNullPointerException(const char* fmt, ...) { va_end(args); } -static void ThrowEarlierClassFailure(Class* c) { +static void ThrowEarlierClassFailure(Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // The class failed to initialize on a previous attempt, so we want to throw // a NoClassDefFoundError (v2 2.17.5). 
The exception to this rule is if we // failed in verification, in which case v2 5.4.1 says we need to re-throw @@ -134,7 +145,8 @@ static void ThrowEarlierClassFailure(Class* c) { } } -static void WrapExceptionInInitializer() { +static void WrapExceptionInInitializer() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -172,8 +184,6 @@ const char* ClassLinker::class_roots_descriptors_[] = { "Ljava/lang/reflect/Method;", "Ljava/lang/reflect/Proxy;", "Ljava/lang/ClassLoader;", - "Ldalvik/system/BaseDexClassLoader;", - "Ldalvik/system/PathClassLoader;", "Ljava/lang/Throwable;", "Ljava/lang/ClassNotFoundException;", "Ljava/lang/StackTraceElement;", @@ -212,8 +222,8 @@ ClassLinker* ClassLinker::CreateFromImage(InternTable* intern_table) { } ClassLinker::ClassLinker(InternTable* intern_table) - : dex_lock_("ClassLinker dex lock"), - classes_lock_("ClassLinker classes lock"), + // dex_lock_ is recursive as it may be used in stack dumping. 
+ : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel, true), class_roots_(NULL), array_iftable_(NULL), init_done_(false), @@ -433,20 +443,11 @@ void ClassLinker::InitFromCompiler(const std::vector& boot_class java_lang_ref_WeakReference->GetAccessFlags() | kAccClassIsReference | kAccClassIsWeakReference); - // Setup the ClassLoaders, verifying the object_size_ + // Setup the ClassLoader, verifying the object_size_ Class* java_lang_ClassLoader = FindSystemClass("Ljava/lang/ClassLoader;"); CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), sizeof(ClassLoader)); SetClassRoot(kJavaLangClassLoader, java_lang_ClassLoader); - Class* dalvik_system_BaseDexClassLoader = FindSystemClass("Ldalvik/system/BaseDexClassLoader;"); - CHECK_EQ(dalvik_system_BaseDexClassLoader->GetObjectSize(), sizeof(BaseDexClassLoader)); - SetClassRoot(kDalvikSystemBaseDexClassLoader, dalvik_system_BaseDexClassLoader); - - Class* dalvik_system_PathClassLoader = FindSystemClass("Ldalvik/system/PathClassLoader;"); - CHECK_EQ(dalvik_system_PathClassLoader->GetObjectSize(), sizeof(PathClassLoader)); - SetClassRoot(kDalvikSystemPathClassLoader, dalvik_system_PathClassLoader); - PathClassLoader::SetClass(dalvik_system_PathClassLoader); - // Set up java.lang.Throwable, java.lang.ClassNotFoundException, and // java.lang.StackTraceElement as a convenience SetClassRoot(kJavaLangThrowable, FindSystemClass("Ljava/lang/Throwable;")); @@ -536,7 +537,7 @@ void ClassLinker::RunRootClinits() { Class* c = GetClassRoot(ClassRoot(i)); if (!c->IsArrayClass() && !c->IsPrimitive()) { EnsureInitialized(GetClassRoot(ClassRoot(i)), true, true); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); } } } @@ -656,11 +657,11 @@ OatFile* ClassLinker::OpenOat(const ImageSpace* space) { } const OatFile* ClassLinker::FindOpenedOatFileForDexFile(const DexFile& dex_file) { + MutexLock mu(dex_lock_); return FindOpenedOatFileFromDexLocation(dex_file.GetLocation()); } const 
OatFile* ClassLinker::FindOpenedOatFileFromDexLocation(const std::string& dex_location) { - MutexLock mu(dex_lock_); for (size_t i = 0; i < oat_files_.size(); i++) { const OatFile* oat_file = oat_files_[i]; DCHECK(oat_file != NULL); @@ -698,6 +699,12 @@ static const DexFile* FindDexFileInOatLocation(const std::string& dex_location, const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const std::string& dex_location, const std::string& oat_location) { + MutexLock mu(dex_lock_); + return FindOrCreateOatFileForDexLocationLocked(dex_location, oat_location); +} + +const DexFile* ClassLinker::FindOrCreateOatFileForDexLocationLocked(const std::string& dex_location, + const std::string& oat_location) { uint32_t dex_location_checksum; if (!DexFile::GetChecksum(dex_location, dex_location_checksum)) { LOG(ERROR) << "Failed to compute checksum '" << dex_location << "'"; @@ -713,13 +720,12 @@ const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const std::string& } // Generate the output oat file for the dex file - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); UniquePtr file(OS::OpenFile(oat_location.c_str(), true)); if (file.get() == NULL) { LOG(ERROR) << "Failed to create oat file: " << oat_location; return NULL; } - if (!class_linker->GenerateOatFile(dex_location, file->Fd(), oat_location)) { + if (!GenerateOatFile(dex_location, file->Fd(), oat_location)) { LOG(ERROR) << "Failed to generate oat file: " << oat_location; return NULL; } @@ -734,7 +740,7 @@ const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const std::string& LOG(ERROR) << "Failed to open generated oat file: " << oat_location; return NULL; } - class_linker->RegisterOatFile(*oat_file); + RegisterOatFileLocked(*oat_file); const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location); if (oat_dex_file == NULL) { LOG(ERROR) << "Failed to find dex file in generated oat file: " << oat_location; @@ -808,7 +814,7 @@ const DexFile* 
ClassLinker::FindDexFileInOatFileFromDexLocation(const std::strin // Look for an existing file next to dex. for example, for // /foo/bar/baz.jar, look for /foo/bar/baz.jar.oat. std::string oat_filename(OatFile::DexFilenameToOatFilename(dex_location)); - const OatFile* oat_file = FindOatFileFromOatLocation(oat_filename); + const OatFile* oat_file = FindOatFileFromOatLocationLocked(oat_filename); if (oat_file != NULL) { uint32_t dex_location_checksum; if (!DexFile::GetChecksum(dex_location, dex_location_checksum)) { @@ -829,7 +835,7 @@ const DexFile* ClassLinker::FindDexFileInOatFileFromDexLocation(const std::strin // Look for an existing file in the art-cache, validating the result if found // not found in /foo/bar/baz.oat? try /data/art-cache/foo@bar@baz.oat std::string cache_location(GetArtCacheFilenameOrDie(oat_filename)); - oat_file = FindOatFileFromOatLocation(cache_location); + oat_file = FindOatFileFromOatLocationLocked(cache_location); if (oat_file != NULL) { uint32_t dex_location_checksum; if (!DexFile::GetChecksum(dex_location, dex_location_checksum)) { @@ -850,11 +856,10 @@ const DexFile* ClassLinker::FindDexFileInOatFileFromDexLocation(const std::strin // Try to generate oat file if it wasn't found or was obsolete. 
std::string oat_cache_filename(GetArtCacheFilenameOrDie(oat_filename)); - return FindOrCreateOatFileForDexLocation(dex_location, oat_cache_filename); + return FindOrCreateOatFileForDexLocationLocked(dex_location, oat_cache_filename); } const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) { - MutexLock mu(dex_lock_); for (size_t i = 0; i < oat_files_.size(); i++) { const OatFile* oat_file = oat_files_[i]; DCHECK(oat_file != NULL); @@ -867,6 +872,10 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location) { MutexLock mu(dex_lock_); + return FindOatFileFromOatLocationLocked(oat_location); +} + +const OatFile* ClassLinker::FindOatFileFromOatLocationLocked(const std::string& oat_location) { const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location); if (oat_file != NULL) { return oat_file; @@ -942,7 +951,6 @@ void ClassLinker::InitFromImage() { IntArray::SetArrayClass(GetClassRoot(kIntArrayClass)); LongArray::SetArrayClass(GetClassRoot(kLongArrayClass)); ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass)); - PathClassLoader::SetClass(GetClassRoot(kDalvikSystemPathClassLoader)); Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); @@ -984,7 +992,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } { - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { visitor(it->second, arg); @@ -998,7 +1006,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const { - MutexLock mu(classes_lock_); + MutexLock 
mu(*GlobalSynchronization::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { if (!visitor(it->second, arg)) { @@ -1012,6 +1020,24 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const { } } +static bool GetClassesVisitor(Class* c, void* arg) { + std::set* classes = reinterpret_cast*>(arg); + classes->insert(c); + return true; +} + +void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const { + std::set classes; + VisitClasses(GetClassesVisitor, &classes); + typedef std::set::const_iterator It; // TODO: C++0x auto + for (It it = classes.begin(), end = classes.end(); it != end; ++it) { + if (!visitor(*it, arg)) { + return; + } + } +} + + ClassLinker::~ClassLinker() { String::ResetClass(); Field::ResetClass(); @@ -1024,7 +1050,6 @@ ClassLinker::~ClassLinker() { IntArray::ResetArrayClass(); LongArray::ResetArrayClass(); ShortArray::ResetArrayClass(); - PathClassLoader::ResetClass(); Throwable::ResetClass(); StackTraceElement::ResetClass(); STLDeleteElements(&boot_class_path_); @@ -1105,7 +1130,8 @@ ObjectArray* ClassLinker::AllocStackTraceElementArray(size_t length); } -static Class* EnsureResolved(Class* klass) { +static Class* EnsureResolved(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(klass != NULL); // Wait for the class if it has not already been linked. Thread* self = Thread::Current(); @@ -1130,7 +1156,8 @@ static Class* EnsureResolved(Class* klass) { // Return the loaded class. No exceptions should be pending. 
CHECK(klass->IsResolved()) << PrettyClass(klass); CHECK(!self->IsExceptionPending()) - << PrettyClass(klass) << " " << PrettyTypeOf(self->GetException()); + << PrettyClass(klass) << " " << PrettyTypeOf(self->GetException()) << "\n" + << self->GetException()->Dump(); return klass; } @@ -1142,7 +1169,7 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) DCHECK_NE(*descriptor, '\0') << "descriptor is empty string"; Thread* self = Thread::Current(); DCHECK(self != NULL); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); if (descriptor[1] == '\0') { // only the descriptors of primitive types should be 1 character long, also avoid class lookup // for primitive classes that aren't backed by dex files. @@ -1173,32 +1200,37 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) self->ClearException(); // next try the compile time class path - const std::vector& class_path - = Runtime::Current()->GetCompileTimeClassPath(class_loader); - DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, class_path); + const std::vector* class_path; + { + ScopedObjectAccessUnchecked soa(Thread::Current()); + ScopedLocalRef jclass_loader(soa.Env(), soa.AddLocalReference(class_loader)); + class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get()); + } + + DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, *class_path); if (pair.second != NULL) { return DefineClass(descriptor, class_loader, *pair.first, *pair.second); } } else { - ScopedJniThreadState ts(self->GetJniEnv()); - ScopedLocalRef class_loader_object(ts.Env(), - ts.AddLocalReference(class_loader)); + ScopedObjectAccessUnchecked soa(self->GetJniEnv()); + ScopedLocalRef class_loader_object(soa.Env(), + soa.AddLocalReference(class_loader)); std::string class_name_string(DescriptorToDot(descriptor)); - ScopedLocalRef result(ts.Env(), NULL); + ScopedLocalRef 
result(soa.Env(), NULL); { ScopedThreadStateChange tsc(self, kNative); - ScopedLocalRef class_name_object(ts.Env(), - ts.Env()->NewStringUTF(class_name_string.c_str())); + ScopedLocalRef class_name_object(soa.Env(), + soa.Env()->NewStringUTF(class_name_string.c_str())); if (class_name_object.get() == NULL) { return NULL; } CHECK(class_loader_object.get() != NULL); - result.reset(ts.Env()->CallObjectMethod(class_loader_object.get(), - WellKnownClasses::java_lang_ClassLoader_loadClass, - class_name_object.get())); + result.reset(soa.Env()->CallObjectMethod(class_loader_object.get(), + WellKnownClasses::java_lang_ClassLoader_loadClass, + class_name_object.get())); } - if (ts.Env()->ExceptionCheck()) { + if (soa.Env()->ExceptionCheck()) { // If the ClassLoader threw, pass that exception up. return NULL; } else if (result.get() == NULL) { @@ -1208,7 +1240,7 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) return NULL; } else { // success, return Class* - return ts.Decode(result.get()); + return soa.Decode(result.get()); } } @@ -1428,7 +1460,9 @@ void ClassLinker::FixupStaticTrampolines(Class* klass) { } } -static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, uint32_t method_index) { +static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, + uint32_t method_index) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Every kind of method should at least get an invoke stub from the oat_method. // non-abstract methods also get their code pointers. const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index); @@ -1863,7 +1897,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo LOG(INFO) << "Loaded class " << descriptor << source; } size_t hash = StringPieceHash()(descriptor); - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); Table& classes = image_class ? 
image_classes_ : classes_; Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes); #ifndef NDEBUG @@ -1880,7 +1914,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); typedef Table::iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh; @@ -1905,7 +1939,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_l Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); // TODO: determine if its better to search classes_ or image_classes_ first Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_); if (klass != NULL) { @@ -1940,7 +1974,7 @@ Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* void ClassLinker::LookupClasses(const char* descriptor, std::vector& classes) { classes.clear(); size_t hash = Hash(descriptor); - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh(NULL, this); @@ -1961,7 +1995,8 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector& cla } #if !defined(NDEBUG) && !defined(ART_USE_LLVM_COMPILER) -static void CheckMethodsHaveGcMaps(Class* klass) { +static void CheckMethodsHaveGcMaps(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (!Runtime::Current()->IsStarted()) { return; } @@ -2050,7 +2085,7 @@ void 
ClassLinker::VerifyClass(Class* klass) { << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8() << " because: " << error_msg; } - DCHECK(!Thread::Current()->IsExceptionPending()); + Thread::Current()->AssertNoPendingException(); CHECK(verifier_failure == verifier::MethodVerifier::kNoFailure || Runtime::Current()->IsCompiler()); // Make sure all classes referenced by catch blocks are resolved @@ -2064,7 +2099,7 @@ void ClassLinker::VerifyClass(Class* klass) { << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8() << " because: " << error_msg; Thread* self = Thread::Current(); - CHECK(!self->IsExceptionPending()); + self->AssertNoPendingException(); self->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); CHECK_EQ(klass->GetStatus(), Class::kStatusVerifying) << PrettyDescriptor(klass); klass->SetStatus(Class::kStatusError); @@ -2300,7 +2335,8 @@ Method* ClassLinker::CreateProxyConstructor(SirtRef& klass, Class* proxy_ return constructor; } -static void CheckProxyConstructor(Method* constructor) { +static void CheckProxyConstructor(Method* constructor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(constructor->IsConstructor()); MethodHelper mh(constructor); CHECK_STREQ(mh.GetName(), ""); @@ -2338,7 +2374,8 @@ Method* ClassLinker::CreateProxyMethod(SirtRef& klass, SirtRef& p return method; } -static void CheckProxyMethod(Method* method, SirtRef& prototype) { +static void CheckProxyMethod(Method* method, SirtRef& prototype) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Basic sanity CHECK(!prototype->IsFinal()); CHECK(method->IsFinal()); @@ -2485,9 +2522,10 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in return success; } -bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock) { +bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { while (true) { - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); lock.Wait(); // When we wake up, repeat the test for init-in-progress. If @@ -3096,8 +3134,11 @@ bool ClassLinker::LinkStaticFields(SirtRef& klass) { } struct LinkFieldsComparator { - explicit LinkFieldsComparator(FieldHelper* fh) : fh_(fh) {} - bool operator()(const Field* field1, const Field* field2) { + explicit LinkFieldsComparator(FieldHelper* fh) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + : fh_(fh) {} + // No thread safety analysis as will be called from STL. Checked lock held in constructor. + bool operator()(const Field* field1, const Field* field2) NO_THREAD_SAFETY_ANALYSIS { // First come reference fields, then 64-bit, and finally 32-bit fh_->ChangeField(field1); Primitive::Type type1 = fh_->GetTypeAsPrimitiveType(); @@ -3497,7 +3538,7 @@ void ClassLinker::DumpAllClasses(int flags) const { // lock held, because it might need to resolve a field's type, which would try to take the lock. 
std::vector all_classes; { - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { all_classes.push_back(it->second); @@ -3513,22 +3554,22 @@ void ClassLinker::DumpAllClasses(int flags) const { } void ClassLinker::DumpForSigQuit(std::ostream& os) const { - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); os << "Loaded classes: " << image_classes_.size() << " image classes; " << classes_.size() << " allocated classes\n"; } size_t ClassLinker::NumLoadedClasses() const { - MutexLock mu(classes_lock_); + MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); return classes_.size() + image_classes_.size(); } pid_t ClassLinker::GetClassesLockOwner() { - return classes_lock_.GetOwner(); + return GlobalSynchronization::classlinker_classes_lock_->GetExclusiveOwnerTid(); } pid_t ClassLinker::GetDexLockOwner() { - return dex_lock_.GetOwner(); + return dex_lock_.GetExclusiveOwnerTid(); } void ClassLinker::SetClassRoot(ClassRoot class_root, Class* klass) { diff --git a/src/class_linker.h b/src/class_linker.h index 8ad8d2d673..c69c442f3e 100644 --- a/src/class_linker.h +++ b/src/class_linker.h @@ -45,46 +45,61 @@ class ClassLinker { public: // Creates the class linker by boot strapping from dex files. static ClassLinker* CreateFromCompiler(const std::vector& boot_class_path, - InternTable* intern_table); + InternTable* intern_table) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Creates the class linker from an image. - static ClassLinker* CreateFromImage(InternTable* intern_table); + static ClassLinker* CreateFromImage(InternTable* intern_table) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); ~ClassLinker(); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. 
- Class* FindClass(const char* descriptor, ClassLoader* class_loader); + Class* FindClass(const char* descriptor, ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Class* FindSystemClass(const char* descriptor); + Class* FindSystemClass(const char* descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Define a new a class based on a ClassDef from a DexFile Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader, - const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); + const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded // by the given 'class_loader'. - Class* LookupClass(const char* descriptor, const ClassLoader* class_loader); + Class* LookupClass(const char* descriptor, const ClassLoader* class_loader) + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. - void LookupClasses(const char* descriptor, std::vector& classes); + void LookupClasses(const char* descriptor, std::vector& classes) + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Class* FindPrimitiveClass(char type); + Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. 
- bool RemoveClass(const char* descriptor, const ClassLoader* class_loader); + bool RemoveClass(const char* descriptor, const ClassLoader* class_loader) + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void DumpAllClasses(int flags) const; + void DumpAllClasses(int flags) const + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void DumpForSigQuit(std::ostream& os) const; + void DumpForSigQuit(std::ostream& os) const + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); - size_t NumLoadedClasses() const; + size_t NumLoadedClasses() const LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - String* ResolveString(uint32_t string_idx, const Method* referrer) { + String* ResolveString(uint32_t string_idx, const Method* referrer) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(resolved_string == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -97,14 +112,14 @@ class ClassLinker { // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. - String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache); + String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. 
- Class* ResolveType(const DexFile& dex_file, - uint16_t type_idx, - const Class* referrer) { + Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const Class* referrer) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return ResolveType(dex_file, type_idx, referrer->GetDexCache(), @@ -114,7 +129,8 @@ class ClassLinker { // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - Class* ResolveType(uint16_t type_idx, const Method* referrer) { + Class* ResolveType(uint16_t type_idx, const Method* referrer) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(resolved_type == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -126,7 +142,8 @@ class ClassLinker { return resolved_type; } - Class* ResolveType(uint16_t type_idx, const Field* referrer) { + Class* ResolveType(uint16_t type_idx, const Field* referrer) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); Class* resolved_type = dex_cache->GetResolvedType(type_idx); @@ -145,7 +162,8 @@ class ClassLinker { Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, DexCache* dex_cache, - ClassLoader* class_loader); + ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. 
The ClassLinker and ClassLoader are used as @@ -156,9 +174,11 @@ class ClassLinker { uint32_t method_idx, DexCache* dex_cache, ClassLoader* class_loader, - bool is_direct); + bool is_direct) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* ResolveMethod(uint32_t method_idx, const Method* referrer, bool is_direct) { + Method* ResolveMethod(uint32_t method_idx, const Method* referrer, bool is_direct) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* resolved_method = referrer->GetDexCacheResolvedMethods()->Get(method_idx); if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -170,7 +190,8 @@ class ClassLinker { return resolved_method; } - Field* ResolveField(uint32_t field_idx, const Method* referrer, bool is_static) { + Field* ResolveField(uint32_t field_idx, const Method* referrer, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { @@ -192,7 +213,8 @@ class ClassLinker { uint32_t field_idx, DexCache* dex_cache, ClassLoader* class_loader, - bool is_static); + bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -201,89 +223,132 @@ class ClassLinker { Field* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, DexCache* dex_cache, - ClassLoader* class_loader); + ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. 
- const char* MethodShorty(uint32_t method_idx, Method* referrer, uint32_t* length); + const char* MethodShorty(uint32_t method_idx, Method* referrer, uint32_t* length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no execution is possible. - bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields); + bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Initializes classes that have instances in the image but that have // methods so they could not be initialized by the compiler. - void RunRootClinits(); + void RunRootClinits() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void RegisterDexFile(const DexFile& dex_file); - void RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache); + void RegisterDexFile(const DexFile& dex_file) + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void RegisterOatFile(const OatFile& oat_file); + void RegisterOatFile(const OatFile& oat_file) + LOCKS_EXCLUDED(dex_lock_); const std::vector& GetBootClassPath() { return boot_class_path_; } - void VisitClasses(ClassVisitor* visitor, void* arg) const; - - void VisitRoots(Heap::RootVisitor* visitor, void* arg) const; - - const DexFile& FindDexFile(const DexCache* dex_cache) const; - DexCache* FindDexCache(const DexFile& dex_file) const; - bool IsDexFileRegistered(const DexFile& dex_file) const; - void FixupDexCaches(Method* resolution_method) const; + void VisitClasses(ClassVisitor* visitor, void* arg) const + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + // Less 
efficient variant of VisitClasses that doesn't hold the classlinker_classes_lock_ + // when calling the visitor. + void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + + void VisitRoots(Heap::RootVisitor* visitor, void* arg) const + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_, dex_lock_); + + const DexFile& FindDexFile(const DexCache* dex_cache) const + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + DexCache* FindDexCache(const DexFile& dex_file) const + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsDexFileRegistered(const DexFile& dex_file) const + LOCKS_EXCLUDED(dex_lock_); + void FixupDexCaches(Method* resolution_method) const + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Generate an oat file from a dex file bool GenerateOatFile(const std::string& dex_filename, int oat_fd, const std::string& oat_cache_filename); - const OatFile* FindOatFileFromOatLocation(const std::string& location); + const OatFile* FindOatFileFromOatLocation(const std::string& location) + LOCKS_EXCLUDED(dex_lock_); + + const OatFile* FindOatFileFromOatLocationLocked(const std::string& location) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); // Finds the oat file for a dex location, generating the oat file if // it is missing or out of date. Returns the DexFile from within the // created oat file. const DexFile* FindOrCreateOatFileForDexLocation(const std::string& dex_location, - const std::string& oat_location); + const std::string& oat_location) + LOCKS_EXCLUDED(dex_lock_); + const DexFile* FindOrCreateOatFileForDexLocationLocked(const std::string& dex_location, + const std::string& oat_location) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); // Find a DexFile within an OatFile given a DexFile location. 
Note // that this returns null if the location checksum of the DexFile // does not match the OatFile. - const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location); + const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location) + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns true if oat file contains the dex file with the given location and checksum static bool VerifyOatFileChecksums(const OatFile* oat_file, const std::string& dex_location, - uint32_t dex_location_checksum); + uint32_t dex_location_checksum) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // TODO: replace this with multiple methods that allocate the correct managed type. template - ObjectArray* AllocObjectArray(size_t length) { + ObjectArray* AllocObjectArray(size_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return ObjectArray::Alloc(GetClassRoot(kObjectArrayClass), length); } - ObjectArray* AllocClassArray(size_t length) { + ObjectArray* AllocClassArray(size_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return ObjectArray::Alloc(GetClassRoot(kClassArrayClass), length); } - ObjectArray* AllocStackTraceElementArray(size_t length); + ObjectArray* AllocStackTraceElementArray(size_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void VerifyClass(Class* klass); + void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, - Class::Status& oat_file_class_status); - void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass); - void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Method* klass); + Class::Status& oat_file_class_status) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Method* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Class* CreateProxyClass(String* name, ObjectArray* interfaces, ClassLoader* loader, - ObjectArray* methods, ObjectArray >* throws); - std::string GetDescriptorForProxy(const Class* proxy_class); - Method* FindMethodForProxy(const Class* proxy_class, const Method* proxy_method); + ObjectArray* methods, ObjectArray >* throws) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + std::string GetDescriptorForProxy(const Class* proxy_class) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* FindMethodForProxy(const Class* proxy_class, const Method* proxy_method) + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized - const void* GetOatCodeFor(const Method* method); + const void* GetOatCodeFor(const Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Relocate the OatFiles (ELF images) - void RelocateExecutable(); + void RelocateExecutable() LOCKS_EXCLUDED(dex_lock_); pid_t GetClassesLockOwner(); // For SignalCatcher. pid_t GetDexLockOwner(); // For SignalCatcher. @@ -291,47 +356,60 @@ class ClassLinker { private: explicit ClassLinker(InternTable*); - const OatFile::OatMethod GetOatMethodFor(const Method* method); + const OatFile::OatMethod GetOatMethodFor(const Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Initialize class linker by bootstraping from dex files - void InitFromCompiler(const std::vector& boot_class_path); + void InitFromCompiler(const std::vector& boot_class_path) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Initialize class linker from one or more images. 
- void InitFromImage(); - OatFile* OpenOat(const ImageSpace* space); - static void InitFromImageCallback(Object* obj, void* arg); + void InitFromImage() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + OatFile* OpenOat(const ImageSpace* space) + LOCKS_EXCLUDED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void InitFromImageCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void FinishInit(); + void FinishInit() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // For early bootstrapping by Init - Class* AllocClass(Class* java_lang_Class, size_t class_size); + Class* AllocClass(Class* java_lang_Class, size_t class_size) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Alloc* convenience functions to avoid needing to pass in Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. - Class* AllocClass(size_t class_size); - DexCache* AllocDexCache(const DexFile& dex_file); - Field* AllocField(); - - Method* AllocMethod(); + Class* AllocClass(size_t class_size) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + DexCache* AllocDexCache(const DexFile& dex_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Field* AllocField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* AllocMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - InterfaceEntry* AllocInterfaceEntry(Class* interface); + InterfaceEntry* AllocInterfaceEntry(Class* interface) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Class* CreatePrimitiveClass(const char* descriptor, Primitive::Type type) { + Class* CreatePrimitiveClass(const char* descriptor, Primitive::Type type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return InitializePrimitiveClass(AllocClass(sizeof(Class)), descriptor, type); } Class* 
InitializePrimitiveClass(Class* primitive_class, const char* descriptor, - Primitive::Type type); + Primitive::Type type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader); + Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void AppendToBootClassPath(const DexFile& dex_file); - void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache); + void AppendToBootClassPath(const DexFile& dex_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - Class* c, SafeMap& field_map); + Class* c, SafeMap& field_map) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t SizeOfClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); @@ -339,15 +417,18 @@ class ClassLinker { void LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, SirtRef& klass, - ClassLoader* class_loader); + ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef& klass, SirtRef& dst); void LoadMethod(const DexFile& dex_file, const ClassDataItemIterator& dex_method, - SirtRef& klass, SirtRef& dst); + SirtRef& klass, SirtRef& dst) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void FixupStaticTrampolines(Class* klass); + void FixupStaticTrampolines(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor); @@ -355,68 +436,97 
@@ class ClassLinker { // Attempts to insert a class into a class table. Returns NULL if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. - Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class); + Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) + LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); + void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); - void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); + void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); - bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics); + bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock); - bool ValidateSuperClassDescriptors(const Class* klass); - bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields); + bool ValidateSuperClassDescriptors(const Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Initialize static fields, returns true if fields were initialized. 
- bool InitializeStaticFields(Class* klass); + bool InitializeStaticFields(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(const char* descriptor, const Class* klass1, - const Class* klass2); + const Class* klass2) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsSameMethodSignatureInDifferentClassContexts(const Method* descriptor, const Class* klass1, - const Class* klass2); + const Class* klass2) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkClass(SirtRef& klass, ObjectArray* interfaces); + bool LinkClass(SirtRef& klass, ObjectArray* interfaces) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkSuperClass(SirtRef& klass); + bool LinkSuperClass(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file); + bool LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkMethods(SirtRef& klass, ObjectArray* interfaces); + bool LinkMethods(SirtRef& klass, ObjectArray* interfaces) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkVirtualMethods(SirtRef& klass); + bool LinkVirtualMethods(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkInterfaceMethods(SirtRef& klass, ObjectArray* interfaces); + bool LinkInterfaceMethods(SirtRef& klass, ObjectArray* interfaces) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool LinkStaticFields(SirtRef& klass); - bool LinkInstanceFields(SirtRef& klass); - bool LinkFields(SirtRef& klass, bool is_static); + bool LinkStaticFields(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool LinkInstanceFields(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool LinkFields(SirtRef& klass, 
bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void CreateReferenceInstanceOffsets(SirtRef& klass); - void CreateReferenceStaticOffsets(SirtRef& klass); + void CreateReferenceInstanceOffsets(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void CreateReferenceStaticOffsets(SirtRef& klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void CreateReferenceOffsets(SirtRef& klass, bool is_static, - uint32_t reference_offsets); + uint32_t reference_offsets) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots const std::vector& GetDexCaches() { return dex_caches_; } - const OatFile* FindOpenedOatFileForDexFile(const DexFile& dex_file); - const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_location); - const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location); + const OatFile* FindOpenedOatFileForDexFile(const DexFile& dex_file) + LOCKS_EXCLUDED(dex_lock_); + const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_location) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); + const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); const DexFile* VerifyAndOpenDexFileFromOatFile(const OatFile* oat_file, const std::string& dex_location, uint32_t dex_location_checksum) - EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); + EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* CreateProxyConstructor(SirtRef& klass, Class* proxy_class); - Method* CreateProxyMethod(SirtRef& klass, SirtRef& prototype); + Method* CreateProxyConstructor(SirtRef& klass, Class* proxy_class) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* CreateProxyMethod(SirtRef& klass, SirtRef& prototype) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); std::vector 
boot_class_path_; - mutable Mutex dex_lock_; + mutable Mutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::vector dex_files_ GUARDED_BY(dex_lock_); std::vector dex_caches_ GUARDED_BY(dex_lock_); std::vector oat_files_ GUARDED_BY(dex_lock_); @@ -425,13 +535,14 @@ class ClassLinker { // multimap from a string hash code of a class descriptor to // Class* instances. Results should be compared for a matching // Class::descriptor_ and Class::class_loader_. - mutable Mutex classes_lock_; typedef std::multimap Table; - Table image_classes_ GUARDED_BY(classes_lock_); - Table classes_ GUARDED_BY(classes_lock_); + Table image_classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); + Table classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader, - size_t hash, const Table& classes) EXCLUSIVE_LOCKS_REQUIRED(classes_lock_); + size_t hash, const Table& classes) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::classlinker_classes_lock_); // indexes into class_roots_. // needs to be kept in sync with class_roots_descriptors_. 
@@ -447,8 +558,6 @@ class ClassLinker { kJavaLangReflectMethod, kJavaLangReflectProxy, kJavaLangClassLoader, - kDalvikSystemBaseDexClassLoader, - kDalvikSystemPathClassLoader, kJavaLangThrowable, kJavaLangClassNotFoundException, kJavaLangStackTraceElement, @@ -474,14 +583,16 @@ class ClassLinker { }; ObjectArray* class_roots_; - Class* GetClassRoot(ClassRoot class_root) { + Class* GetClassRoot(ClassRoot class_root) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(class_roots_ != NULL); Class* klass = class_roots_->Get(class_root); DCHECK(klass != NULL); return klass; } - void SetClassRoot(ClassRoot class_root, Class* klass); + void SetClassRoot(ClassRoot class_root, Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); ObjectArray* GetClassRoots() { DCHECK(class_roots_ != NULL); diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index a7f9c66c51..1eb5e0d1c4 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -29,7 +29,8 @@ namespace art { class ClassLinkerTest : public CommonTest { protected: - void AssertNonExistentClass(const std::string& descriptor) { + void AssertNonExistentClass(const std::string& descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { EXPECT_TRUE(class_linker_->FindSystemClass(descriptor.c_str()) == NULL); Thread* self = Thread::Current(); EXPECT_TRUE(self->IsExceptionPending()); @@ -39,11 +40,13 @@ class ClassLinkerTest : public CommonTest { EXPECT_TRUE(exception->InstanceOf(exception_class)); } - void AssertPrimitiveClass(const std::string& descriptor) { + void AssertPrimitiveClass(const std::string& descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(descriptor.c_str())); } - void AssertPrimitiveClass(const std::string& descriptor, const Class* primitive) { + void AssertPrimitiveClass(const std::string& descriptor, const Class* primitive) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassHelper primitive_ch(primitive); ASSERT_TRUE(primitive != NULL); ASSERT_TRUE(primitive->GetClass() != NULL); @@ -79,7 +82,8 @@ class ClassLinkerTest : public CommonTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, - ClassLoader* class_loader) { + ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* array = class_linker_->FindClass(array_descriptor.c_str(), class_loader); ClassHelper array_component_ch(array->GetComponentType()); EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor()); @@ -87,7 +91,8 @@ class ClassLinkerTest : public CommonTest { AssertArrayClass(array_descriptor, array); } - void AssertArrayClass(const std::string& array_descriptor, Class* array) { + void AssertArrayClass(const std::string& array_descriptor, Class* array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassHelper kh(array); ASSERT_TRUE(array != NULL); ASSERT_TRUE(array->GetClass() != NULL); @@ -130,7 +135,7 @@ class ClassLinkerTest : public CommonTest { EXPECT_STREQ(kh.GetDescriptor(), "Ljava/io/Serializable;"); } - void AssertMethod(Method* method) { + void AssertMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { MethodHelper mh(method); EXPECT_TRUE(method != NULL); EXPECT_TRUE(method->GetClass() != NULL); @@ -151,7 +156,8 @@ class ClassLinkerTest : public CommonTest { method->GetDexCacheInitializedStaticStorage()); } - void AssertField(Class* klass, Field* field) { + void AssertField(Class* klass, Field* field) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FieldHelper fh(field); EXPECT_TRUE(field != NULL); EXPECT_TRUE(field->GetClass() != NULL); @@ -160,7 +166,8 @@ class ClassLinkerTest : public CommonTest { EXPECT_TRUE(fh.GetType() != NULL); } - void AssertClass(const std::string& descriptor, Class* klass) { + void AssertClass(const 
std::string& descriptor, Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassHelper kh(klass); EXPECT_STREQ(descriptor.c_str(), kh.GetDescriptor()); if (descriptor == "Ljava/lang/Object;") { @@ -283,7 +290,8 @@ class ClassLinkerTest : public CommonTest { total_num_reference_instance_fields == 0); } - void AssertDexFileClass(ClassLoader* class_loader, const std::string& descriptor) { + void AssertDexFileClass(ClassLoader* class_loader, const std::string& descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); ASSERT_TRUE(klass != NULL); @@ -298,7 +306,8 @@ class ClassLinkerTest : public CommonTest { } } - void AssertDexFile(const DexFile* dex, ClassLoader* class_loader) { + void AssertDexFile(const DexFile* dex, ClassLoader* class_loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ASSERT_TRUE(dex != NULL); // Verify all the classes defined in this file @@ -341,7 +350,7 @@ struct CheckOffsets { std::string class_descriptor; std::vector offsets; - bool Check() { + bool Check() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* klass = Runtime::Current()->GetClassLinker()->FindSystemClass(class_descriptor.c_str()); CHECK(klass != NULL) << class_descriptor; @@ -549,21 +558,6 @@ struct ClassLoaderOffsets : public CheckOffsets { }; }; -struct BaseDexClassLoaderOffsets : public CheckOffsets { - BaseDexClassLoaderOffsets() - : CheckOffsets(false, "Ldalvik/system/BaseDexClassLoader;") { - // alphabetical references - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(BaseDexClassLoader, original_library_path_), "originalLibraryPath")); - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(BaseDexClassLoader, original_path_), "originalPath")); - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(BaseDexClassLoader, path_list_), "pathList")); - }; -}; - -struct PathClassLoaderOffsets : public CheckOffsets 
{ - PathClassLoaderOffsets() - : CheckOffsets(false, "Ldalvik/system/PathClassLoader;") {} -}; - struct ProxyOffsets : public CheckOffsets { ProxyOffsets() : CheckOffsets(false, "Ljava/lang/reflect/Proxy;") { // alphabetical references @@ -614,6 +608,7 @@ struct MethodClassOffsets : public CheckOffsets { // reorder the fields in the C++ class. Managed class fields are ordered by // ClassLinker::LinkFields. TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_TRUE(ObjectOffsets().Check()); EXPECT_TRUE(ConstructorOffsets().Check()); EXPECT_TRUE(FieldOffsets().Check()); @@ -623,8 +618,6 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { EXPECT_TRUE(ThrowableOffsets().Check()); EXPECT_TRUE(StackTraceElementOffsets().Check()); EXPECT_TRUE(ClassLoaderOffsets().Check()); - EXPECT_TRUE(BaseDexClassLoaderOffsets().Check()); - EXPECT_TRUE(PathClassLoaderOffsets().Check()); EXPECT_TRUE(ProxyOffsets().Check()); EXPECT_TRUE(ClassClassOffsets().Check()); @@ -634,12 +627,14 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { } TEST_F(ClassLinkerTest, FindClassNonexistent) { + ScopedObjectAccess soa(Thread::Current()); AssertNonExistentClass("NoSuchClass;"); AssertNonExistentClass("LNoSuchClass;"); } TEST_F(ClassLinkerTest, FindClassNested) { - SirtRef class_loader(LoadDex("Nested")); + ScopedObjectAccess soa(Thread::Current()); + SirtRef class_loader(soa.Decode(LoadDex("Nested"))); Class* outer = class_linker_->FindClass("LNested;", class_loader.get()); ASSERT_TRUE(outer != NULL); @@ -653,6 +648,7 @@ TEST_F(ClassLinkerTest, FindClassNested) { } TEST_F(ClassLinkerTest, FindClass_Primitives) { + ScopedObjectAccess soa(Thread::Current()); const std::string expected("BCDFIJSZV"); for (int ch = 1; ch < 256; ++ch) { std::string descriptor; @@ -666,6 +662,7 @@ TEST_F(ClassLinkerTest, FindClass_Primitives) { } TEST_F(ClassLinkerTest, FindClass) { + ScopedObjectAccess 
soa(Thread::Current()); Class* JavaLangObject = class_linker_->FindSystemClass("Ljava/lang/Object;"); ClassHelper kh(JavaLangObject); ASSERT_TRUE(JavaLangObject != NULL); @@ -701,7 +698,7 @@ TEST_F(ClassLinkerTest, FindClass) { EXPECT_EQ(0U, JavaLangObject->NumStaticFields()); EXPECT_EQ(0U, kh.NumDirectInterfaces()); - SirtRef class_loader(LoadDex("MyClass")); + SirtRef class_loader(soa.Decode(LoadDex("MyClass"))); AssertNonExistentClass("LMyClass;"); Class* MyClass = class_linker_->FindClass("LMyClass;", class_loader.get()); kh.ChangeClass(MyClass); @@ -746,12 +743,14 @@ TEST_F(ClassLinkerTest, FindClass) { } TEST_F(ClassLinkerTest, LibCore) { + ScopedObjectAccess soa(Thread::Current()); AssertDexFile(java_lang_dex_file_, NULL); } // The first reference array element must be a multiple of 4 bytes from the // start of the object TEST_F(ClassLinkerTest, ValidateObjectArrayElementsOffset) { + ScopedObjectAccess soa(Thread::Current()); Class* array_class = class_linker_->FindSystemClass("[Ljava/lang/String;"); ObjectArray* array = ObjectArray::Alloc(array_class, 0); uint32_t array_offset = reinterpret_cast(array); @@ -765,6 +764,7 @@ TEST_F(ClassLinkerTest, ValidateObjectArrayElementsOffset) { } TEST_F(ClassLinkerTest, ValidatePrimitiveArrayElementsOffset) { + ScopedObjectAccess soa(Thread::Current()); SirtRef long_array(LongArray::Alloc(0)); EXPECT_EQ(class_linker_->FindSystemClass("[J"), long_array->GetClass()); uintptr_t data_offset = reinterpret_cast(long_array->GetData()); @@ -796,6 +796,7 @@ TEST_F(ClassLinkerTest, ValidatePrimitiveArrayElementsOffset) { TEST_F(ClassLinkerTest, ValidateBoxedTypes) { // Validate that the "value" field is always the 0th field in each of java.lang's box classes. // This lets UnboxPrimitive avoid searching for the field by name at runtime. 
+ ScopedObjectAccess soa(Thread::Current()); Class* c; c = class_linker_->FindClass("Ljava/lang/Boolean;", NULL); FieldHelper fh(c->GetIFields()->Get(0)); @@ -824,8 +825,9 @@ TEST_F(ClassLinkerTest, ValidateBoxedTypes) { } TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) { - SirtRef class_loader_1(LoadDex("MyClass")); - SirtRef class_loader_2(LoadDex("MyClass")); + ScopedObjectAccess soa(Thread::Current()); + SirtRef class_loader_1(soa.Decode(LoadDex("MyClass"))); + SirtRef class_loader_2(soa.Decode(LoadDex("MyClass"))); Class* MyClass_1 = class_linker_->FindClass("LMyClass;", class_loader_1.get()); Class* MyClass_2 = class_linker_->FindClass("LMyClass;", class_loader_2.get()); EXPECT_TRUE(MyClass_1 != NULL); @@ -834,7 +836,8 @@ TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) { } TEST_F(ClassLinkerTest, StaticFields) { - SirtRef class_loader(LoadDex("Statics")); + ScopedObjectAccess soa(Thread::Current()); + SirtRef class_loader(soa.Decode(LoadDex("Statics"))); Class* statics = class_linker_->FindClass("LStatics;", class_loader.get()); class_linker_->EnsureInitialized(statics, true, true); @@ -915,7 +918,8 @@ TEST_F(ClassLinkerTest, StaticFields) { } TEST_F(ClassLinkerTest, Interfaces) { - SirtRef class_loader(LoadDex("Interfaces")); + ScopedObjectAccess soa(Thread::Current()); + SirtRef class_loader(soa.Decode(LoadDex("Interfaces"))); Class* I = class_linker_->FindClass("LInterfaces$I;", class_loader.get()); Class* J = class_linker_->FindClass("LInterfaces$J;", class_loader.get()); Class* K = class_linker_->FindClass("LInterfaces$K;", class_loader.get()); @@ -973,8 +977,10 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) { // case 1, get the uninitialized storage from StaticsFromCode. 
// case 2, get the initialized storage from StaticsFromCode.getS0 - SirtRef class_loader(LoadDex("StaticsFromCode")); - const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader.get())[0]; + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("StaticsFromCode"); + SirtRef class_loader(soa.Decode(jclass_loader)); + const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0]; CHECK(dex_file != NULL); Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader.get()); @@ -995,6 +1001,7 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) { } TEST_F(ClassLinkerTest, FinalizableBit) { + ScopedObjectAccess soa(Thread::Current()); Class* c; // Object has a finalize method, but we know it's empty. @@ -1028,6 +1035,7 @@ TEST_F(ClassLinkerTest, FinalizableBit) { } TEST_F(ClassLinkerTest, ClassRootDescriptors) { + ScopedObjectAccess soa(Thread::Current()); ClassHelper kh; for (int i = 0; i < ClassLinker::kClassRootsMax; i++) { Class* klass = class_linker_->GetClassRoot(ClassLinker::ClassRoot(i)); diff --git a/src/class_loader.cc b/src/class_loader.cc deleted file mode 100644 index 3adb4ec0b6..0000000000 --- a/src/class_loader.cc +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "class_loader.h" - -#include "class_linker.h" -#include "runtime.h" - -namespace art { - -// TODO: get global references for these -Class* PathClassLoader::dalvik_system_PathClassLoader_ = NULL; - -PathClassLoader* PathClassLoader::AllocCompileTime(std::vector& dex_files) { - CHECK(!Runtime::Current()->IsStarted()); - DCHECK(dalvik_system_PathClassLoader_ != NULL); - SirtRef p(down_cast(dalvik_system_PathClassLoader_->AllocObject())); - Runtime::Current()->SetCompileTimeClassPath(p.get(), dex_files); - return p.get(); -} - -void PathClassLoader::SetClass(Class* dalvik_system_PathClassLoader) { - CHECK(dalvik_system_PathClassLoader_ == NULL); - CHECK(dalvik_system_PathClassLoader != NULL); - dalvik_system_PathClassLoader_ = dalvik_system_PathClassLoader; -} - -void PathClassLoader::ResetClass() { - CHECK(dalvik_system_PathClassLoader_ != NULL); - dalvik_system_PathClassLoader_ = NULL; -} - -} // namespace art diff --git a/src/class_loader.h b/src/class_loader.h index 0e1148d961..029c4a20ca 100644 --- a/src/class_loader.h +++ b/src/class_loader.h @@ -36,30 +36,6 @@ class MANAGED ClassLoader : public Object { DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader); }; -// C++ mirror of dalvik.system.BaseDexClassLoader -class MANAGED BaseDexClassLoader : public ClassLoader { - private: - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". 
- String* original_library_path_; - String* original_path_; - Object* path_list_; - - friend struct BaseDexClassLoaderOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(BaseDexClassLoader); -}; - -// C++ mirror of dalvik.system.PathClassLoader -class MANAGED PathClassLoader : public BaseDexClassLoader { - public: - static PathClassLoader* AllocCompileTime(std::vector& dex_files); - static void SetClass(Class* dalvik_system_PathClassLoader); - static void ResetClass(); - private: - static Class* dalvik_system_PathClassLoader_; - friend struct PathClassLoaderOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(PathClassLoader); -}; - } // namespace art #endif // ART_SRC_CLASS_LOADER_H_ diff --git a/src/common_test.h b/src/common_test.h index 58d0219643..41dc76c880 100644 --- a/src/common_test.h +++ b/src/common_test.h @@ -33,12 +33,15 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" +#include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "stl_util.h" #include "stringprintf.h" #include "thread.h" #include "unicode/uclean.h" #include "unicode/uvernum.h" #include "UniquePtr.h" +#include "well_known_classes.h" namespace art { @@ -206,7 +209,7 @@ class CommonTest : public testing::Test { ); } - void MakeExecutable(Method* method) { + void MakeExecutable(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(method != NULL); MethodHelper mh(method); @@ -328,7 +331,17 @@ class CommonTest : public testing::Test { options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast(NULL))); options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast(NULL))); options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast(NULL))); - runtime_.reset(Runtime::Create(options, false)); + if(!Runtime::Create(options, false)) { + LOG(FATAL) << "Failed to create runtime"; + return; + } + runtime_.reset(Runtime::Current()); + // Runtime::Create 
acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more managable ScopedObjectAccess. + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + // Whilst we're in native take the opportunity to initialize well known classes. + WellKnownClasses::InitClasses(Thread::Current()->GetJniEnv()); + ScopedObjectAccess soa(Thread::Current()); ASSERT_TRUE(runtime_.get() != NULL); class_linker_ = runtime_->GetClassLinker(); @@ -362,7 +375,7 @@ class CommonTest : public testing::Test { compiler_.reset(new Compiler(instruction_set, true, 2, false, image_classes_.get(), true, true)); - Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption before the test + runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test } virtual void TearDown() { @@ -436,16 +449,20 @@ class CommonTest : public testing::Test { return dex_file; } - ClassLoader* LoadDex(const char* dex_name) { + jobject LoadDex(const char* dex_name) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile* dex_file = OpenTestDexFile(dex_name); CHECK(dex_file != NULL); class_linker_->RegisterDexFile(*dex_file); std::vector class_path; class_path.push_back(dex_file); - SirtRef class_loader(PathClassLoader::AllocCompileTime(class_path)); - CHECK(class_loader.get() != NULL); - Thread::Current()->SetClassLoaderOverride(class_loader.get()); - return class_loader.get(); + ScopedObjectAccessUnchecked soa(Thread::Current()); + ScopedLocalRef class_loader_local(soa.Env(), + soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader)); + jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get()); + soa.Self()->SetClassLoaderOverride(soa.Decode(class_loader_local.get())); + Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path); + return class_loader; } void CompileClass(ClassLoader* class_loader, const char* class_name) { @@ -460,7 +477,7 @@ class 
CommonTest : public testing::Test { } } - void CompileMethod(Method* method) { + void CompileMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(method != NULL); compiler_->CompileOne(method); MakeExecutable(method); @@ -471,7 +488,8 @@ class CommonTest : public testing::Test { void CompileDirectMethod(ClassLoader* class_loader, const char* class_name, const char* method_name, - const char* signature) { + const char* signature) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; @@ -484,7 +502,8 @@ class CommonTest : public testing::Test { void CompileVirtualMethod(ClassLoader* class_loader, const char* class_name, const char* method_name, - const char* signature) { + const char* signature) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; diff --git a/src/compiler.cc b/src/compiler.cc index ceb9d1167a..bcbb77d120 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -27,9 +27,12 @@ #include "jni_internal.h" #include "oat_compilation_unit.h" #include "oat_file.h" +#include "oat/runtime/stub.h" #include "object_utils.h" #include "runtime.h" #include "space.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" #include "stl_util.h" #include "timing_logger.h" #include "verifier/method_verifier.h" @@ -40,17 +43,6 @@ namespace art { -namespace arm { - ByteArray* CreateAbstractMethodErrorStub(); - ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type); - ByteArray* CreateJniDlsymLookupStub(); -} -namespace x86 { - ByteArray* CreateAbstractMethodErrorStub(); - ByteArray* 
X86CreateResolutionTrampoline(Runtime::TrampolineType type); - ByteArray* CreateJniDlsymLookupStub(); -} - static double Percentage(size_t x, size_t y) { return 100.0 * (static_cast(x)) / (static_cast(x + y)); } @@ -311,6 +303,7 @@ Compiler::Compiler(InstructionSet instruction_set, bool image, size_t thread_cou image_(image), thread_count_(thread_count), support_debugging_(support_debugging), + start_ns_(0), stats_(new AOTCompilationStats), dump_stats_(dump_stats), dump_timings_(dump_timings), @@ -435,7 +428,7 @@ ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_se } } -void Compiler::CompileAll(ClassLoader* class_loader, +void Compiler::CompileAll(jobject class_loader, const std::vector& dex_files) { DCHECK(!Runtime::Current()->IsStarted()); @@ -464,27 +457,41 @@ void Compiler::CompileAll(ClassLoader* class_loader, void Compiler::CompileOne(const Method* method) { DCHECK(!Runtime::Current()->IsStarted()); + Thread* self = Thread::Current(); + jobject class_loader; + const DexCache* dex_cache; + const DexFile* dex_file; + { + ScopedObjectAccessUnchecked soa(self); + ScopedLocalRef + local_class_loader(soa.Env(), + soa.AddLocalReference(method->GetDeclaringClass()->GetClassLoader())); + class_loader = soa.Env()->NewGlobalRef(local_class_loader.get()); + // Find the dex_file + dex_cache = method->GetDeclaringClass()->GetDexCache(); + dex_file = &Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache); + } + self->TransitionFromRunnableToSuspended(kNative); - ClassLoader* class_loader = method->GetDeclaringClass()->GetClassLoader(); - - // Find the dex_file - const DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); - const DexFile& dex_file = Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache); std::vector dex_files; - dex_files.push_back(&dex_file); + dex_files.push_back(dex_file); TimingLogger timings("CompileOne"); PreCompile(class_loader, dex_files, timings); uint32_t method_idx = 
method->GetDexMethodIndex(); - const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset()); - CompileMethod(code_item, method->GetAccessFlags(), method_idx, class_loader, dex_file); + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); + CompileMethod(code_item, method->GetAccessFlags(), method_idx, class_loader, *dex_file); PostCompile(class_loader, dex_files); + + self->GetJniEnv()->DeleteGlobalRef(class_loader); + + self->TransitionFromSuspendedToRunnable(); } -void Compiler::Resolve(ClassLoader* class_loader, - const std::vector& dex_files, TimingLogger& timings) { +void Compiler::Resolve(jobject class_loader, const std::vector& dex_files, + TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -492,8 +499,8 @@ void Compiler::Resolve(ClassLoader* class_loader, } } -void Compiler::PreCompile(ClassLoader* class_loader, - const std::vector& dex_files, TimingLogger& timings) { +void Compiler::PreCompile(jobject class_loader, const std::vector& dex_files, + TimingLogger& timings) { Resolve(class_loader, dex_files, timings); Verify(class_loader, dex_files); @@ -503,8 +510,7 @@ void Compiler::PreCompile(ClassLoader* class_loader, timings.AddSplit("PreCompile.InitializeClassesWithoutClinit"); } -void Compiler::PostCompile(ClassLoader* class_loader, - const std::vector& dex_files) { +void Compiler::PostCompile(jobject class_loader, const std::vector& dex_files) { SetGcMaps(class_loader, dex_files); } @@ -515,8 +521,10 @@ bool Compiler::IsImageClass(const std::string& descriptor) const { return image_classes_->find(descriptor) != image_classes_->end(); } -bool Compiler::CanAssumeTypeIsPresentInDexCache(const DexCache* dex_cache, +bool Compiler::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) { + ScopedObjectAccess soa(Thread::Current()); + DexCache* dex_cache = 
Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); if (!IsImage()) { stats_->TypeNotInDexCache(); return false; @@ -535,14 +543,19 @@ bool Compiler::CanAssumeTypeIsPresentInDexCache(const DexCache* dex_cache, return result; } -bool Compiler::CanAssumeStringIsPresentInDexCache(const DexCache* dex_cache, +bool Compiler::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) { // TODO: Add support for loading strings referenced by image_classes_ // See also Compiler::ResolveDexFile // The following is a test saying that if we're building the image without a restricted set of // image classes then we can assume the string is present in the dex cache if it is there now - bool result = IsImage() && image_classes_ == NULL && dex_cache->GetResolvedString(string_idx) != NULL; + bool result = IsImage() && image_classes_ == NULL; + if (result) { + ScopedObjectAccess soa(Thread::Current()); + DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + result = dex_cache->GetResolvedString(string_idx) != NULL; + } if (result) { stats_->StringInDexCache(); } else { @@ -551,8 +564,10 @@ bool Compiler::CanAssumeStringIsPresentInDexCache(const DexCache* dex_cache, return result; } -bool Compiler::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexCache* dex_cache, - const DexFile& dex_file, uint32_t type_idx) { +bool Compiler::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, + uint32_t type_idx) { + ScopedObjectAccess soa(Thread::Current()); + DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); // Get type from dex cache assuming it was populated by the verifier Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == NULL) { @@ -577,9 +592,10 @@ bool Compiler::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexCache* } bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, - const DexCache* 
dex_cache, const DexFile& dex_file, uint32_t type_idx) { + ScopedObjectAccess soa(Thread::Current()); + DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); // Get type from dex cache assuming it was populated by the verifier. Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == NULL) { @@ -603,36 +619,44 @@ bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, return result; } -static Class* ComputeReferrerClass(OatCompilationUnit* mUnit) { - const DexFile::MethodId& referrer_method_id = - mUnit->dex_file_->GetMethodId(mUnit->method_idx_); - - return mUnit->class_linker_->ResolveType( - *mUnit->dex_file_, referrer_method_id.class_idx_, - mUnit->dex_cache_, mUnit->class_loader_); +static Class* ComputeReferrerClass(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + const DexFile::MethodId& referrer_method_id = mUnit->dex_file_->GetMethodId(mUnit->method_idx_); + return mUnit->class_linker_->ResolveType(*mUnit->dex_file_, referrer_method_id.class_idx_, + dex_cache, class_loader); } -static Field* ComputeReferrerField(OatCompilationUnit* mUnit, uint32_t field_idx) { - return mUnit->class_linker_->ResolveField( - *mUnit->dex_file_, field_idx, mUnit->dex_cache_, - mUnit->class_loader_, false); +static Field* ComputeReferrerField(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit, uint32_t field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + return mUnit->class_linker_->ResolveField(*mUnit->dex_file_, field_idx, dex_cache, + class_loader, false); } -static Method* ComputeReferrerMethod(OatCompilationUnit* mUnit, 
uint32_t method_idx) { - return mUnit->class_linker_->ResolveMethod( - *mUnit->dex_file_, method_idx, mUnit->dex_cache_, - mUnit->class_loader_, true); +static Method* ComputeReferrerMethod(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit, uint32_t method_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + return mUnit->class_linker_->ResolveMethod(*mUnit->dex_file_, method_idx, dex_cache, + class_loader, true); } bool Compiler::ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, bool& is_volatile, bool is_put) { + ScopedObjectAccess soa(Thread::Current()); // Conservative defaults field_offset = -1; is_volatile = true; // Try to resolve field - Field* resolved_field = ComputeReferrerField(mUnit, field_idx); + Field* resolved_field = ComputeReferrerField(soa, mUnit, field_idx); if (resolved_field != NULL) { - Class* referrer_class = ComputeReferrerClass(mUnit); + Class* referrer_class = ComputeReferrerClass(soa, mUnit); if (referrer_class != NULL) { Class* fields_class = resolved_field->GetDeclaringClass(); bool access_ok = referrer_class->CanAccess(fields_class) && @@ -661,9 +685,8 @@ bool Compiler::ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* } } // Clean up any exception left by field/type resolution - Thread* thread = Thread::Current(); - if (thread->IsExceptionPending()) { - thread->ClearException(); + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); } stats_->UnresolvedInstanceField(); return false; // Incomplete knowledge needs slow path. 
@@ -672,16 +695,17 @@ bool Compiler::ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, int& ssb_index, bool& is_referrers_class, bool& is_volatile, bool is_put) { + ScopedObjectAccess soa(Thread::Current()); // Conservative defaults field_offset = -1; ssb_index = -1; is_referrers_class = false; is_volatile = true; // Try to resolve field - Field* resolved_field = ComputeReferrerField(mUnit, field_idx); + Field* resolved_field = ComputeReferrerField(soa, mUnit, field_idx); if (resolved_field != NULL) { DCHECK(resolved_field->IsStatic()); - Class* referrer_class = ComputeReferrerClass(mUnit); + Class* referrer_class = ComputeReferrerClass(soa, mUnit); if (referrer_class != NULL) { Class* fields_class = resolved_field->GetDeclaringClass(); if (fields_class == referrer_class) { @@ -714,7 +738,8 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU // in its static storage base (which may fail if it doesn't have a slot for it) // TODO: for images we can elide the static storage base null check // if we know there's a non-null entry in the image - if (fields_class->GetDexCache() == mUnit->dex_cache_) { + DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + if (fields_class->GetDexCache() == dex_cache) { // common case where the dex cache of both the referrer and the field are the same, // no need to search the dex file ssb_index = fields_class->GetDexTypeIndex(); @@ -745,9 +770,8 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU } } // Clean up any exception left by field/type resolution - Thread* thread = Thread::Current(); - if (thread->IsExceptionPending()) { - thread->ClearException(); + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); } stats_->UnresolvedStaticField(); return false; // Incomplete knowledge needs slow path. 
@@ -793,12 +817,13 @@ void Compiler::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_t bool Compiler::ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, InvokeType& type, int& vtable_idx, uintptr_t& direct_code, uintptr_t& direct_method) { + ScopedObjectAccess soa(Thread::Current()); vtable_idx = -1; direct_code = 0; direct_method = 0; - Method* resolved_method = ComputeReferrerMethod(mUnit, method_idx); + Method* resolved_method = ComputeReferrerMethod(soa, mUnit, method_idx); if (resolved_method != NULL) { - Class* referrer_class = ComputeReferrerClass(mUnit); + Class* referrer_class = ComputeReferrerClass(soa, mUnit); if (referrer_class != NULL) { Class* methods_class = resolved_method->GetDeclaringClass(); if (!referrer_class->CanAccess(methods_class) || @@ -847,40 +872,35 @@ bool Compiler::ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, } } // Clean up any exception left by method/type resolution - Thread* thread = Thread::Current(); - if (thread->IsExceptionPending()) { - thread->ClearException(); + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); } stats_->UnresolvedMethod(type); return false; // Incomplete knowledge needs slow path. 
} -void Compiler::AddCodePatch(DexCache* dex_cache, - const DexFile* dex_file, +void Compiler::AddCodePatch(const DexFile* dex_file, uint32_t referrer_method_idx, uint32_t referrer_access_flags, uint32_t target_method_idx, bool target_is_direct, size_t literal_offset) { MutexLock mu(compiled_methods_lock_); - code_to_patch_.push_back(new PatchInformation(dex_cache, - dex_file, + code_to_patch_.push_back(new PatchInformation(dex_file, referrer_method_idx, referrer_access_flags, target_method_idx, target_is_direct, literal_offset)); } -void Compiler::AddMethodPatch(DexCache* dex_cache, - const DexFile* dex_file, +void Compiler::AddMethodPatch(const DexFile* dex_file, uint32_t referrer_method_idx, uint32_t referrer_access_flags, uint32_t target_method_idx, bool target_is_direct, size_t literal_offset) { MutexLock mu(compiled_methods_lock_); - methods_to_patch_.push_back(new PatchInformation(dex_cache, - dex_file, + methods_to_patch_.push_back(new PatchInformation(dex_file, referrer_method_idx, referrer_access_flags, target_method_idx, @@ -888,73 +908,47 @@ void Compiler::AddMethodPatch(DexCache* dex_cache, literal_offset)); } -// Return true if the class should be skipped during compilation. We -// never skip classes in the boot class loader. However, if we have a -// non-boot class loader and we can resolve the class in the boot -// class loader, we do skip the class. This happens if an app bundles -// classes found in the boot classpath. Since at runtime we will -// select the class from the boot classpath, do not attempt to resolve -// or compile it now. 
-static bool SkipClass(ClassLoader* class_loader, - const DexFile& dex_file, - const DexFile::ClassDef& class_def) { - if (class_loader == NULL) { - return false; - } - const char* descriptor = dex_file.GetClassDescriptor(class_def); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* klass = class_linker->FindClass(descriptor, NULL); - if (klass == NULL) { - Thread* self = Thread::Current(); - CHECK(self->IsExceptionPending()); - self->ClearException(); - return false; - } - return true; -} - class CompilationContext { public: CompilationContext(ClassLinker* class_linker, - ClassLoader* class_loader, + jobject class_loader, Compiler* compiler, - DexCache* dex_cache, const DexFile* dex_file) : class_linker_(class_linker), class_loader_(class_loader), compiler_(compiler), - dex_cache_(dex_cache), dex_file_(dex_file) {} - ClassLinker* GetClassLinker() { + ClassLinker* GetClassLinker() const { CHECK(class_linker_ != NULL); return class_linker_; } - ClassLoader* GetClassLoader() { + + jobject GetClassLoader() const { return class_loader_; } - Compiler* GetCompiler() { + + Compiler* GetCompiler() const { CHECK(compiler_ != NULL); return compiler_; } - DexCache* GetDexCache() { - CHECK(dex_cache_ != NULL); - return dex_cache_; - } - const DexFile* GetDexFile() { + + const DexFile* GetDexFile() const { CHECK(dex_file_ != NULL); return dex_file_; } private: - ClassLinker* class_linker_; - ClassLoader* class_loader_; - Compiler* compiler_; - DexCache* dex_cache_; - const DexFile* dex_file_; + ClassLinker* const class_linker_; + const jobject class_loader_; + Compiler* const compiler_; + const DexFile* const dex_file_; }; -typedef void Callback(CompilationContext* context, size_t index); +typedef void Callback(const CompilationContext* context, size_t index); + +static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, + size_t thread_count); class WorkerThread { public: @@ -977,48 +971,49 @@ class WorkerThread { } 
private: - static void* Go(void* arg) { + static void* Go(void* arg) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { WorkerThread* worker = reinterpret_cast(arg); Runtime* runtime = Runtime::Current(); if (worker->spawn_) { runtime->AttachCurrentThread("Compiler Worker", true, NULL); } - Thread::Current()->SetState(kRunnable); worker->Run(); if (worker->spawn_) { - Thread::Current()->SetState(kNative); runtime->DetachCurrentThread(); } return NULL; } - void Go() { + void Go() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { Go(this); } - void Run() { + void Run() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { Thread* self = Thread::Current(); for (size_t i = begin_; i < end_; i += stripe_) { callback_(context_, i); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()) << " " << i; + self->AssertNoPendingException(); } } pthread_t pthread_; - bool spawn_; + // Was this thread spawned or is it the main thread? + const bool spawn_; - CompilationContext* context_; - size_t begin_; - size_t end_; - Callback* callback_; - size_t stripe_; + const CompilationContext* const context_; + const size_t begin_; + const size_t end_; + const Callback* callback_; + const size_t stripe_; friend void ForAll(CompilationContext*, size_t, size_t, Callback, size_t); }; -void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t thread_count) { +static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, + size_t thread_count) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { Thread* self = Thread::Current(); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); CHECK_GT(thread_count, 0U); std::vector threads; @@ -1027,12 +1022,45 @@ void ForAll(CompilationContext* context, size_t begin, size_t end, Callback call } threads[0]->Go(); - // Switch to kVmWait while we're blocked waiting for the other threads to finish. 
- ScopedThreadStateChange tsc(self, kVmWait); + // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker + // thread destructor's called below perform join). + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(self->GetState(), kRunnable); + } STLDeleteElements(&threads); } -static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t class_def_index) { +// Return true if the class should be skipped during compilation. We +// never skip classes in the boot class loader. However, if we have a +// non-boot class loader and we can resolve the class in the boot +// class loader, we do skip the class. This happens if an app bundles +// classes found in the boot classpath. Since at runtime we will +// select the class from the boot classpath, do not attempt to resolve +// or compile it now. +static bool SkipClass(ClassLoader* class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& class_def) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + if (class_loader == NULL) { + return false; + } + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Class* klass = class_linker->FindClass(descriptor, NULL); + if (klass == NULL) { + Thread* self = Thread::Current(); + CHECK(self->IsExceptionPending()); + self->ClearException(); + return false; + } + return true; +} + +static void ResolveClassFieldsAndMethods(const CompilationContext* context, size_t class_def_index) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + ScopedObjectAccess soa(Thread::Current()); + ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); const DexFile& dex_file = *context->GetDexFile(); // Method and Field are the worst. 
We can't resolve without either @@ -1043,7 +1071,7 @@ static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla // definitions, since many of them many never be referenced by // generated code. const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - if (SkipClass(context->GetClassLoader(), dex_file, class_def)) { + if (SkipClass(class_loader, dex_file, class_def)) { return; } @@ -1061,7 +1089,7 @@ static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla ClassDataItemIterator it(dex_file, class_data); while (it.HasNextStaticField()) { Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, - context->GetClassLoader(), true); + class_loader, true); if (field == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1070,7 +1098,7 @@ static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla } while (it.HasNextInstanceField()) { Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, - context->GetClassLoader(), false); + class_loader, false); if (field == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1079,7 +1107,7 @@ static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla } while (it.HasNextDirectMethod()) { Method* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), dex_cache, - context->GetClassLoader(), true); + class_loader, true); if (method == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1088,7 +1116,7 @@ static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla } while (it.HasNextVirtualMethod()) { Method* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), dex_cache, - context->GetClassLoader(), false); + class_loader, false); if (method == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1098,43 +1126,38 @@ static void 
ResolveClassFieldsAndMethods(CompilationContext* context, size_t cla DCHECK(!it.HasNext()); } -static void ResolveType(CompilationContext* context, size_t type_idx) { +static void ResolveType(const CompilationContext* context, size_t type_idx) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { // Class derived values are more complicated, they require the linker and loader. - Thread* self = Thread::Current(); - Class* klass = context->GetClassLinker()->ResolveType(*context->GetDexFile(), - type_idx, - context->GetDexCache(), - context->GetClassLoader()); + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = context->GetClassLinker(); + const DexFile& dex_file = *context->GetDexFile(); + DexCache* dex_cache = class_linker->FindDexCache(dex_file); + ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + if (klass == NULL) { - CHECK(self->IsExceptionPending()); + CHECK(soa.Self()->IsExceptionPending()); Thread::Current()->ClearException(); } } -void Compiler::ResolveDexFile(ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings) { +void Compiler::ResolveDexFile(jobject class_loader, const DexFile& dex_file, + TimingLogger& timings) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - DexCache* dex_cache = class_linker->FindDexCache(dex_file); - // Strings are easy in that they always are simply resolved to literals in the same file - if (image_ && image_classes_ == NULL) { - // TODO: Add support for loading strings referenced by image_classes_ - // See also Compiler::CanAssumeTypeIsPresentInDexCache. 
- for (size_t string_idx = 0; string_idx < dex_cache->NumStrings(); string_idx++) { - class_linker->ResolveString(dex_file, string_idx, dex_cache); - } - timings.AddSplit("Resolve " + dex_file.GetLocation() + " Strings"); - } + // TODO: we could resolve strings here, although the string table is largely filled with class + // and method names. - CompilationContext context(class_linker, class_loader, this, dex_cache, &dex_file); - ForAll(&context, 0, dex_cache->NumResolvedTypes(), ResolveType, thread_count_); + CompilationContext context(class_linker, class_loader, this, &dex_file); + ForAll(&context, 0, dex_file.NumTypeIds(), ResolveType, thread_count_); timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types"); ForAll(&context, 0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields"); } -void Compiler::Verify(ClassLoader* class_loader, - const std::vector& dex_files) { +void Compiler::Verify(jobject class_loader, const std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -1142,10 +1165,14 @@ void Compiler::Verify(ClassLoader* class_loader, } } -static void VerifyClass(CompilationContext* context, size_t class_def_index) { +static void VerifyClass(const CompilationContext* context, size_t class_def_index) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + ScopedObjectAccess soa(Thread::Current()); const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def); - Class* klass = context->GetClassLinker()->FindClass(descriptor, context->GetClassLoader()); + Class* klass = + context->GetClassLinker()->FindClass(descriptor, + soa.Decode(context->GetClassLoader())); if (klass == NULL) { Thread* self = Thread::Current(); CHECK(self->IsExceptionPending()); @@ -1156,9 +1183,13 
@@ static void VerifyClass(CompilationContext* context, size_t class_def_index) { * This is to ensure the class is structurally sound for compilation. An unsound class * will be rejected by the verifier and later skipped during compilation in the compiler. */ + DexCache* dex_cache = context->GetClassLinker()->FindDexCache(*context->GetDexFile()); std::string error_msg; - if (verifier::MethodVerifier::VerifyClass(context->GetDexFile(), context->GetDexCache(), - context->GetClassLoader(), class_def_index, error_msg) == verifier::MethodVerifier::kHardFailure) { + if (verifier::MethodVerifier::VerifyClass(context->GetDexFile(), + dex_cache, + soa.Decode(context->GetClassLoader()), + class_def_index, error_msg) == + verifier::MethodVerifier::kHardFailure) { const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(context->GetDexFile()->GetClassDescriptor(class_def)) @@ -1173,24 +1204,32 @@ static void VerifyClass(CompilationContext* context, size_t class_def_index) { // ClassLinker::VerifyClass throws, which isn't useful in the compiler. 
CHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); - art::Compiler::ClassReference ref(context->GetDexFile(), class_def_index); } CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) << PrettyClass(klass); CHECK(!Thread::Current()->IsExceptionPending()) << PrettyTypeOf(Thread::Current()->GetException()); } -void Compiler::VerifyDexFile(ClassLoader* class_loader, const DexFile& dex_file) { +void Compiler::VerifyDexFile(jobject class_loader, const DexFile& dex_file) { dex_file.ChangePermissions(PROT_READ | PROT_WRITE); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - CompilationContext context(class_linker, class_loader, this, class_linker->FindDexCache(dex_file), &dex_file); + jobject dex_cache; + { + ScopedObjectAccess soa(Thread::Current()); + ScopedLocalRef + dex_cache_local(soa.Env(), + soa.AddLocalReference(class_linker->FindDexCache(dex_file))); + dex_cache = soa.Env()->NewGlobalRef(dex_cache_local.get()); + } + CompilationContext context(class_linker, class_loader, this, &dex_file); ForAll(&context, 0, dex_file.NumClassDefs(), VerifyClass, thread_count_); + Thread::Current()->GetJniEnv()->DeleteGlobalRef(dex_cache); dex_file.ChangePermissions(PROT_READ); } -void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, +void Compiler::InitializeClassesWithoutClinit(jobject class_loader, const std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; @@ -1199,7 +1238,9 @@ void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, } } -void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, const DexFile& dex_file) { +void Compiler::InitializeClassesWithoutClinit(jobject jni_class_loader, const DexFile& dex_file) { + ScopedObjectAccess soa(Thread::Current()); + ClassLoader* class_loader = soa.Decode(jni_class_loader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (size_t 
class_def_index = 0; class_def_index < dex_file.NumClassDefs(); class_def_index++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); @@ -1216,9 +1257,9 @@ void Compiler::InitializeClassesWithoutClinit(ClassLoader* class_loader, const D // record the final class status if necessary Class::Status status = klass->GetStatus(); ClassReference ref(&dex_file, class_def_index); - MutexLock mu(compiled_classes_lock_); CompiledClass* compiled_class = GetCompiledClass(ref); if (compiled_class == NULL) { + MutexLock mu(compiled_classes_lock_); compiled_class = new CompiledClass(status); compiled_classes_.Put(ref, compiled_class); } else { @@ -1274,10 +1315,11 @@ class DexFilesWorkerThread { if (worker->spawn_) { runtime->AttachCurrentThread("Compiler Worker", true, NULL); } - Thread::Current()->SetState(kRunnable); - worker->Run(); + { + ScopedObjectAccess soa(Thread::Current()); + worker->Run(); + } if (worker->spawn_) { - Thread::Current()->SetState(kNative); runtime->DetachCurrentThread(); } return NULL; @@ -1296,12 +1338,12 @@ class DexFilesWorkerThread { // Destroy the old context delete context_; - // TODO: Add a callback to let the client specify the class_linker and - // dex_cache in the context for the current working dex file. + // TODO: Add a callback to let the client specify the class_linker in the context for the + // current working dex file. 
context_ = new CompilationContext(/* class_linker */NULL, worker_context_->GetClassLoader(), worker_context_->GetCompiler(), - /* dex_cache */NULL, dex_file); + dex_file); CHECK(context_ != NULL); } @@ -1314,8 +1356,7 @@ class DexFilesWorkerThread { SwitchToDexFile(0); while (true) { - size_t class_index = - static_cast(android_atomic_inc(shared_class_index_)); + size_t class_index = static_cast(android_atomic_inc(shared_class_index_)); const DexFile* dex_file; do { @@ -1339,7 +1380,7 @@ class DexFilesWorkerThread { class_index -= class_index_base; class_callback_(context_, class_index); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); } } @@ -1362,7 +1403,7 @@ void ForClassesInAllDexFiles(CompilationContext* worker_context, const std::vector& dex_files, Callback class_callback, size_t thread_count) { Thread* self = Thread::Current(); - CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException()); + self->AssertNoPendingException(); CHECK_GT(thread_count, 0U); std::vector threads; @@ -1375,13 +1416,16 @@ void ForClassesInAllDexFiles(CompilationContext* worker_context, } threads[0]->Go(); - // Switch to kVmWait while we're blocked waiting for the other threads to finish. - ScopedThreadStateChange tsc(self, kVmWait); + // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker + // thread destructor's called below perform join). 
+ { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(self->GetState(), kRunnable); + } STLDeleteElements(&threads); } -void Compiler::Compile(ClassLoader* class_loader, - const std::vector& dex_files) { +void Compiler::Compile(jobject class_loader, const std::vector& dex_files) { #if defined(ART_USE_LLVM_COMPILER) if (dex_files.size() <= 0) { return; // No dex file @@ -1397,12 +1441,16 @@ void Compiler::Compile(ClassLoader* class_loader, #endif } -void Compiler::CompileClass(CompilationContext* context, size_t class_def_index) { - ClassLoader* class_loader = context->GetClassLoader(); +void Compiler::CompileClass(const CompilationContext* context, size_t class_def_index) { + jobject class_loader = context->GetClassLoader(); const DexFile& dex_file = *context->GetDexFile(); const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - if (SkipClass(class_loader, dex_file, class_def)) { - return; + { + ScopedObjectAccess soa(Thread::Current()); + ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + if (SkipClass(class_loader, dex_file, class_def)) { + return; + } } ClassReference ref(&dex_file, class_def_index); // Skip compiling classes with generic verifier failures since they will still fail at runtime @@ -1455,8 +1503,8 @@ void Compiler::CompileClass(CompilationContext* context, size_t class_def_index) DCHECK(!it.HasNext()); } -void Compiler::CompileDexFile(ClassLoader* class_loader, const DexFile& dex_file) { - CompilationContext context(NULL, class_loader, this, NULL, &dex_file); +void Compiler::CompileDexFile(jobject class_loader, const DexFile& dex_file) { + CompilationContext context(NULL, class_loader, this, &dex_file); ForAll(&context, 0, dex_file.NumClassDefs(), Compiler::CompileClass, thread_count_); } @@ -1469,7 +1517,7 @@ static std::string MakeInvokeStubKey(bool is_static, const char* shorty) { } void Compiler::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, - 
uint32_t method_idx, ClassLoader* class_loader, + uint32_t method_idx, jobject class_loader, const DexFile& dex_file) { CompiledMethod* compiled_method = NULL; uint64_t start_ns = NanoTime(); @@ -1492,8 +1540,10 @@ void Compiler::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access if (compiled_method != NULL) { MethodReference ref(&dex_file, method_idx); CHECK(GetCompiledMethod(ref) == NULL) << PrettyMethod(method_idx, dex_file); - MutexLock mu(compiled_methods_lock_); - compiled_methods_.Put(ref, compiled_method); + { + MutexLock mu(compiled_methods_lock_); + compiled_methods_.Put(ref, compiled_method); + } DCHECK(GetCompiledMethod(ref) != NULL) << PrettyMethod(method_idx, dex_file); } @@ -1519,7 +1569,11 @@ void Compiler::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access } #endif - CHECK(!Thread::Current()->IsExceptionPending()) << PrettyMethod(method_idx, dex_file); + if (Thread::Current()->IsExceptionPending()) { + ScopedObjectAccess soa(Thread::Current()); + LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" + << Thread::Current()->GetException()->Dump(); + } } const CompiledInvokeStub* Compiler::FindInvokeStub(bool is_static, const char* shorty) const { @@ -1595,7 +1649,7 @@ CompiledMethod* Compiler::GetCompiledMethod(MethodReference ref) const { return it->second; } -void Compiler::SetGcMaps(ClassLoader* class_loader, const std::vector& dex_files) { +void Compiler::SetGcMaps(jobject class_loader, const std::vector& dex_files) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -1603,7 +1657,9 @@ void Compiler::SetGcMaps(ClassLoader* class_loader, const std::vector(jni_class_loader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(dex_file); for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs(); class_def_index++) { diff --git 
a/src/compiler.h b/src/compiler.h index 85f3ead26f..01ef037be2 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -52,11 +52,12 @@ class Compiler { ~Compiler(); - void CompileAll(ClassLoader* class_loader, - const std::vector& dex_files); + void CompileAll(jobject class_loader, const std::vector& dex_files) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Compile a single Method - void CompileOne(const Method* method); + void CompileOne(const Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsDebuggingSupported() { return support_debugging_; @@ -71,75 +72,89 @@ class Compiler { } // Stub to throw AbstractMethodError - static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set); + static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Generate the trampoline that's invoked by unresolved direct methods static ByteArray* CreateResolutionStub(InstructionSet instruction_set, - Runtime::TrampolineType type); + Runtime::TrampolineType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set); + static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile typedef std::pair ClassReference; - CompiledClass* GetCompiledClass(ClassReference ref) const; + CompiledClass* GetCompiledClass(ClassReference ref) const + LOCKS_EXCLUDED(compiled_classes_lock_); // A method is uniquely located by its DexFile and the method_ids_ table index into that DexFile typedef std::pair MethodReference; - CompiledMethod* GetCompiledMethod(MethodReference ref) const; + CompiledMethod* GetCompiledMethod(MethodReference ref) const + LOCKS_EXCLUDED(compiled_methods_lock_); const 
CompiledInvokeStub* FindInvokeStub(bool is_static, const char* shorty) const; - const CompiledInvokeStub* FindInvokeStub(const std::string& key) const; + const CompiledInvokeStub* FindInvokeStub(const std::string& key) const + LOCKS_EXCLUDED(compiled_invoke_stubs_lock_); #if defined(ART_USE_LLVM_COMPILER) const CompiledInvokeStub* FindProxyStub(const char* shorty) const; #endif - // Callbacks from OAT/ART compiler to see what runtime checks must be generated + // Callbacks from compiler to see what runtime checks must be generated. - bool CanAssumeTypeIsPresentInDexCache(const DexCache* dex_cache, uint32_t type_idx); + bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - bool CanAssumeStringIsPresentInDexCache(const DexCache* dex_cache, uint32_t string_idx); + bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Are runtime access checks necessary in the compiled code? - bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexCache* dex_cache, - const DexFile& dex_file, uint32_t type_idx); + bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, + uint32_t type_idx) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? - bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexCache* dex_cache, - const DexFile& dex_file, uint32_t type_idx); + bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, + uint32_t type_idx) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - // Can we fast path instance field access? Computes field's offset and volatility + // Can we fast path instance field access? Computes field's offset and volatility. 
bool ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, - int& field_offset, bool& is_volatile, bool is_put); + int& field_offset, bool& is_volatile, bool is_put) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Can we fastpath static field access? Computes field's offset, volatility and whether the - // field is within the referrer (which can avoid checking class initialization) + // field is within the referrer (which can avoid checking class initialization). bool ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, int& ssb_index, - bool& is_referrers_class, bool& is_volatile, bool is_put); + bool& is_referrers_class, bool& is_volatile, bool is_put) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - // Can we fastpath a interface, super class or virtual method call? Computes method's vtable index + // Can we fastpath a interface, super class or virtual method call? Computes method's vtable + // index. bool ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, InvokeType& type, - int& vtable_idx, uintptr_t& direct_code, uintptr_t& direct_method); + int& vtable_idx, uintptr_t& direct_code, uintptr_t& direct_method) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - // Record patch information for later fix up - void AddCodePatch(DexCache* dex_cache, - const DexFile* dex_file, + // Record patch information for later fix up. 
+ void AddCodePatch(const DexFile* dex_file, uint32_t referrer_method_idx, uint32_t referrer_access_flags, uint32_t target_method_idx, bool target_is_direct, - size_t literal_offset); - void AddMethodPatch(DexCache* dex_cache, - const DexFile* dex_file, + size_t literal_offset) + LOCKS_EXCLUDED(compiled_methods_lock_); + void AddMethodPatch(const DexFile* dex_file, uint32_t referrer_method_idx, uint32_t referrer_access_flags, uint32_t target_method_idx, bool target_is_direct, - size_t literal_offset); + size_t literal_offset) + LOCKS_EXCLUDED(compiled_methods_lock_); #if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_QUICK_COMPILER) void SetBitcodeFileName(std::string const& filename); @@ -159,9 +174,6 @@ class Compiler { class PatchInformation { public: - DexCache* GetDexCache() const { - return dex_cache_; - } const DexFile& GetDexFile() const { return *dex_file_; } @@ -182,15 +194,13 @@ class Compiler { } private: - PatchInformation(DexCache* dex_cache, - const DexFile* dex_file, + PatchInformation(const DexFile* dex_file, uint32_t referrer_method_idx, uint32_t referrer_access_flags, uint32_t target_method_idx, uint32_t target_is_direct, size_t literal_offset) - : dex_cache_(dex_cache), - dex_file_(dex_file), + : dex_file_(dex_file), referrer_method_idx_(referrer_method_idx), referrer_is_direct_(Method::IsDirect(referrer_access_flags)), target_method_idx_(target_method_idx), @@ -199,7 +209,6 @@ class Compiler { CHECK(dex_file_ != NULL); } - DexCache* dex_cache_; const DexFile* dex_file_; uint32_t referrer_method_idx_; bool referrer_is_direct_; @@ -221,39 +230,55 @@ class Compiler { private: // Compute constant code and method pointers when possible void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, Method* method, - uintptr_t& direct_code, uintptr_t& direct_method); + uintptr_t& direct_code, uintptr_t& direct_method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Checks if class specified by type_idx is one of the 
image_classes_ bool IsImageClass(const std::string& descriptor) const; - void PreCompile(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); - void PostCompile(ClassLoader* class_loader, const std::vector& dex_files); + void PreCompile(jobject class_loader, const std::vector& dex_files, + TimingLogger& timings) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void PostCompile(jobject class_loader, const std::vector& dex_files); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. - void Resolve(ClassLoader* class_loader, const std::vector& dex_files, TimingLogger& timings); - void ResolveDexFile(ClassLoader* class_loader, const DexFile& dex_file, TimingLogger& timings); - - void Verify(ClassLoader* class_loader, const std::vector& dex_files); - void VerifyDexFile(ClassLoader* class_loader, const DexFile& dex_file); - - void InitializeClassesWithoutClinit(ClassLoader* class_loader, const std::vector& dex_files); - void InitializeClassesWithoutClinit(ClassLoader* class_loader, const DexFile& dex_file); - - void Compile(ClassLoader* class_loader, - const std::vector& dex_files); - void CompileDexFile(ClassLoader* class_loader, const DexFile& dex_file); + void Resolve(jobject class_loader, const std::vector& dex_files, + TimingLogger& timings) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void ResolveDexFile(jobject class_loader, const DexFile& dex_file, TimingLogger& timings) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + + void Verify(jobject class_loader, const std::vector& dex_files); + void VerifyDexFile(jobject class_loader, const DexFile& dex_file) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + + void InitializeClassesWithoutClinit(jobject class_loader, + const std::vector& dex_files) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void InitializeClassesWithoutClinit(jobject 
class_loader, const DexFile& dex_file) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, compiled_classes_lock_); + + void Compile(jobject class_loader, const std::vector& dex_files); + void CompileDexFile(jobject class_loader, const DexFile& dex_file) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx, - ClassLoader* class_loader, const DexFile& dex_file); + jobject class_loader, const DexFile& dex_file) + LOCKS_EXCLUDED(compiled_methods_lock_); - static void CompileClass(CompilationContext* context, size_t class_def_index); + static void CompileClass(const CompilationContext* context, size_t class_def_index) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - void SetGcMaps(ClassLoader* class_loader, const std::vector& dex_files); - void SetGcMapsDexFile(ClassLoader* class_loader, const DexFile& dex_file); - void SetGcMapsMethod(const DexFile& dex_file, Method* method); + void SetGcMaps(jobject class_loader, const std::vector& dex_files) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void SetGcMapsDexFile(jobject class_loader, const DexFile& dex_file) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void SetGcMapsMethod(const DexFile& dex_file, Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub); + void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub) + LOCKS_EXCLUDED(compiled_invoke_stubs_lock_); #if defined(ART_USE_LLVM_COMPILER) void InsertProxyStub(const char* shorty, const CompiledInvokeStub* compiled_proxy_stub); @@ -266,17 +291,17 @@ class Compiler { typedef SafeMap ClassTable; // All class references that this compiler has compiled - mutable Mutex compiled_classes_lock_; + mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; ClassTable 
compiled_classes_ GUARDED_BY(compiled_classes_lock_); typedef SafeMap MethodTable; // All method references that this compiler has compiled - mutable Mutex compiled_methods_lock_; + mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_); typedef SafeMap InvokeStubTable; // Invocation stubs created to allow invocation of the compiled methods - mutable Mutex compiled_invoke_stubs_lock_; + mutable Mutex compiled_invoke_stubs_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; InvokeStubTable compiled_invoke_stubs_ GUARDED_BY(compiled_invoke_stubs_lock_); #if defined(ART_USE_LLVM_COMPILER) @@ -308,7 +333,7 @@ class Compiler { typedef CompiledMethod* (*CompilerFn)(Compiler& compiler, const DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx, - ClassLoader* class_loader, + jobject class_loader, const DexFile& dex_file); CompilerFn compiler_; diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h index 088768b2c9..00ab796a81 100644 --- a/src/compiler/Compiler.h +++ b/src/compiler/Compiler.h @@ -204,7 +204,7 @@ BasicBlock* oatFindBlock(CompilationUnit* cUnit, unsigned int codeOffset); extern "C" art::CompiledMethod* ArtCompileMethod(art::Compiler& compiler, const art::DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx, - const art::ClassLoader* class_loader, + jobject class_loader, const art::DexFile& dex_file); #endif // ART_SRC_COMPILER_COMPILER_H_ diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h index 1ecf61aef5..453ccdf556 100644 --- a/src/compiler/CompilerIR.h +++ b/src/compiler/CompilerIR.h @@ -326,7 +326,6 @@ struct CompilationUnit { compiler(NULL), class_linker(NULL), dex_file(NULL), - dex_cache(NULL), class_loader(NULL), method_idx(0), code_item(NULL), @@ -436,8 +435,7 @@ struct CompilationUnit { Compiler* compiler; // Compiler driving this compiler ClassLinker* class_linker; // Linker to resolve fields and methods const DexFile* 
dex_file; // DexFile containing the method being compiled - DexCache* dex_cache; // DexFile's corresponding cache - ClassLoader* class_loader; // compiling method's class loader + jobject class_loader; // compiling method's class loader uint32_t method_idx; // compiling method's index into method_ids of DexFile const DexFile::CodeItem* code_item; // compiling method's DexFile code_item uint32_t access_flags; // compiling method's access flags diff --git a/src/compiler/Dataflow.cc b/src/compiler/Dataflow.cc index 33ef0addad..fc1d262454 100644 --- a/src/compiler/Dataflow.cc +++ b/src/compiler/Dataflow.cc @@ -2274,7 +2274,7 @@ bool invokeUsesMethodStar(CompilationUnit* cUnit, MIR* mir) return false; } OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, - *cUnit->dex_file, *cUnit->dex_cache, + *cUnit->dex_file, cUnit->code_item, cUnit->method_idx, cUnit->access_flags); // TODO: add a flag so we don't counts the stats for this twice diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc index 7585b77d02..6cdfebc014 100644 --- a/src/compiler/Frontend.cc +++ b/src/compiler/Frontend.cc @@ -733,7 +733,7 @@ void oatInit(CompilationUnit* cUnit, const Compiler& compiler) { CompiledMethod* oatCompileMethod(Compiler& compiler, const DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx, - const ClassLoader* class_loader, + jobject class_loader, const DexFile& dex_file) { VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "..."; @@ -751,7 +751,6 @@ CompiledMethod* oatCompileMethod(Compiler& compiler, cUnit->compiler = &compiler; cUnit->class_linker = class_linker; cUnit->dex_file = &dex_file; - cUnit->dex_cache = class_linker->FindDexCache(dex_file); cUnit->method_idx = method_idx; cUnit->code_item = code_item; cUnit->access_flags = access_flags; @@ -1161,7 +1160,7 @@ extern "C" art::CompiledMethod* ArtCompileMethod(art::Compiler& compiler, const art::DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t 
method_idx, - const art::ClassLoader* class_loader, + jobject class_loader, const art::DexFile& dex_file) { CHECK_EQ(compiler.GetInstructionSet(), art::oatInstructionSet()); diff --git a/src/compiler/codegen/CodegenUtil.cc b/src/compiler/codegen/CodegenUtil.cc index a33a1627f4..dd7daba1d6 100644 --- a/src/compiler/codegen/CodegenUtil.cc +++ b/src/compiler/codegen/CodegenUtil.cc @@ -644,7 +644,7 @@ void installLiteralPools(CompilationUnit* cUnit) if (dataLIR != NULL) { while (dataLIR != NULL) { uint32_t target = dataLIR->operands[0]; - cUnit->compiler->AddCodePatch(cUnit->dex_cache, cUnit->dex_file, + cUnit->compiler->AddCodePatch(cUnit->dex_file, cUnit->method_idx, cUnit->access_flags, target, @@ -659,7 +659,7 @@ void installLiteralPools(CompilationUnit* cUnit) dataLIR = cUnit->methodLiteralList; while (dataLIR != NULL) { uint32_t target = dataLIR->operands[0]; - cUnit->compiler->AddMethodPatch(cUnit->dex_cache, cUnit->dex_file, + cUnit->compiler->AddMethodPatch(cUnit->dex_file, cUnit->method_idx, cUnit->access_flags, target, diff --git a/src/compiler/codegen/GenCommon.cc b/src/compiler/codegen/GenCommon.cc index baa4b48bbe..6cb701f829 100644 --- a/src/compiler/codegen/GenCommon.cc +++ b/src/compiler/codegen/GenCommon.cc @@ -502,7 +502,6 @@ void genNewArray(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest, oatFlushAllRegs(cUnit); /* Everything to home location */ int funcOffset; if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx, - cUnit->dex_cache, *cUnit->dex_file, type_idx)) { funcOffset = ENTRYPOINT_OFFSET(pAllocArrayFromCode); @@ -527,7 +526,6 @@ void genFilledNewArray(CompilationUnit* cUnit, CallInfo* info) oatFlushAllRegs(cUnit); /* Everything to home location */ int funcOffset; if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx, - cUnit->dex_cache, *cUnit->dex_file, typeIdx)) { funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); @@ -637,10 +635,8 @@ void genSput(CompilationUnit* cUnit, uint32_t fieldIdx, 
RegLocation rlSrc, bool isVolatile; bool isReferrersClass; - OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, - *cUnit->dex_file, *cUnit->dex_cache, - cUnit->code_item, cUnit->method_idx, - cUnit->access_flags); + OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, *cUnit->dex_file, + cUnit->code_item, cUnit->method_idx, cUnit->access_flags); bool fastPath = cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit, @@ -735,7 +731,7 @@ void genSget(CompilationUnit* cUnit, uint32_t fieldIdx, RegLocation rlDest, bool isReferrersClass; OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, - *cUnit->dex_file, *cUnit->dex_cache, + *cUnit->dex_file, cUnit->code_item, cUnit->method_idx, cUnit->access_flags); @@ -978,7 +974,7 @@ bool fastInstance(CompilationUnit* cUnit, uint32_t fieldIdx, int& fieldOffset, bool& isVolatile, bool isPut) { OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, - *cUnit->dex_file, *cUnit->dex_cache, + *cUnit->dex_file, cUnit->code_item, cUnit->method_idx, cUnit->access_flags); return cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit, @@ -1102,7 +1098,6 @@ void genConstClass(CompilationUnit* cUnit, uint32_t type_idx, int resReg = oatAllocTemp(cUnit); RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true); if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx, - cUnit->dex_cache, *cUnit->dex_file, type_idx)) { // Call out to helper which resolves type and verifies access. 
@@ -1121,7 +1116,7 @@ void genConstClass(CompilationUnit* cUnit, uint32_t type_idx, Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*) * type_idx); loadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg); - if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(cUnit->dex_cache, + if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(*cUnit->dex_file, type_idx) || SLOW_TYPE_PATH) { // Slow path, at runtime test if type is null and if so initialize oatFlushAllRegs(cUnit); @@ -1164,7 +1159,7 @@ void genConstString(CompilationUnit* cUnit, uint32_t string_idx, int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() + (sizeof(String*) * string_idx); if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache( - cUnit->dex_cache, string_idx) || SLOW_STRING_PATH) { + *cUnit->dex_file, string_idx) || SLOW_STRING_PATH) { // slow path, resolve string if not in dex cache oatFlushAllRegs(cUnit); oatLockCallTemps(cUnit); // Using explicit registers @@ -1222,7 +1217,7 @@ void genNewInstance(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDes // access because the verifier was unable to? 
int funcOffset; if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks( - cUnit->method_idx, cUnit->dex_cache, *cUnit->dex_file, type_idx)) { + cUnit->method_idx, *cUnit->dex_file, type_idx)) { funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCode); } else { funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); @@ -1248,7 +1243,6 @@ void genInstanceof(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method* int classReg = rARG2; // rARG2 will hold the Class* if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx, - cUnit->dex_cache, *cUnit->dex_file, type_idx)) { // Check we have access to type_idx and if not throw IllegalAccessError, @@ -1268,7 +1262,7 @@ void genInstanceof(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest * type_idx); loadWordDisp(cUnit, classReg, offset_of_type, classReg); if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache( - cUnit->dex_cache, type_idx)) { + *cUnit->dex_file, type_idx)) { // Need to test presence of type in dex cache at runtime LIR* hopBranch = opCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL); // Not resolved @@ -1333,7 +1327,6 @@ void genCheckCast(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlSrc) loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method* int classReg = rARG2; // rARG2 will hold the Class* if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx, - cUnit->dex_cache, *cUnit->dex_file, type_idx)) { // Check we have access to type_idx and if not throw IllegalAccessError, @@ -1352,7 +1345,7 @@ void genCheckCast(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlSrc) (sizeof(Class*) * type_idx); loadWordDisp(cUnit, classReg, offset_of_type, classReg); if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache( - cUnit->dex_cache, type_idx)) { + *cUnit->dex_file, type_idx)) { // Need to test presence of type in dex cache at runtime LIR* hopBranch = opCmpImmBranch(cUnit, 
kCondNe, classReg, 0, NULL); // Not resolved diff --git a/src/compiler/codegen/MethodCodegenDriver.cc b/src/compiler/codegen/MethodCodegenDriver.cc index b93cbd94b1..e2a5f979d5 100644 --- a/src/compiler/codegen/MethodCodegenDriver.cc +++ b/src/compiler/codegen/MethodCodegenDriver.cc @@ -68,7 +68,7 @@ void genInvoke(CompilationUnit* cUnit, CallInfo* info) oatLockCallTemps(cUnit); OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, - *cUnit->dex_file, *cUnit->dex_cache, + *cUnit->dex_file, cUnit->code_item, cUnit->method_idx, cUnit->access_flags); diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index a42696c597..2eb9c65d13 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -28,7 +28,7 @@ #include "reflection.h" #include "runtime_support.h" #include "runtime_support_func_list.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" #include "utils_llvm.h" @@ -656,7 +656,7 @@ void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) { // Start new JNI local reference state JNIEnvExt* env = thread->GetJniEnv(); - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); ScopedJniEnvLocalRefState env_state(env); // Create local ref. 
copies of the receiver diff --git a/src/compiler_test.cc b/src/compiler_test.cc index 088726fff5..0c53bb9981 100644 --- a/src/compiler_test.cc +++ b/src/compiler_test.cc @@ -31,14 +31,16 @@ namespace art { class CompilerTest : public CommonTest { protected: - void CompileAll(ClassLoader* class_loader) { + void CompileAll(jobject class_loader) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { compiler_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader)); MakeAllExecutable(class_loader); } - void EnsureCompiled(ClassLoader* class_loader, const char* class_name, const char* method, - const char* signature, bool is_virtual) { + void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, + const char* signature, bool is_virtual) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { CompileAll(class_loader); + Thread::Current()->TransitionFromSuspendedToRunnable(); runtime_->Start(); env_ = Thread::Current()->GetJniEnv(); class_ = env_->FindClass(class_name); @@ -51,7 +53,7 @@ class CompilerTest : public CommonTest { CHECK(mid_ != NULL) << "Method not found: " << class_name << "." 
<< method << signature; } - void MakeAllExecutable(ClassLoader* class_loader) { + void MakeAllExecutable(jobject class_loader) { const std::vector& class_path = Runtime::Current()->GetCompileTimeClassPath(class_loader); for (size_t i = 0; i != class_path.size(); ++i) { @@ -61,12 +63,13 @@ class CompilerTest : public CommonTest { } } - void MakeDexFileExecutable(ClassLoader* class_loader, const DexFile& dex_file) { + void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (size_t i = 0; i < dex_file.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); const char* descriptor = dex_file.GetClassDescriptor(class_def); - Class* c = class_linker->FindClass(descriptor, class_loader); + ScopedObjectAccess soa(Thread::Current()); + Class* c = class_linker->FindClass(descriptor, soa.Decode(class_loader)); CHECK(c != NULL); for (size_t i = 0; i < c->NumDirectMethods(); i++) { MakeExecutable(c->GetDirectMethod(i)); @@ -87,6 +90,7 @@ TEST_F(CompilerTest, DISABLED_LARGE_CompileDexLibCore) { CompileAll(NULL); // All libcore references should resolve + ScopedObjectAccess soa(Thread::Current()); const DexFile* dex = java_lang_dex_file_; DexCache* dex_cache = class_linker_->FindDexCache(*dex); EXPECT_EQ(dex->NumStringIds(), dex_cache->NumStrings()); @@ -125,12 +129,15 @@ TEST_F(CompilerTest, DISABLED_LARGE_CompileDexLibCore) { } TEST_F(CompilerTest, AbstractMethodErrorStub) { - CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z"); - CompileDirectMethod(NULL, "java.lang.Object", "", "()V"); - - SirtRef class_loader(LoadDex("AbstractMethod")); - ASSERT_TRUE(class_loader.get() != NULL); - EnsureCompiled(class_loader.get(), "AbstractClass", "foo", "()V", true); + jobject class_loader; + { + ScopedObjectAccess soa(Thread::Current()); + CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z"); + CompileDirectMethod(NULL, 
"java.lang.Object", "", "()V"); + class_loader = LoadDex("AbstractMethod"); + } + ASSERT_TRUE(class_loader != NULL); + EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true); // Create a jobj_ of ConcreteClass, NOT AbstractClass. jclass c_class = env_->FindClass("ConcreteClass"); @@ -138,11 +145,13 @@ TEST_F(CompilerTest, AbstractMethodErrorStub) { jobject jobj_ = env_->NewObject(c_class, constructor); ASSERT_TRUE(jobj_ != NULL); - Class* jlame = class_linker_->FindClass("Ljava/lang/AbstractMethodError;", class_loader.get()); // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception. env_->CallNonvirtualVoidMethod(jobj_, class_, mid_); - EXPECT_TRUE(Thread::Current()->IsExceptionPending()); - EXPECT_TRUE(Thread::Current()->GetException()->InstanceOf(jlame)); + EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE); + jthrowable exception = env_->ExceptionOccurred(); + env_->ExceptionClear(); + jclass jlame = env_->FindClass("java/lang/AbstractMethodError"); + EXPECT_TRUE(env_->IsInstanceOf(exception, jlame)); Thread::Current()->ClearException(); } diff --git a/src/debugger.cc b/src/debugger.cc index cd52f8260a..edb6e7f1e7 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -28,10 +28,9 @@ #endif #include "object_utils.h" #include "safe_map.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" #include "ScopedLocalRef.h" #include "ScopedPrimitiveArray.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "stack_indirect_reference_table.h" #include "thread_list.h" @@ -91,7 +90,7 @@ class ObjectRegistry { } private: - Mutex lock_; + Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; SafeMap map_; }; @@ -99,7 +98,7 @@ struct AllocRecordStackTraceElement { Method* method; uint32_t dex_pc; - int32_t LineNumber() const { + int32_t LineNumber() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return MethodHelper(method).GetLineNumFromDexPC(dex_pc); } }; @@ -125,7 +124,8 @@ struct 
Breakpoint { Breakpoint(Method* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {} }; -static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) { +static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc); return os; } @@ -171,17 +171,18 @@ static Dbg::HpsgWhat gDdmNhsgWhat; static ObjectRegistry* gRegistry = NULL; // Recent allocation tracking. -static Mutex gAllocTrackerLock("AllocTracker lock"); +static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER ("AllocTracker lock"); AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0; static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0; // Breakpoints and single-stepping. -static Mutex gBreakpointsLock("breakpoints lock"); +static Mutex gBreakpointsLock DEFAULT_MUTEX_ACQUIRED_AFTER ("breakpoints lock"); static std::vector gBreakpoints GUARDED_BY(gBreakpointsLock); static SingleStepControl gSingleStepControl GUARDED_BY(gBreakpointsLock); -static bool IsBreakpoint(Method* m, uint32_t dex_pc) { +static bool IsBreakpoint(Method* m, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { MutexLock mu(gBreakpointsLock); for (size_t i = 0; i < gBreakpoints.size(); ++i) { if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) { @@ -192,7 +193,8 @@ static bool IsBreakpoint(Method* m, uint32_t dex_pc) { return false; } -static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) { +static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -206,7 +208,8 @@ static Array* 
DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) { return o->AsArray(); } -static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) { +static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -220,13 +223,15 @@ static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) { return o->AsClass(); } -static Thread* DecodeThread(JDWP::ObjectId threadId) { - ScopedJniThreadState ts(Thread::Current()); +static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId threadId) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Object* thread_peer = gRegistry->Get(threadId); if (thread_peer == NULL || thread_peer == kInvalidObject) { return NULL; } - return Thread::FromManagedThread(ts, thread_peer); + Thread* thread = Thread::FromManagedThread(soa, thread_peer); + return thread; } static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { @@ -235,7 +240,8 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { return static_cast(descriptor[0]); } -static JDWP::JdwpTag TagFromClass(Class* c) { +static JDWP::JdwpTag TagFromClass(Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(c != NULL); if (c->IsArrayClass()) { return JDWP::JT_ARRAY; @@ -265,7 +271,8 @@ static JDWP::JdwpTag TagFromClass(Class* c) { * * Null objects are tagged JT_OBJECT. */ -static JDWP::JdwpTag TagFromObject(const Object* o) { +static JDWP::JdwpTag TagFromObject(const Object* o) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass()); } @@ -417,7 +424,7 @@ void Dbg::StartJdwp() { // If a debugger has already attached, send the "welcome" message. 
// This may cause us to suspend all threads. if (gJdwpState->IsActive()) { - //ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedObjectAccess soa(Thread::Current()); if (!gJdwpState->PostVMStart()) { LOG(WARNING) << "Failed to post 'start' message to debugger"; } @@ -432,14 +439,17 @@ void Dbg::StopJdwp() { void Dbg::GcDidFinish() { if (gDdmHpifWhen != HPIF_WHEN_NEVER) { + ScopedObjectAccess soa(Thread::Current()); LOG(DEBUG) << "Sending heap info to DDM"; DdmSendHeapInfo(gDdmHpifWhen); } if (gDdmHpsgWhen != HPSG_WHEN_NEVER) { + ScopedObjectAccess soa(Thread::Current()); LOG(DEBUG) << "Dumping heap to DDM"; DdmSendHeapSegments(false); } if (gDdmNhsgWhen != HPSG_WHEN_NEVER) { + ScopedObjectAccess soa(Thread::Current()); LOG(DEBUG) << "Dumping native heap to DDM"; DdmSendHeapSegments(true); } @@ -481,6 +491,7 @@ static void SetDebuggerUpdatesEnabledCallback(Thread* t, void* user_data) { } static void SetDebuggerUpdatesEnabled(bool enabled) { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled); } @@ -528,18 +539,6 @@ int64_t Dbg::LastDebuggerActivity() { return gJdwpState->LastDebuggerActivity(); } -int Dbg::ThreadRunning() { - return static_cast(Thread::Current()->SetState(kRunnable)); -} - -int Dbg::ThreadWaiting() { - return static_cast(Thread::Current()->SetState(kVmWait)); -} - -int Dbg::ThreadContinuing(int new_state) { - return static_cast(Thread::Current()->SetState(static_cast(new_state))); -} - void Dbg::UndoDebuggerSuspensions() { Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); } @@ -829,7 +828,9 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId arrayId, int offset, int count, return JDWP::ERR_NONE; } -JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId arrayId, int offset, int count, const uint8_t* src) { +JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId arrayId, int offset, int count, + const uint8_t* src) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { JDWP::JdwpError status; Array* a = DecodeArray(arrayId, status); if (a == NULL) { @@ -898,7 +899,8 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_o /* * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]". */ -JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId arrayClassId, uint32_t length, JDWP::ObjectId& new_array) { +JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId arrayClassId, uint32_t length, + JDWP::ObjectId& new_array) { JDWP::JdwpError status; Class* c = DecodeClass(arrayClassId, status); if (c == NULL) { @@ -917,7 +919,8 @@ bool Dbg::MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) { return c1->IsAssignableFrom(c2); } -static JDWP::FieldId ToFieldId(const Field* f) { +static JDWP::FieldId ToFieldId(const Field* f) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -925,7 +928,8 @@ static JDWP::FieldId ToFieldId(const Field* f) { #endif } -static JDWP::MethodId ToMethodId(const Method* m) { +static JDWP::MethodId ToMethodId(const Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -933,7 +937,8 @@ static JDWP::MethodId ToMethodId(const Method* m) { #endif } -static Field* FromFieldId(JDWP::FieldId fid) { +static Field* FromFieldId(JDWP::FieldId fid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -941,7 +946,8 @@ static Field* FromFieldId(JDWP::FieldId fid) { #endif } -static Method* FromMethodId(JDWP::MethodId mid) { +static Method* FromMethodId(JDWP::MethodId mid) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -949,7 +955,8 @@ static Method* FromMethodId(JDWP::MethodId mid) { 
#endif } -static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc) { +static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (m == NULL) { memset(&location, 0, sizeof(location)); } else { @@ -961,7 +968,8 @@ static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc } } -std::string Dbg::GetMethodName(JDWP::RefTypeId, JDWP::MethodId methodId) { +std::string Dbg::GetMethodName(JDWP::RefTypeId, JDWP::MethodId methodId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = FromMethodId(methodId); return MethodHelper(m).GetName(); } @@ -1004,7 +1012,8 @@ static uint16_t MangleSlot(uint16_t slot, const char* name) { return newSlot; } -static uint16_t DemangleSlot(uint16_t slot, Method* m) { +static uint16_t DemangleSlot(uint16_t slot, Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (slot == kEclipseWorkaroundSlot) { return 0; } else if (slot == 0) { @@ -1042,7 +1051,8 @@ JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId classId, bool with_gen return JDWP::ERR_NONE; } -JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId classId, bool with_generic, JDWP::ExpandBuf* pReply) { +JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId classId, bool with_generic, + JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; Class* c = DecodeClass(classId, status); if (c == NULL) { @@ -1085,7 +1095,8 @@ JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId classId, JDWP::Exp return JDWP::ERR_NONE; } -void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) { +void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { struct DebugCallbackContext { int numItems; JDWP::ExpandBuf* pReply; @@ -1098,7 +1109,6 @@ void 
Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId methodId, JDWP::Expand return true; } }; - Method* m = FromMethodId(methodId); MethodHelper mh(m); uint64_t start, end; @@ -1153,7 +1163,6 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId methodId, bool wit ++pContext->variable_count; } }; - Method* m = FromMethodId(methodId); MethodHelper mh(m); const DexFile::CodeItem* code_item = mh.GetCodeItem(); @@ -1186,7 +1195,10 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId fieldId) { return BasicTagFromDescriptor(FieldHelper(FromFieldId(fieldId)).GetTypeDescriptor()); } -static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId refTypeId, JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply, bool is_static) { +static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId refTypeId, JDWP::ObjectId objectId, + JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply, + bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { JDWP::JdwpError status; Class* c = DecodeClass(refTypeId, status); if (refTypeId != 0 && c == NULL) { @@ -1245,7 +1257,8 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId refTypeId, JDWP::Object return JDWP::ERR_NONE; } -JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) { +JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, + JDWP::ExpandBuf* pReply) { return GetFieldValueImpl(0, objectId, fieldId, pReply, false); } @@ -1253,7 +1266,9 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldI return GetFieldValueImpl(refTypeId, 0, fieldId, pReply, true); } -static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width, bool is_static) { +static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId objectId, JDWP::FieldId fieldId, + uint64_t value, int width, bool is_static) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Object* o = gRegistry->Get(objectId); if ((!is_static && o == NULL) || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; @@ -1300,7 +1315,8 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId objectId, JDWP::FieldId return JDWP::ERR_NONE; } -JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width) { +JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, + int width) { return SetFieldValueImpl(objectId, fieldId, value, width, false); } @@ -1314,8 +1330,9 @@ std::string Dbg::StringToUtf8(JDWP::ObjectId strId) { } bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) { - ScopedThreadListLock thread_list_lock; - Thread* thread = DecodeThread(threadId); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return false; } @@ -1324,13 +1341,15 @@ bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) { } JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply) { + ScopedObjectAccess soa(Thread::Current()); Object* thread = gRegistry->Get(threadId); if (thread == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } // Okay, so it's an object, but is it actually a thread? 
- if (DecodeThread(threadId) == NULL) { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + if (DecodeThread(soa, threadId) == NULL) { return JDWP::ERR_INVALID_THREAD; } @@ -1347,6 +1366,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pR } std::string Dbg::GetThreadGroupName(JDWP::ObjectId threadGroupId) { + ScopedObjectAccess soa(Thread::Current()); Object* thread_group = gRegistry->Get(threadGroupId); CHECK(thread_group != NULL); @@ -1371,27 +1391,30 @@ JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId threadGroupId) { } JDWP::ObjectId Dbg::GetSystemThreadGroupId() { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccessUnchecked soa(Thread::Current()); Object* group = - ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup)->GetObject(NULL); + soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup)->GetObject(NULL); return gRegistry->Add(group); } JDWP::ObjectId Dbg::GetMainThreadGroupId() { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); Object* group = - ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup)->GetObject(NULL); + soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup)->GetObject(NULL); return gRegistry->Add(group); } bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) { - ScopedThreadListLock thread_list_lock; + ScopedObjectAccess soa(Thread::Current()); - Thread* thread = DecodeThread(threadId); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return false; } + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + // TODO: if we're in Thread.sleep(long), we should return TS_SLEEPING, // even if it's implemented using Object.wait(long). 
switch (thread->GetState()) { @@ -1402,7 +1425,16 @@ bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThre case kWaiting: *pThreadStatus = JDWP::TS_WAIT; break; case kStarting: *pThreadStatus = JDWP::TS_ZOMBIE; break; case kNative: *pThreadStatus = JDWP::TS_RUNNING; break; - case kVmWait: *pThreadStatus = JDWP::TS_WAIT; break; + case kWaitingForGcToComplete: // Fall-through. + case kWaitingPerformingGc: // Fall-through. + case kWaitingForDebuggerSend: // Fall-through. + case kWaitingForDebuggerToAttach: // Fall-through. + case kWaitingInMainDebuggerLoop: // Fall-through. + case kWaitingForDebuggerSuspension: // Fall-through. + case kWaitingForJniOnLoad: // Fall-through. + case kWaitingForSignalCatcherOutput: // Fall-through. + case kWaitingInMainSignalCatcherLoop: + *pThreadStatus = JDWP::TS_WAIT; break; case kSuspended: *pThreadStatus = JDWP::TS_RUNNING; break; // Don't add a 'default' here so the compiler can spot incompatible enum changes. } @@ -1412,34 +1444,49 @@ bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThre return true; } -JDWP::JdwpError Dbg::GetThreadSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply) { - Thread* thread = DecodeThread(threadId); +JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply) { + ScopedObjectAccess soa(Thread::Current()); + + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } - expandBufAdd4BE(pReply, thread->GetSuspendCount()); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); return JDWP::ERR_NONE; } bool Dbg::ThreadExists(JDWP::ObjectId threadId) { - return DecodeThread(threadId) != NULL; + ScopedObjectAccess soa(Thread::Current()); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + return DecodeThread(soa, threadId) != 
NULL; } bool Dbg::IsSuspended(JDWP::ObjectId threadId) { - return DecodeThread(threadId)->IsSuspended(); + ScopedObjectAccess soa(Thread::Current()); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = DecodeThread(soa, threadId); + CHECK(thread != NULL); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + return thread->IsSuspended(); } void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector& thread_ids) { class ThreadListVisitor { public: - ThreadListVisitor(const ScopedJniThreadState& ts, Object* thread_group, std::vector& thread_ids) + ThreadListVisitor(const ScopedObjectAccessUnchecked& ts, Object* thread_group, + std::vector& thread_ids) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : ts_(ts), thread_group_(thread_group), thread_ids_(thread_ids) {} static void Visit(Thread* t, void* arg) { reinterpret_cast(arg)->Visit(t); } - void Visit(Thread* t) { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS { if (t == Dbg::GetDebugThread()) { // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and // query all threads, so it's easier if we just don't tell them about this thread. 
@@ -1451,19 +1498,20 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector } private: - const ScopedJniThreadState& ts_; + const ScopedObjectAccessUnchecked& ts_; Object* const thread_group_; std::vector& thread_ids_; }; - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccessUnchecked soa(Thread::Current()); Object* thread_group = gRegistry->Get(thread_group_id); - ThreadListVisitor tlv(ts, thread_group, thread_ids); + ThreadListVisitor tlv(soa, thread_group, thread_ids); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); } void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector& child_thread_group_ids) { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); Object* thread_group = gRegistry->Get(thread_group_id); // Get the ArrayList "groups" out of this thread group... @@ -1482,7 +1530,8 @@ void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector* trace_stack) @@ -1497,28 +1546,34 @@ static int GetStackDepth(Thread* thread) { size_t depth; }; + if (kIsDebugBuild) { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK(thread->IsSuspended()); + } CountStackDepthVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack()); visitor.WalkStack(); return visitor.depth; } int Dbg::GetThreadFrameCount(JDWP::ObjectId threadId) { - ScopedThreadListLock thread_list_lock; - return GetStackDepth(DecodeThread(threadId)); + ScopedObjectAccess soa(Thread::Current()); + return GetStackDepth(DecodeThread(soa, threadId)); } JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) { - ScopedThreadListLock thread_list_lock; class GetFrameVisitor : public StackVisitor { public: GetFrameVisitor(const ManagedStack* stack, const std::vector* trace_stack, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* 
buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { expandBufAdd4BE(buf_, frame_count_); } - bool VisitFrame() { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (GetMethod()->IsRuntimeMethod()) { return true; // The debugger can't do anything useful with a frame that has no Method*. } @@ -1543,7 +1598,9 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram const size_t frame_count_; JDWP::ExpandBuf* buf_; }; - Thread* thread = DecodeThread(thread_id); + + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread = DecodeThread(soa, thread_id); // Caller already checked thread is suspended. GetFrameVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), start_frame, frame_count, buf); visitor.WalkStack(); return JDWP::ERR_NONE; @@ -1554,36 +1611,51 @@ JDWP::ObjectId Dbg::GetThreadSelfId() { } void Dbg::SuspendVM() { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); // TODO: do we really want to change back? should the JDWP thread be Runnable usually? 
- Runtime::Current()->GetThreadList()->SuspendAll(true); + Runtime::Current()->GetThreadList()->SuspendAllForDebugger(); } void Dbg::ResumeVM() { Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); } -void Dbg::SuspendThread(JDWP::ObjectId threadId) { - ScopedJniThreadState ts(Thread::Current()); - Object* peer = gRegistry->Get(threadId); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, peer); - if (thread == NULL) { - LOG(WARNING) << "No such thread for suspend: " << peer; - return; +JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId threadId, bool request_suspension) { + + bool timeout; + ScopedLocalRef peer(Thread::Current()->GetJniEnv(), NULL); + { + ScopedObjectAccess soa(Thread::Current()); + peer.reset(soa.AddLocalReference(gRegistry->Get(threadId))); + } + if (peer.get() == NULL) { + LOG(WARNING) << "No such thread for suspend: " << threadId; + return JDWP::ERR_THREAD_NOT_ALIVE; + } + // Suspend thread to build stack trace. + Thread* thread = Thread::SuspendForDebugger(peer.get(), request_suspension, &timeout); + if (thread != NULL) { + return JDWP::ERR_NONE; + } else if (timeout) { + return JDWP::ERR_INTERNAL; + } else { + return JDWP::ERR_THREAD_NOT_ALIVE; } - Runtime::Current()->GetThreadList()->Suspend(thread, true); } void Dbg::ResumeThread(JDWP::ObjectId threadId) { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccessUnchecked soa(Thread::Current()); Object* peer = gRegistry->Get(threadId); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, peer); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for resume: " << peer; return; } - if (thread->GetSuspendCount() > 0) { + bool needs_resume; + { + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + needs_resume = thread->GetSuspendCount() > 0; + } + if (needs_resume) 
{ Runtime::Current()->GetThreadList()->Resume(thread, true); } } @@ -1595,9 +1667,12 @@ void Dbg::SuspendSelf() { struct GetThisVisitor : public StackVisitor { GetThisVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, JDWP::FrameId frameId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, context), this_object(NULL), frame_id(frameId) {} - virtual bool VisitFrame() { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (frame_id != GetFrameId()) { return true; // continue } @@ -1615,7 +1690,8 @@ struct GetThisVisitor : public StackVisitor { JDWP::FrameId frame_id; }; -static Object* GetThis(Thread* self, Method* m, size_t frame_id) { +static Object* GetThis(Thread* self, Method* m, size_t frame_id) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // TODO: should we return the 'this' we passed through to non-static native methods? 
if (m->IsNative() || m->IsStatic()) { return NULL; @@ -1627,12 +1703,21 @@ static Object* GetThis(Thread* self, Method* m, size_t frame_id) { return visitor.this_object; } -JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) { - Thread* thread = DecodeThread(thread_id); - if (thread == NULL) { - return JDWP::ERR_INVALID_THREAD; +JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, + JDWP::ObjectId* result) { + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread; + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + thread = DecodeThread(soa, thread_id); + if (thread == NULL) { + return JDWP::ERR_INVALID_THREAD; + } + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + if (!thread->IsSuspended()) { + return JDWP::ERR_THREAD_NOT_SUSPENDED; + } } - UniquePtr context(Context::Create()); GetThisVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), context.get(), frame_id); visitor.WalkStack(); @@ -1640,15 +1725,19 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame return JDWP::ERR_NONE; } -void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) { +void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, + uint8_t* buf, size_t width) { struct GetLocalVisitor : public StackVisitor { GetLocalVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frameId), slot_(slot), tag_(tag), buf_(buf), width_(width) {} - bool VisitFrame() { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. 
+ bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (GetFrameId() != frame_id_) { return true; // Not our frame, carry on. } @@ -1746,7 +1835,9 @@ void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot uint8_t* const buf_; const size_t width_; }; - Thread* thread = DecodeThread(threadId); + + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread = DecodeThread(soa, threadId); UniquePtr context(Context::Create()); GetLocalVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), context.get(), frameId, slot, tag, buf, width); @@ -1759,10 +1850,13 @@ void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot SetLocalVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width) {} - bool VisitFrame() { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (GetFrameId() != frame_id_) { return true; // Not our frame, carry on. 
} @@ -1817,7 +1911,9 @@ void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot const uint64_t value_; const size_t width_; }; - Thread* thread = DecodeThread(threadId); + + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread = DecodeThread(soa, threadId); UniquePtr context(Context::Create()); SetLocalVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), context.get(), frameId, slot, tag, value, width); @@ -2018,14 +2114,15 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) { } } -JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize step_size, JDWP::JdwpStepDepth step_depth) { - Thread* thread = DecodeThread(threadId); +JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize step_size, + JDWP::JdwpStepDepth step_depth) { + ScopedObjectAccessUnchecked soa(Thread::Current()); + Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } MutexLock mu(gBreakpointsLock); - // TODO: there's no theoretical reason why we couldn't support single-stepping // of multiple threads at once, but we never did so historically. if (gSingleStepControl.thread != NULL && thread != gSingleStepControl.thread) { @@ -2041,14 +2138,18 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize s struct SingleStepStackVisitor : public StackVisitor { SingleStepStackVisitor(const ManagedStack* stack, const std::vector* trace_stack) + EXCLUSIVE_LOCKS_REQUIRED(gBreakpointsLock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { - MutexLock mu(gBreakpointsLock); // Keep GCC happy. + gBreakpointsLock.AssertHeld(); gSingleStepControl.method = NULL; gSingleStepControl.stack_depth = 0; } - bool VisitFrame() { - MutexLock mu(gBreakpointsLock); // Keep GCC happy. + // TODO: Enable annotalysis. 
We know lock is held in constructor, but abstraction confuses + // annotalysis. + bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { + gBreakpointsLock.AssertHeld(); const Method* m = GetMethod(); if (!m->IsRuntimeMethod()) { ++gSingleStepControl.stack_depth; @@ -2185,14 +2286,21 @@ static char JdwpTagToShortyChar(JDWP::JdwpTag tag) { } } -JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, JDWP::RefTypeId classId, JDWP::MethodId methodId, uint32_t arg_count, uint64_t* arg_values, JDWP::JdwpTag* arg_types, uint32_t options, JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, JDWP::ObjectId* pExceptionId) { +JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, + JDWP::RefTypeId classId, JDWP::MethodId methodId, + uint32_t arg_count, uint64_t* arg_values, + JDWP::JdwpTag* arg_types, uint32_t options, + JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, + JDWP::ObjectId* pExceptionId) { ThreadList* thread_list = Runtime::Current()->GetThreadList(); Thread* targetThread = NULL; DebugInvokeReq* req = NULL; + Thread* self = Thread::Current(); { - ScopedThreadListLock thread_list_lock; - targetThread = DecodeThread(threadId); + ScopedObjectAccessUnchecked soa(self); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + targetThread = DecodeThread(soa, threadId); if (targetThread == NULL) { LOG(ERROR) << "InvokeMethod request for non-existent thread " << threadId; return JDWP::ERR_INVALID_THREAD; @@ -2217,7 +2325,11 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object * by rejecting the method invocation request. Without this, we will * be stuck waiting on a suspended thread. 
*/ - int suspend_count = targetThread->GetSuspendCount(); + int suspend_count; + { + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + suspend_count = targetThread->GetSuspendCount(); + } if (suspend_count > 1) { LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count; return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here. @@ -2287,7 +2399,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object * run out of memory. It's also a good idea to change it before locking * the invokeReq mutex, although that should never be held for long. */ - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); + self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); VLOG(jdwp) << " Transferring control to event thread"; { @@ -2295,7 +2407,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { VLOG(jdwp) << " Resuming all threads"; - thread_list->ResumeAll(true); + thread_list->UndoDebuggerSuspensions(); } else { VLOG(jdwp) << " Resuming event thread only"; thread_list->Resume(targetThread, true); @@ -2309,8 +2421,8 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object VLOG(jdwp) << " Control has returned from event thread"; /* wait for thread to re-suspend itself */ - targetThread->WaitUntilSuspended(); - //dvmWaitForSuspend(targetThread); + SuspendThread(threadId, false /* request_suspension */ ); + self->TransitionFromSuspendedToRunnable(); } /* @@ -2321,8 +2433,10 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object * so we want to resume the target thread once to keep the books straight. 
*/ if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { + self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); VLOG(jdwp) << " Suspending all threads"; - thread_list->SuspendAll(true); + thread_list->SuspendAllForDebugger(); + self->TransitionFromSuspendedToRunnable(); VLOG(jdwp) << " Resuming event thread to balance the count"; thread_list->Resume(targetThread, true); } @@ -2339,12 +2453,12 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object } void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); // We can be called while an exception is pending. We need // to preserve that across the method invocation. - SirtRef old_exception(ts.Self()->GetException()); - ts.Self()->ClearException(); + SirtRef old_exception(soa.Self()->GetException()); + soa.Self()->ClearException(); // Translate the method through the vtable, unless the debugger wants to suppress it. 
Method* m = pReq->method_; @@ -2360,15 +2474,17 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { CHECK_EQ(sizeof(jvalue), sizeof(uint64_t)); - LOG(INFO) << "self=" << ts.Self() << " pReq->receiver_=" << pReq->receiver_ << " m=" << m << " #" << pReq->arg_count_ << " " << pReq->arg_values_; - pReq->result_value = InvokeWithJValues(ts, pReq->receiver_, m, reinterpret_cast(pReq->arg_values_)); + LOG(INFO) << "self=" << soa.Self() << " pReq->receiver_=" << pReq->receiver_ << " m=" << m + << " #" << pReq->arg_count_ << " " << pReq->arg_values_; + pReq->result_value = InvokeWithJValues(soa, pReq->receiver_, m, + reinterpret_cast(pReq->arg_values_)); - pReq->exception = gRegistry->Add(ts.Self()->GetException()); + pReq->exception = gRegistry->Add(soa.Self()->GetException()); pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty()); if (pReq->exception != 0) { - Object* exc = ts.Self()->GetException(); + Object* exc = soa.Self()->GetException(); VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc); - ts.Self()->ClearException(); + soa.Self()->ClearException(); pReq->result_value.SetJ(0); } else if (pReq->result_tag == JDWP::JT_OBJECT) { /* if no exception thrown, examine object result more closely */ @@ -2391,7 +2507,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { } if (old_exception.get() != NULL) { - ts.Self()->SetException(old_exception.get()); + soa.Self()->SetException(old_exception.get()); } } @@ -2507,9 +2623,12 @@ void Dbg::DdmBroadcast(bool connect) { VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "..."; Thread* self = Thread::Current(); - if (self->GetState() != kRunnable) { - LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); - /* try anyway? */ + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + if (self->GetState() != kRunnable) { + LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); + /* try anyway? 
*/ + } } JNIEnv* env = self->GetJniEnv(); @@ -2550,8 +2669,8 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf); } else { CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; - ScopedJniThreadState ts(Thread::Current()); - SirtRef name(t->GetThreadName(ts)); + ScopedObjectAccessUnchecked soa(Thread::Current()); + SirtRef name(t->GetThreadName(soa)); size_t char_count = (name.get() != NULL) ? name->GetLength() : 0; const jchar* chars = name->GetCharArray()->GetData(); @@ -2563,19 +2682,27 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { } } -static void DdmSendThreadStartCallback(Thread* t, void*) { - Dbg::DdmSendThreadNotification(t, CHUNK_TYPE("THCR")); -} - void Dbg::DdmSetThreadNotification(bool enable) { - // We lock the thread list to avoid sending duplicate events or missing - // a thread change. We should be okay holding this lock while sending - // the messages out. (We have to hold it while accessing a live thread.) - ScopedThreadListLock thread_list_lock; - + // Enable/disable thread notifications. gDdmThreadNotification = enable; if (enable) { - Runtime::Current()->GetThreadList()->ForEach(DdmSendThreadStartCallback, NULL); + // Suspend the VM then post thread start notifications for all threads. Threads attaching will + // see a suspension in progress and block until that ends. They then post their own start + // notification. 
+ SuspendVM(); + std::list threads; + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + threads = Runtime::Current()->GetThreadList()->GetList(); + } + { + ScopedObjectAccess soa(Thread::Current()); + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = threads.begin(), end = threads.end(); it != end; ++it) { + Dbg::DdmSendThreadNotification(*it, CHUNK_TYPE("THCR")); + } + } + ResumeVM(); } } @@ -2758,7 +2885,7 @@ class HeapChunkContext { needHeader_ = false; } - void Flush() { + void Flush() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Patch the "length of piece" field. CHECK_LE(&buf_[0], pieceLenField_); CHECK_LE(pieceLenField_, p_); @@ -2768,7 +2895,8 @@ class HeapChunkContext { Reset(); } - static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) { + static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { reinterpret_cast(arg)->HeapChunkCallback(start, end, used_bytes); } @@ -2782,7 +2910,8 @@ class HeapChunkContext { pieceLenField_ = NULL; } - void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) { + void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken // in the following code not to allocate memory, by ensuring buf_ is of the correct size @@ -2834,10 +2963,17 @@ class HeapChunkContext { // If we're looking at the native heap, we'll just return // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks. 
- if (is_native_heap || !Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) { + if (is_native_heap) { return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); } + { + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) { + return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); + } + } + Class* c = o->GetClass(); if (c == NULL) { // The object was probably just created but hasn't been initialized yet. @@ -2942,9 +3078,12 @@ void Dbg::SetAllocTrackingEnabled(bool enabled) { struct AllocRecordStackVisitor : public StackVisitor { AllocRecordStackVisitor(const ManagedStack* stack, const std::vector* trace_stack, AllocRecord* record) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), record(record), depth(0) {} - bool VisitFrame() { + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (depth >= kMaxAllocRecordStackDepth) { return false; } @@ -3011,6 +3150,7 @@ static inline int HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(gAllocTrackerLock) { } void Dbg::DumpRecentAllocations() { + ScopedObjectAccess soa(Thread::Current()); MutexLock mu(gAllocTrackerLock); if (recent_allocation_records_ == NULL) { LOG(INFO) << "Not recording tracked allocations"; diff --git a/src/debugger.h b/src/debugger.h index 4c0c1b62dc..4db9bd76fd 100644 --- a/src/debugger.h +++ b/src/debugger.h @@ -38,7 +38,10 @@ struct Thread; */ struct DebugInvokeReq { DebugInvokeReq() - : invoke_needed_(false), + : ready(false), invoke_needed_(false), + receiver_(NULL), thread_(NULL), class_(NULL), method_(NULL), + arg_count_(0), arg_values_(NULL), options_(0), error(JDWP::ERR_NONE), + result_tag(JDWP::JT_VOID), exception(0), lock_("a DebugInvokeReq lock"), cond_("a DebugInvokeReq condition variable") { } @@ -78,7 +81,7 @@ class Dbg { static void StopJdwp(); // Invoked by the GC in case we need to keep 
DDMS informed. - static void GcDidFinish(); + static void GcDidFinish() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Return the DebugInvokeReq for the current thread. static DebugInvokeReq* GetInvokeReq(); @@ -111,15 +114,6 @@ class Dbg { */ static int64_t LastDebuggerActivity(); - /* - * Block/allow GC depending on what we're doing. These return the old - * status, which can be fed to ThreadContinuing() to restore the previous - * mode. - */ - static int ThreadRunning(); - static int ThreadWaiting(); - static int ThreadContinuing(int status); - static void UndoDebuggerSuspensions(); static void Exit(int status); @@ -129,84 +123,154 @@ class Dbg { /* * Class, Object, Array */ - static std::string GetClassName(JDWP::RefTypeId id); - static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& classObjectId); - static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclassId); - static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId classId, JDWP::ExpandBuf* pReply); - static void GetClassList(std::vector& classes); - static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId classId, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor); - static void FindLoadedClassBySignature(const char* descriptor, std::vector& ids); + static std::string GetClassName(JDWP::RefTypeId id) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& classObjectId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclassId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId classId, JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void GetClassList(std::vector& classes) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId classId, JDWP::JdwpTypeTag* pTypeTag, + uint32_t* pStatus, std::string* pDescriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void FindLoadedClassBySignature(const char* descriptor, std::vector& ids) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static JDWP::JdwpError GetReferenceType(JDWP::ObjectId objectId, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError GetSignature(JDWP::RefTypeId refTypeId, std::string& signature); - static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId refTypeId, std::string& source_file); - static JDWP::JdwpError GetObjectTag(JDWP::ObjectId objectId, uint8_t& tag); + static JDWP::JdwpError GetSignature(JDWP::RefTypeId refTypeId, std::string& signature) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId refTypeId, std::string& source_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetObjectTag(JDWP::ObjectId objectId, uint8_t& tag) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static size_t GetTagWidth(JDWP::JdwpTag tag); - static JDWP::JdwpError GetArrayLength(JDWP::ObjectId arrayId, int& length); - static JDWP::JdwpError OutputArray(JDWP::ObjectId arrayId, int firstIndex, int count, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError SetArrayElements(JDWP::ObjectId arrayId, int firstIndex, int count, const uint8_t* buf); - - static JDWP::ObjectId 
CreateString(const std::string& str); - static JDWP::JdwpError CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_object); - static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId arrayTypeId, uint32_t length, JDWP::ObjectId& new_array); - - static bool MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId); + static JDWP::JdwpError GetArrayLength(JDWP::ObjectId arrayId, int& length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError OutputArray(JDWP::ObjectId arrayId, int firstIndex, int count, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError SetArrayElements(JDWP::ObjectId arrayId, int firstIndex, int count, + const uint8_t* buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static JDWP::ObjectId CreateString(const std::string& str) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId arrayTypeId, uint32_t length, + JDWP::ObjectId& new_array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static bool MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Method and Field */ - static std::string GetMethodName(JDWP::RefTypeId refTypeId, JDWP::MethodId id); - static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId refTypeId, JDWP::ExpandBuf* pReply); - static void OutputLineTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply); - static void 
OutputVariableTable(JDWP::RefTypeId refTypeId, JDWP::MethodId id, bool withGeneric, JDWP::ExpandBuf* pReply); - - static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId fieldId); - static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId fieldId); - static JDWP::JdwpError GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width); - static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply); - static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId fieldId, uint64_t value, int width); - - static std::string StringToUtf8(JDWP::ObjectId strId); + static std::string GetMethodName(JDWP::RefTypeId refTypeId, JDWP::MethodId id) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId refTypeId, bool withGeneric, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId refTypeId, bool withGeneric, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId refTypeId, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void OutputLineTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void OutputVariableTable(JDWP::RefTypeId refTypeId, JDWP::MethodId id, bool withGeneric, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId fieldId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId fieldId) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);; + static JDWP::JdwpError GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, + uint64_t value, int width) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldId fieldId, + JDWP::ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId fieldId, uint64_t value, int width) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static std::string StringToUtf8(JDWP::ObjectId strId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Thread, ThreadGroup, Frame */ - static bool GetThreadName(JDWP::ObjectId threadId, std::string& name); + static bool GetThreadName(JDWP::ObjectId threadId, std::string& name) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply); static std::string GetThreadGroupName(JDWP::ObjectId threadGroupId); - static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId threadGroupId); - static JDWP::ObjectId GetSystemThreadGroupId(); + static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId threadGroupId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::ObjectId GetSystemThreadGroupId() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static JDWP::ObjectId GetMainThreadGroupId(); static bool GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus); - static JDWP::JdwpError GetThreadSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply); + 
static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply); static bool ThreadExists(JDWP::ObjectId threadId); static bool IsSuspended(JDWP::ObjectId threadId); //static void WaitForSuspend(JDWP::ObjectId threadId); // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0, // returns all threads. - static void GetThreads(JDWP::ObjectId thread_group_id, std::vector& thread_ids); + static void GetThreads(JDWP::ObjectId thread_group_id, std::vector& thread_ids) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector& child_thread_group_ids); static int GetThreadFrameCount(JDWP::ObjectId threadId); - static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf); - - static JDWP::ObjectId GetThreadSelfId(); - static void SuspendVM(); + static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, + size_t frame_count, JDWP::ExpandBuf* buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static JDWP::ObjectId GetThreadSelfId() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void SuspendVM() + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); static void ResumeVM(); - static void SuspendThread(JDWP::ObjectId threadId); - static void ResumeThread(JDWP::ObjectId threadId); + static JDWP::JdwpError SuspendThread(JDWP::ObjectId threadId, bool request_suspension = true) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + + static void ResumeThread(JDWP::ObjectId threadId) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + 
GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void SuspendSelf(); - static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result); - static void GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen); - static void SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width); + static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, + JDWP::ObjectId* result) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, + JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, + JDWP::JdwpTag tag, uint64_t value, size_t width) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Debugger notification @@ -217,20 +281,41 @@ class Dbg { kMethodEntry = 0x04, kMethodExit = 0x08, }; - static void PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags); - static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, Method* throw_method, uint32_t throw_dex_pc, Method* catch_method, uint32_t catch_dex_pc, Throwable* exception); - static void PostThreadStart(Thread* t); - static void PostThreadDeath(Thread* t); - static void PostClassPrepare(Class* c); - - static void UpdateDebugger(int32_t dex_pc, Thread* self); - - static void WatchLocation(const JDWP::JdwpLocation* pLoc); - static void UnwatchLocation(const JDWP::JdwpLocation* pLoc); - static JDWP::JdwpError ConfigureStep(JDWP::ObjectId threadId, 
JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth); + static void PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, Method* throw_method, + uint32_t throw_dex_pc, Method* catch_method, uint32_t catch_dex_pc, + Throwable* exception) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void PostThreadStart(Thread* t) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void PostThreadDeath(Thread* t) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void PostClassPrepare(Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static void UpdateDebugger(int32_t dex_pc, Thread* self) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static void WatchLocation(const JDWP::JdwpLocation* pLoc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void UnwatchLocation(const JDWP::JdwpLocation* pLoc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static JDWP::JdwpError ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize size, + JDWP::JdwpStepDepth depth) + LOCKS_EXCLUDED(gBreakpointsLock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void UnconfigureStep(JDWP::ObjectId threadId); - static JDWP::JdwpError InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, JDWP::RefTypeId classId, JDWP::MethodId methodId, uint32_t arg_count, uint64_t* arg_values, JDWP::JdwpTag* arg_types, uint32_t options, JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, JDWP::ObjectId* pExceptObj); + static JDWP::JdwpError InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, + JDWP::RefTypeId classId, JDWP::MethodId methodId, + uint32_t arg_count, uint64_t* arg_values, + JDWP::JdwpTag* arg_types, uint32_t options, + JDWP::JdwpTag* pResultTag, uint64_t* 
pResultValue, + JDWP::ObjectId* pExceptObj) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void ExecuteMethod(DebugInvokeReq* pReq); /* perform "late registration" of an object ID */ @@ -239,22 +324,28 @@ class Dbg { /* * DDM support. */ - static void DdmSendThreadNotification(Thread* t, uint32_t type); + static void DdmSendThreadNotification(Thread* t, uint32_t type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void DdmSetThreadNotification(bool enable); static bool DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, int* pReplyLen); - static void DdmConnected(); - static void DdmDisconnected(); - static void DdmSendChunk(uint32_t type, const std::vector& bytes); - static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf); - static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count); + static void DdmConnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmDisconnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmSendChunk(uint32_t type, const std::vector& bytes) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Recent allocation tracking support. 
*/ - static void RecordAllocation(Class* type, size_t byte_count); + static void RecordAllocation(Class* type, size_t byte_count) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void SetAllocTrackingEnabled(bool enabled); static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; } - static jbyteArray GetRecentAllocations(); + static jbyteArray GetRecentAllocations() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void DumpRecentAllocations(); enum HpifWhen { @@ -263,7 +354,8 @@ class Dbg { HPIF_WHEN_NEXT_GC = 2, HPIF_WHEN_EVERY_GC = 3 }; - static int DdmHandleHpifChunk(HpifWhen when); + static int DdmHandleHpifChunk(HpifWhen when) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); enum HpsgWhen { HPSG_WHEN_NEVER = 0, @@ -275,12 +367,15 @@ class Dbg { }; static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native); - static void DdmSendHeapInfo(HpifWhen reason); - static void DdmSendHeapSegments(bool native); + static void DdmSendHeapInfo(HpifWhen reason) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmSendHeapSegments(bool native) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: - static void DdmBroadcast(bool); - static void PostThreadStartOrStop(Thread*, uint32_t); + static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void PostThreadStartOrStop(Thread*, uint32_t) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static AllocRecord* recent_allocation_records_; }; diff --git a/src/dex2oat.cc b/src/dex2oat.cc index 4587b30d7b..b68a75b18b 100644 --- a/src/dex2oat.cc +++ b/src/dex2oat.cc @@ -33,9 +33,12 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" +#include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "stl_util.h" #include "stringpiece.h" #include "timing_logger.h" +#include "well_known_classes.h" #include 
"zip_archive.h" namespace art { @@ -118,13 +121,15 @@ static void Usage(const char* fmt, ...) { class Dex2Oat { public: - static Dex2Oat* Create(Runtime::Options& options, InstructionSet instruction_set, - size_t thread_count, bool support_debugging) { - UniquePtr runtime(CreateRuntime(options, instruction_set)); - if (runtime.get() == NULL) { - return NULL; + static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, InstructionSet instruction_set, + size_t thread_count, bool support_debugging) + SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + if (!CreateRuntime(options, instruction_set)) { + *p_dex2oat = NULL; + return false; } - return new Dex2Oat(runtime.release(), instruction_set, thread_count, support_debugging); + *p_dex2oat = new Dex2Oat(Runtime::Current(), instruction_set, thread_count, support_debugging); + return true; } ~Dex2Oat() { @@ -133,7 +138,8 @@ class Dex2Oat { } // Make a list of descriptors for classes to include in the image - const std::set* GetImageClassDescriptors(const char* image_classes_filename) { + const std::set* GetImageClassDescriptors(const char* image_classes_filename) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { UniquePtr image_classes_file(new std::ifstream(image_classes_filename, std::ifstream::in)); if (image_classes_file.get() == NULL) { LOG(ERROR) << "Failed to open image classes file " << image_classes_filename; @@ -206,14 +212,10 @@ class Dex2Oat { bool image, const std::set* image_classes, bool dump_stats, - bool dump_timings) { + bool dump_timings) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create - UniquePtr > class_loader(new SirtRef(NULL)); - if (class_loader.get() == NULL) { - LOG(ERROR) << "Failed to create SirtRef for class loader"; - return NULL; - } - + jobject class_loader = NULL; if (!boot_image_option.empty()) { ClassLinker* class_linker = 
Runtime::Current()->GetClassLinker(); std::vector class_path_files(dex_files); @@ -221,7 +223,12 @@ class Dex2Oat { for (size_t i = 0; i < class_path_files.size(); i++) { class_linker->RegisterDexFile(*class_path_files[i]); } - class_loader.get()->reset(PathClassLoader::AllocCompileTime(class_path_files)); + ScopedObjectAccessUnchecked soa(Thread::Current()); + soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader); + ScopedLocalRef class_loader_local(soa.Env(), + soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader)); + class_loader = soa.Env()->NewGlobalRef(class_loader_local.get()); + Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files); } UniquePtr compiler(new Compiler(instruction_set_, @@ -236,7 +243,11 @@ class Dex2Oat { compiler->SetBitcodeFileName(bitcode_filename); #endif - compiler->CompileAll(class_loader->get(), dex_files); + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + + compiler->CompileAll(class_loader, dex_files); + + Thread::Current()->TransitionFromSuspendedToRunnable(); std::string image_file_location; uint32_t image_file_location_checksum = 0; @@ -251,7 +262,7 @@ class Dex2Oat { } if (!OatWriter::Create(oat_file, - class_loader->get(), + class_loader, dex_files, image_file_location_checksum, image_file_location, @@ -267,7 +278,8 @@ class Dex2Oat { const std::set* image_classes, const std::string& oat_filename, const std::string& oat_location, - const Compiler& compiler) { + const Compiler& compiler) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { ImageWriter image_writer(image_classes); if (!image_writer.Write(image_filename, image_base, oat_filename, oat_location, compiler)) { LOG(ERROR) << "Failed to create image file " << image_filename; @@ -286,13 +298,13 @@ class Dex2Oat { start_ns_(NanoTime()) { } - static Runtime* CreateRuntime(Runtime::Options& options, InstructionSet instruction_set) { - Runtime* runtime = Runtime::Create(options, false); - if 
(runtime == NULL) { + static bool CreateRuntime(Runtime::Options& options, InstructionSet instruction_set) + SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + if (!Runtime::Create(options, false)) { LOG(ERROR) << "Failed to create runtime"; - return NULL; + return false; } - + Runtime* runtime = Runtime::Current(); // if we loaded an existing image, we will reuse values from the image roots. if (!runtime->HasJniDlsymLookupStub()) { runtime->SetJniDlsymLookupStub(Compiler::CreateJniDlsymLookupStub(instruction_set)); @@ -316,11 +328,12 @@ class Dex2Oat { } } runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod()); - return runtime; + return true; } static void ResolveExceptionsForMethod(MethodHelper* mh, - std::set >& exceptions_to_resolve) { + std::set >& exceptions_to_resolve) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile::CodeItem* code_item = mh->GetCodeItem(); if (code_item == NULL) { return; // native or abstract method @@ -355,7 +368,9 @@ class Dex2Oat { } } } - static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg) { + + static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::set >* exceptions_to_resolve = reinterpret_cast >*>(arg); MethodHelper mh; @@ -371,7 +386,9 @@ class Dex2Oat { } return true; } - static bool RecordImageClassesVisitor(Class* klass, void* arg) { + + static bool RecordImageClassesVisitor(Class* klass, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::set* image_classes = reinterpret_cast*>(arg); if (klass->IsArrayClass() || klass->IsPrimitive()) { return true; @@ -684,8 +701,18 @@ static int dex2oat(int argc, char** argv) { options.push_back(std::make_pair(runtime_args[i], reinterpret_cast(NULL))); } - UniquePtr dex2oat(Dex2Oat::Create(options, instruction_set, thread_count, - support_debugging)); + Dex2Oat* p_dex2oat; + if 
(!Dex2Oat::Create(&p_dex2oat, options, instruction_set, thread_count, support_debugging)) { + LOG(ERROR) << "Failed to create dex2oat"; + return EXIT_FAILURE; + } + UniquePtr dex2oat(p_dex2oat); + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more managable ScopedObjectAccess. + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + // Whilst we're in native take the opportunity to initialize well known classes. + WellKnownClasses::InitClasses(Thread::Current()->GetJniEnv()); + ScopedObjectAccess soa(Thread::Current()); // If --image-classes was specified, calculate the full list of classes to include in the image UniquePtr > image_classes(NULL); @@ -744,12 +771,15 @@ static int dex2oat(int argc, char** argv) { return EXIT_SUCCESS; } - if (!dex2oat->CreateImageFile(image_filename, - image_base, - image_classes.get(), - oat_filename, - oat_location, - *compiler.get())) { + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + bool image_creation_success = dex2oat->CreateImageFile(image_filename, + image_base, + image_classes.get(), + oat_filename, + oat_location, + *compiler.get()); + Thread::Current()->TransitionFromSuspendedToRunnable(); + if (!image_creation_success) { return EXIT_FAILURE; } diff --git a/src/dex_cache.h b/src/dex_cache.h index f604701645..b3c5ce6e91 100644 --- a/src/dex_cache.h +++ b/src/dex_cache.h @@ -38,11 +38,12 @@ class MANAGED DexCache : public ObjectArray { ObjectArray* types, ObjectArray* methods, ObjectArray* fields, - ObjectArray* initialized_static_storage); + ObjectArray* initialized_static_storage) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Fixup(Method* trampoline); + void Fixup(Method* trampoline) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - String* GetLocation() const { + String* GetLocation() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return 
Get(kLocation)->AsString(); } @@ -61,43 +62,49 @@ class MANAGED DexCache : public ObjectArray { kResolvedMethods * sizeof(Object*)); } - size_t NumStrings() const { + size_t NumStrings() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetStrings()->GetLength(); } - size_t NumResolvedTypes() const { + size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetResolvedTypes()->GetLength(); } - size_t NumResolvedMethods() const { + size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetResolvedMethods()->GetLength(); } - size_t NumResolvedFields() const { + size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetResolvedFields()->GetLength(); } - size_t NumInitializedStaticStorage() const { + size_t NumInitializedStaticStorage() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetInitializedStaticStorage()->GetLength(); } - String* GetResolvedString(uint32_t string_idx) const { + String* GetResolvedString(uint32_t string_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetStrings()->Get(string_idx); } - void SetResolvedString(uint32_t string_idx, String* resolved) { + void SetResolvedString(uint32_t string_idx, String* resolved) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { GetStrings()->Set(string_idx, resolved); } - Class* GetResolvedType(uint32_t type_idx) const { + Class* GetResolvedType(uint32_t type_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetResolvedTypes()->Get(type_idx); } - void SetResolvedType(uint32_t type_idx, Class* resolved) { + void SetResolvedType(uint32_t type_idx, Class* resolved) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { GetResolvedTypes()->Set(type_idx, resolved); } - Method* GetResolvedMethod(uint32_t method_idx) const { + Method* 
GetResolvedMethod(uint32_t method_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method = GetResolvedMethods()->Get(method_idx); // Hide resolution trampoline methods from the caller if (method != NULL && method->GetDexMethodIndex() == DexFile::kDexNoIndex16) { @@ -108,31 +115,39 @@ class MANAGED DexCache : public ObjectArray { } } - void SetResolvedMethod(uint32_t method_idx, Method* resolved) { + void SetResolvedMethod(uint32_t method_idx, Method* resolved) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { GetResolvedMethods()->Set(method_idx, resolved); } - Field* GetResolvedField(uint32_t field_idx) const { + Field* GetResolvedField(uint32_t field_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetResolvedFields()->Get(field_idx); } - void SetResolvedField(uint32_t field_idx, Field* resolved) { + void SetResolvedField(uint32_t field_idx, Field* resolved) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { GetResolvedFields()->Set(field_idx, resolved); } - ObjectArray* GetStrings() const { + ObjectArray* GetStrings() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return static_cast*>(GetNonNull(kStrings)); } - ObjectArray* GetResolvedTypes() const { + ObjectArray* GetResolvedTypes() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedTypes)); } - ObjectArray* GetResolvedMethods() const { + ObjectArray* GetResolvedMethods() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedMethods)); } - ObjectArray* GetResolvedFields() const { + ObjectArray* GetResolvedFields() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedFields)); } - ObjectArray* GetInitializedStaticStorage() const { + ObjectArray* GetInitializedStaticStorage() const + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return static_cast*>(GetNonNull(kInitializedStaticStorage)); } @@ -151,7 +166,8 @@ class MANAGED DexCache : public ObjectArray { kMax = 6, }; - Object* GetNonNull(ArrayIndex array_index) const { + Object* GetNonNull(ArrayIndex array_index) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Object* obj = Get(array_index); DCHECK(obj != NULL); return obj; diff --git a/src/dex_cache_test.cc b/src/dex_cache_test.cc index 6340d79089..da021251c3 100644 --- a/src/dex_cache_test.cc +++ b/src/dex_cache_test.cc @@ -27,6 +27,7 @@ namespace art { class DexCacheTest : public CommonTest {}; TEST_F(DexCacheTest, Open) { + ScopedObjectAccess soa(Thread::Current()); SirtRef dex_cache(class_linker_->AllocDexCache(*java_lang_dex_file_)); ASSERT_TRUE(dex_cache.get() != NULL); diff --git a/src/dex_file.cc b/src/dex_file.cc index 2b81e72544..e763e671f9 100644 --- a/src/dex_file.cc +++ b/src/dex_file.cc @@ -220,18 +220,37 @@ DexFile::~DexFile() { // the global reference table is otherwise empty! } -jobject DexFile::GetDexObject(JNIEnv* env) const { - MutexLock mu(dex_object_lock_); - if (dex_object_ != NULL) { - return dex_object_; +class ScopedJniMonitorLock { + public: + ScopedJniMonitorLock(JNIEnv* env, jobject locked) : env_(env), locked_(locked){ + env->MonitorEnter(locked_); } + ~ScopedJniMonitorLock() { + env_->MonitorExit(locked_); + } + private: + JNIEnv* const env_; + const jobject locked_; +}; +jobject DexFile::GetDexObject(JNIEnv* env) const { + { + ScopedJniMonitorLock lock(env, WellKnownClasses::com_android_dex_Dex); + if (dex_object_ != NULL) { + return dex_object_; + } + } void* address = const_cast(reinterpret_cast(begin_)); jobject byte_buffer = env->NewDirectByteBuffer(address, size_); if (byte_buffer == NULL) { return NULL; } + ScopedJniMonitorLock lock(env, WellKnownClasses::com_android_dex_Dex); + // Re-test to see if someone beat us to the creation when we had the lock released. 
+ if (dex_object_ != NULL) { + return dex_object_; + } jvalue args[1]; args[0].l = byte_buffer; jobject local = env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex, diff --git a/src/dex_file.h b/src/dex_file.h index f1f76a8eca..5f33ef8a32 100644 --- a/src/dex_file.h +++ b/src/dex_file.h @@ -773,7 +773,8 @@ class DexFile { // Returns -2 for native methods (as expected in exception traces). // // This is used by runtime; therefore use art::Method not art::DexFile::Method. - int32_t GetLineNumFromPC(const Method* method, uint32_t rel_pc) const; + int32_t GetLineNumFromPC(const Method* method, uint32_t rel_pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb, @@ -833,7 +834,6 @@ class DexFile { location_(location), location_checksum_(location_checksum), mem_map_(mem_map), - dex_object_lock_("a dex_object_lock_"), dex_object_(NULL), header_(0), string_ids_(0), @@ -892,8 +892,7 @@ class DexFile { UniquePtr mem_map_; // A cached com.android.dex.Dex instance, possibly NULL. Use GetDexObject. - mutable Mutex dex_object_lock_; - mutable jobject dex_object_ GUARDED_BY(dex_object_lock_); + mutable jobject dex_object_; // Points to the header section. 
const Header* header_; @@ -1105,9 +1104,11 @@ class Field; class EncodedStaticFieldValueIterator { public: EncodedStaticFieldValueIterator(const DexFile& dex_file, DexCache* dex_cache, - ClassLinker* linker, const DexFile::ClassDef& class_def); + ClassLinker* linker, const DexFile::ClassDef& class_def) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void ReadValueToField(Field* field) const; + void ReadValueToField(Field* field) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool HasNext() { return pos_ < array_size_; } diff --git a/src/exception_test.cc b/src/exception_test.cc index 5c7e1a635c..06ae7fb139 100644 --- a/src/exception_test.cc +++ b/src/exception_test.cc @@ -19,7 +19,7 @@ #include "dex_file.h" #include "gtest/gtest.h" #include "runtime.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "UniquePtr.h" @@ -30,7 +30,8 @@ class ExceptionTest : public CommonTest { virtual void SetUp() { CommonTest::SetUp(); - SirtRef class_loader(LoadDex("ExceptionHandle")); + ScopedObjectAccess soa(Thread::Current()); + SirtRef class_loader(soa.Decode(LoadDex("ExceptionHandle"))); my_klass_ = class_linker_->FindClass("LExceptionHandle;", class_loader.get()); ASSERT_TRUE(my_klass_ != NULL); class_linker_->EnsureInitialized(my_klass_, false, true); @@ -116,7 +117,11 @@ TEST_F(ExceptionTest, FindCatchHandler) { } TEST_F(ExceptionTest, StackTraceElement) { + Thread::Current()->TransitionFromSuspendedToRunnable(); runtime_->Start(); + Thread* thread = Thread::Current(); + JNIEnv* env = thread->GetJniEnv(); + ScopedObjectAccess soa(env); std::vector fake_stack; ASSERT_EQ(kStackAlignment, 16); @@ -149,7 +154,6 @@ TEST_F(ExceptionTest, StackTraceElement) { fake_stack.push_back(0); // Set up thread to appear as if we called out of method_g_ at pc dex 3 - Thread* thread = Thread::Current(); thread->SetTopOfStack(&fake_stack[0], method_g_->ToNativePC(dex_pc) + 2); // return pc #else // 
Create/push fake 20-byte shadow frame for method g @@ -171,14 +175,12 @@ TEST_F(ExceptionTest, StackTraceElement) { thread->PushShadowFrame(reinterpret_cast(&fake_stack[0])); #endif - JNIEnv* env = thread->GetJniEnv(); - ScopedJniThreadState ts(env); - jobject internal = thread->CreateInternalStackTrace(ts); + jobject internal = thread->CreateInternalStackTrace(soa); ASSERT_TRUE(internal != NULL); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); ASSERT_TRUE(ste_array != NULL); ObjectArray* trace_array = - ts.Decode*>(ste_array); + soa.Decode*>(ste_array); ASSERT_TRUE(trace_array != NULL); ASSERT_TRUE(trace_array->Get(0) != NULL); diff --git a/src/heap.cc b/src/heap.cc index 626adf9728..658755e293 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -31,10 +31,8 @@ #include "object.h" #include "object_utils.h" #include "os.h" -#include "scoped_heap_lock.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock_releaser.h" #include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "stl_util.h" #include "thread_list.h" @@ -136,26 +134,26 @@ static bool GenerateImage(const std::string& image_file_name) { } Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity, - const std::string& original_image_file_name) - : lock_(NULL), - alloc_space_(NULL), + const std::string& original_image_file_name, bool concurrent_gc) + : alloc_space_(NULL), card_table_(NULL), + concurrent_gc_(concurrent_gc), + have_zygote_space_(false), card_marking_disabled_(false), is_gc_running_(false), concurrent_start_bytes_(std::numeric_limits::max()), concurrent_start_size_(128 * KB), concurrent_min_free_(256 * KB), - try_running_gc_(false), - requesting_gc_(false), num_bytes_allocated_(0), num_objects_allocated_(0), last_trim_time_(0), + try_running_gc_(false), + requesting_gc_(false), reference_referent_offset_(0), reference_queue_offset_(0), reference_queueNext_offset_(0), 
reference_pendingNext_offset_(0), finalizer_reference_zombie_offset_(0), - have_zygote_space_(false), target_utilization_(0.5), verify_objects_(false) { if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { @@ -246,8 +244,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity, // It's still too early to take a lock because there are no threads yet, // but we can create the heap lock now. We don't create it earlier to // make it clear that you can't use locks during heap initialization. - lock_.reset(new Mutex("Heap lock", kHeapLock)); - condition_.reset(new ConditionVariable("Heap condition variable")); + statistics_lock_ = new Mutex("statistics lock"); + gc_complete_lock_ = new Mutex("GC complete lock"); + gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable")); if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "Heap() exiting"; @@ -263,6 +262,7 @@ class SpaceSorter { }; void Heap::AddSpace(Space* space) { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); DCHECK(space != NULL); DCHECK(space->GetLiveBitmap() != NULL); live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap()); @@ -280,6 +280,9 @@ Heap::~Heap() { // all daemon threads are suspended, and we also know that the threads list have been deleted, so // those threads can't resume. We're the only running thread, and we can do whatever we like... 
STLDeleteElements(&spaces_); + delete statistics_lock_; + delete gc_complete_lock_; + } Space* Heap::FindSpaceFromObject(const Object* obj) const { @@ -326,38 +329,39 @@ Object* Heap::AllocObject(Class* c, size_t byte_count) { int64_t total_bytes_free; size_t max_contiguous_allocation; - { - ScopedHeapLock heap_lock; - DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) || - (c->IsVariableSize() || c->GetObjectSize() == byte_count) || - strlen(ClassHelper(c).GetDescriptor()) == 0); - DCHECK_GE(byte_count, sizeof(Object)); - Object* obj = AllocateLocked(byte_count); - if (obj != NULL) { - obj->SetClass(c); - if (Dbg::IsAllocTrackingEnabled()) { - Dbg::RecordAllocation(c, byte_count); - } - - if (!is_gc_running_ && num_bytes_allocated_ >= concurrent_start_bytes_) { - // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint. - SirtRef ref(obj); - RequestConcurrentGC(); - } - VerifyObject(obj); + DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) || + (c->IsVariableSize() || c->GetObjectSize() == byte_count) || + strlen(ClassHelper(c).GetDescriptor()) == 0); + DCHECK_GE(byte_count, sizeof(Object)); + Object* obj = Allocate(byte_count); + if (obj != NULL) { + obj->SetClass(c); + if (Dbg::IsAllocTrackingEnabled()) { + Dbg::RecordAllocation(c, byte_count); + } + bool request_concurrent_gc; + { + MutexLock mu(*statistics_lock_); + request_concurrent_gc = num_bytes_allocated_ >= concurrent_start_bytes_; + } + if (request_concurrent_gc) { + // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint. + SirtRef ref(obj); + RequestConcurrentGC(); + } + VerifyObject(obj); - // Additional verification to ensure that we did not allocate into a zygote space. - DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace()); + // Additional verification to ensure that we did not allocate into a zygote space. 
+ DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace()); - return obj; - } - total_bytes_free = GetFreeMemory(); - max_contiguous_allocation = 0; - // TODO: C++0x auto - for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) { - if ((*cur)->IsAllocSpace()) { - (*cur)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation); - } + return obj; + } + total_bytes_free = GetFreeMemory(); + max_contiguous_allocation = 0; + // TODO: C++0x auto + for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) { + if ((*cur)->IsAllocSpace()) { + (*cur)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation); } } @@ -387,7 +391,7 @@ bool Heap::IsHeapAddress(const Object* obj) { } bool Heap::IsLiveObjectLocked(const Object* obj) { - lock_->AssertHeld(); + GlobalSynchronization::heap_bitmap_lock_->AssertReaderHeld(); return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj); } @@ -399,7 +403,7 @@ void Heap::VerifyObject(const Object* obj) { return; } { - ScopedHeapLock heap_lock; + ReaderMutexLock mu(GlobalSynchronization::heap_bitmap_lock_); Heap::VerifyObjectLocked(obj); } } @@ -413,7 +417,7 @@ void Heap::DumpSpaces() { } void Heap::VerifyObjectLocked(const Object* obj) { - lock_->AssertHeld(); + GlobalSynchronization::heap_bitmap_lock_->AssertReaderHeld(); if (!IsAligned(obj)) { LOG(FATAL) << "Object isn't aligned: " << obj; } else if (!GetLiveBitmap()->Test(obj)) { @@ -455,35 +459,35 @@ void Heap::VerificationCallback(Object* obj, void* arg) { } void Heap::VerifyHeap() { - ScopedHeapLock heap_lock; + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); GetLiveBitmap()->Walk(Heap::VerificationCallback, this); } -void Heap::RecordAllocationLocked(AllocSpace* space, const Object* obj) { -#ifndef NDEBUG - if (Runtime::Current()->IsStarted()) { - lock_->AssertHeld(); +void Heap::RecordAllocation(AllocSpace* space, const Object* obj) { + { + MutexLock mu(*statistics_lock_); + size_t 
size = space->AllocationSize(obj); + DCHECK_GT(size, 0u); + num_bytes_allocated_ += size; + num_objects_allocated_ += 1; + + if (Runtime::Current()->HasStatsEnabled()) { + RuntimeStats* global_stats = Runtime::Current()->GetStats(); + RuntimeStats* thread_stats = Thread::Current()->GetStats(); + ++global_stats->allocated_objects; + ++thread_stats->allocated_objects; + global_stats->allocated_bytes += size; + thread_stats->allocated_bytes += size; + } } -#endif - size_t size = space->AllocationSize(obj); - DCHECK_GT(size, 0u); - num_bytes_allocated_ += size; - num_objects_allocated_ += 1; - - if (Runtime::Current()->HasStatsEnabled()) { - RuntimeStats* global_stats = Runtime::Current()->GetStats(); - RuntimeStats* thread_stats = Thread::Current()->GetStats(); - ++global_stats->allocated_objects; - ++thread_stats->allocated_objects; - global_stats->allocated_bytes += size; - thread_stats->allocated_bytes += size; + { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + live_bitmap_->Set(obj); } - - live_bitmap_->Set(obj); } -void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) { - lock_->AssertHeld(); +void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) { + MutexLock mu(*statistics_lock_); if (freed_objects < num_objects_allocated_) { num_objects_allocated_ -= freed_objects; @@ -506,32 +510,39 @@ void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) { } } -Object* Heap::AllocateLocked(size_t size) { - lock_->AssertHeld(); - - // Try the default alloc space first. 
- Object* obj = AllocateLocked(alloc_space_, size); +Object* Heap::Allocate(size_t size) { + Object* obj = Allocate(alloc_space_, size); if (obj != NULL) { - RecordAllocationLocked(alloc_space_, obj); + RecordAllocation(alloc_space_, obj); return obj; } return NULL; } -Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) { - lock_->AssertHeld(); - +Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) { + Thread* self = Thread::Current(); // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are // done in the runnable state where suspension is expected. - DCHECK_EQ(Thread::Current()->GetState(), kRunnable); - Thread::Current()->AssertThreadSuspensionIsAllowable(); +#ifndef NDEBUG + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(self->GetState(), kRunnable); + } + self->AssertThreadSuspensionIsAllowable(); +#endif // Fail impossible allocations if (alloc_size > space->Capacity()) { // On failure collect soft references WaitForConcurrentGcToComplete(); - CollectGarbageInternal(false, false, true); + if (Runtime::Current()->HasStatsEnabled()) { + ++Runtime::Current()->GetStats()->gc_for_alloc_count; + ++Thread::Current()->GetStats()->gc_for_alloc_count; + } + self->TransitionFromRunnableToSuspended(kWaitingPerformingGc); + CollectGarbageInternal(false, true); + self->TransitionFromSuspendedToRunnable(); return NULL; } @@ -540,43 +551,40 @@ Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) { return ptr; } - // The allocation failed. If the GC is running, block until it completes and retry. - if (is_gc_running_) { - // The GC is concurrently tracing the heap. Release the heap lock, wait for the GC to - // complete, and retrying allocating. - WaitForConcurrentGcToComplete(); - ptr = space->AllocWithoutGrowth(alloc_size); - if (ptr != NULL) { - return ptr; + // The allocation failed. 
If the GC is running, block until it completes else request a + // foreground partial collection. + if (!WaitForConcurrentGcToComplete()) { + // No concurrent GC so perform a foreground collection. + if (Runtime::Current()->HasStatsEnabled()) { + ++Runtime::Current()->GetStats()->gc_for_alloc_count; + ++Thread::Current()->GetStats()->gc_for_alloc_count; } + self->TransitionFromRunnableToSuspended(kWaitingPerformingGc); + CollectGarbageInternal(have_zygote_space_, false); + self->TransitionFromSuspendedToRunnable(); } - // Another failure. Our thread was starved or there may be too many - // live objects. Try a foreground GC. This will have no effect if - // the concurrent GC is already running. - if (Runtime::Current()->HasStatsEnabled()) { - ++Runtime::Current()->GetStats()->gc_for_alloc_count; - ++Thread::Current()->GetStats()->gc_for_alloc_count; + ptr = space->AllocWithoutGrowth(alloc_size); + if (ptr != NULL) { + return ptr; } - if (have_zygote_space_) { - // We don't need a WaitForConcurrentGcToComplete here since we checked is_gc_running_ earlier - // and we are in a heap lock. Try partial GC first. - CollectGarbageInternal(true, false, false); + if (!have_zygote_space_) { + // Partial GC didn't free enough memory, try a full GC. + if (Runtime::Current()->HasStatsEnabled()) { + ++Runtime::Current()->GetStats()->gc_for_alloc_count; + ++Thread::Current()->GetStats()->gc_for_alloc_count; + } + self->TransitionFromRunnableToSuspended(kWaitingPerformingGc); + CollectGarbageInternal(false, false); + self->TransitionFromSuspendedToRunnable(); ptr = space->AllocWithoutGrowth(alloc_size); if (ptr != NULL) { return ptr; } } - // Partial GC didn't free enough memory, try a full GC. - CollectGarbageInternal(false, false, false); - ptr = space->AllocWithoutGrowth(alloc_size); - if (ptr != NULL) { - return ptr; - } - - // Even that didn't work; this is an exceptional state. + // Allocations have failed after GCs; this is an exceptional state. 
// Try harder, growing the heap if necessary. ptr = space->AllocWithGrowth(alloc_size); if (ptr != NULL) { @@ -595,13 +603,20 @@ Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) { // OLD-TODO: wait for the finalizers from the previous GC to finish VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation"; + + if (Runtime::Current()->HasStatsEnabled()) { + ++Runtime::Current()->GetStats()->gc_for_alloc_count; + ++Thread::Current()->GetStats()->gc_for_alloc_count; + } // We don't need a WaitForConcurrentGcToComplete here either. - CollectGarbageInternal(false, false, true); + self->TransitionFromRunnableToSuspended(kWaitingPerformingGc); + CollectGarbageInternal(false, true); + self->TransitionFromSuspendedToRunnable(); ptr = space->AllocWithGrowth(alloc_size); if (ptr != NULL) { return ptr; } - + // Allocation failed. return NULL; } @@ -621,12 +636,14 @@ int64_t Heap::GetTotalMemory() { } int64_t Heap::GetFreeMemory() { + MutexLock mu(*statistics_lock_); return GetMaxMemory() - num_bytes_allocated_; } class InstanceCounter { public: InstanceCounter(Class* c, bool count_assignable) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : class_(c), count_assignable_(count_assignable), count_(0) { } @@ -634,12 +651,13 @@ class InstanceCounter { return count_; } - static void Callback(Object* o, void* arg) { + static void Callback(Object* o, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { reinterpret_cast(arg)->VisitInstance(o); } private: - void VisitInstance(Object* o) { + void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* instance_class = o->GetClass(); if (count_assignable_) { if (instance_class == class_) { @@ -658,23 +676,24 @@ class InstanceCounter { }; int64_t Heap::CountInstances(Class* c, bool count_assignable) { - ScopedHeapLock heap_lock; + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); InstanceCounter 
counter(c, count_assignable); GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter); return counter.GetCount(); } void Heap::CollectGarbage(bool clear_soft_references) { - ScopedHeapLock heap_lock; // If we just waited for a GC to complete then we do not need to do another // GC unless we clear soft references. if (!WaitForConcurrentGcToComplete() || clear_soft_references) { - CollectGarbageInternal(have_zygote_space_, true, clear_soft_references); + ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc); + CollectGarbageInternal(have_zygote_space_, clear_soft_references); } } void Heap::PreZygoteFork() { - ScopedHeapLock heap_lock; + static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock); + MutexLock mu(zygote_creation_lock_); // Try to see if we have any Zygote spaces. if (have_zygote_space_) { @@ -702,20 +721,59 @@ void Heap::PreZygoteFork() { } } -void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_soft_references) { - lock_->AssertHeld(); +void Heap::CollectGarbageInternal(bool partial_gc, bool clear_soft_references) { + GlobalSynchronization::mutator_lock_->AssertNotHeld(); +#ifndef NDEBUG + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc); + } +#endif - CHECK(!is_gc_running_) << "Attempted recursive GC"; - is_gc_running_ = true; + // Ensure there is only one GC at a time. + bool start_collect = false; + while (!start_collect) { + { + MutexLock mu(*gc_complete_lock_); + if (!is_gc_running_) { + is_gc_running_ = true; + start_collect = true; + } + } + if (!start_collect) { + WaitForConcurrentGcToComplete(); + // TODO: if another thread beat this one to do the GC, perhaps we should just return here? + // Not doing at the moment to ensure soft references are cleared. 
+ } + } + gc_complete_lock_->AssertNotHeld(); + if (concurrent_gc_) { + CollectGarbageConcurrentMarkSweepPlan(partial_gc, clear_soft_references); + } else { + CollectGarbageMarkSweepPlan(partial_gc, clear_soft_references); + } + gc_complete_lock_->AssertNotHeld(); + MutexLock mu(*gc_complete_lock_); + is_gc_running_ = false; + // Wake anyone who may have been waiting for the GC to complete. + gc_complete_cond_->Broadcast(); +} +void Heap::CollectGarbageMarkSweepPlan(bool partial_gc, bool clear_soft_references) { TimingLogger timings("CollectGarbageInternal"); - uint64_t t0 = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0; + uint64_t t0 = NanoTime(), dirty_end = 0; + // Suspend all threads are get exclusive access to the heap. ThreadList* thread_list = Runtime::Current()->GetThreadList(); thread_list->SuspendAll(); timings.AddSplit("SuspendAll"); + GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); - size_t initial_size = num_bytes_allocated_; + size_t initial_size; + { + MutexLock mu(*statistics_lock_); + initial_size = num_bytes_allocated_; + } Object* cleared_references = NULL; { MarkSweep mark_sweep(mark_stack_.get()); @@ -735,8 +793,6 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s mod_union_table_->ClearCards(*it); } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) { zygote_mod_union_table_->ClearCards(space); - } else if (concurrent) { - card_table_->ClearSpaceCards(space); } } timings.AddSplit("ClearCards"); @@ -746,6 +802,7 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s zygote_mod_union_table_->Verify(); #endif + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); if (partial_gc) { // Copy the mark bits over from the live bits, do this as early as possible or else we can // accidentally un-mark roots. 
@@ -760,15 +817,6 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s // Roots are marked on the bitmap and the mark_stack is empty. DCHECK(mark_sweep.IsMarkStackEmpty()); - if (concurrent) { - // We need to resume before unlocking or else a thread waiting for the - // heap lock would re-suspend since we have not yet called ResumeAll. - thread_list->ResumeAll(); - Unlock(); - root_end = NanoTime(); - timings.AddSplit("RootEnd"); - } - // Update zygote mod union table. if (partial_gc) { zygote_mod_union_table_->Update(); @@ -790,21 +838,6 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s mark_sweep.RecursiveMark(partial_gc); timings.AddSplit(partial_gc ? "PartialMark" : "RecursiveMark"); - if (concurrent) { - dirty_begin = NanoTime(); - Lock(); - thread_list->SuspendAll(); - timings.AddSplit("ReSuspend"); - - // Re-mark root set. - mark_sweep.ReMarkRoots(); - timings.AddSplit("ReMarkRoots"); - - // Scan dirty objects, this is only required if we are not doing concurrent GC. - mark_sweep.RecursiveMarkDirtyObjects(); - timings.AddSplit("RecursiveMarkDirtyObjects"); - } - mark_sweep.ProcessReferences(clear_soft_references); timings.AddSplit("ProcessReferences"); @@ -826,32 +859,204 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s mark_sweep.VerifyImageRoots(); timings.AddSplit("VerifyImageRoots"); - if (concurrent) { - thread_list->ResumeAll(); - dirty_end = NanoTime(); - Unlock(); - } - mark_sweep.Sweep(partial_gc); timings.AddSplit("Sweep"); cleared_references = mark_sweep.GetClearedReferences(); } - if (concurrent) { - // Relock since we unlocked earlier. - // TODO: We probably don't need to have the heap locked for all remainder of the function, except for GrowForUtilization. 
- Lock(); - } - GrowForUtilization(); timings.AddSplit("GrowForUtilization"); - if (!concurrent) { + thread_list->ResumeAll(); + dirty_end = NanoTime(); + + EnqueueClearedReferences(&cleared_references); + RequestHeapTrim(); + timings.AddSplit("Finish"); + + if (VLOG_IS_ON(gc)) { + uint64_t t1 = NanoTime(); + + MutexLock mu(*statistics_lock_); + // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging. + // Reason: For CMS sometimes initial_size < num_bytes_allocated_ results in overflow (3GB freed message). + size_t bytes_freed = initial_size - num_bytes_allocated_; + uint64_t duration_ns = t1 - t0; + duration_ns -= duration_ns % 1000; + + // If the GC was slow, then print timings in the log. + if (duration_ns > MsToNs(50)) { + uint64_t markSweepTime = (dirty_end - t0) / 1000 * 1000; + LOG(INFO) << (partial_gc ? "Partial " : "") + << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, " + << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", " + << "paused " << PrettyDuration(markSweepTime) + << ", total " << PrettyDuration(duration_ns); + } + } + Dbg::GcDidFinish(); + if (VLOG_IS_ON(heap)) { + timings.Dump(); + } +} + +void Heap::CollectGarbageConcurrentMarkSweepPlan(bool partial_gc, bool clear_soft_references) { + TimingLogger timings("CollectGarbageInternal"); + uint64_t t0 = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0; + + // Suspend all threads are get exclusive access to the heap. 
+ ThreadList* thread_list = Runtime::Current()->GetThreadList(); + thread_list->SuspendAll(); + timings.AddSplit("SuspendAll"); + GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + + size_t initial_size; + { + MutexLock mu(*statistics_lock_); + initial_size = num_bytes_allocated_; + } + Object* cleared_references = NULL; + { + MarkSweep mark_sweep(mark_stack_.get()); + timings.AddSplit("ctor"); + + mark_sweep.Init(); + timings.AddSplit("Init"); + + // Make sure that the tables have the correct pointer for the mark sweep. + mod_union_table_->Init(&mark_sweep); + zygote_mod_union_table_->Init(&mark_sweep); + + // Clear image space cards and keep track of cards we cleared in the mod-union table. + for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) { + Space* space = *it; + if (space->IsImageSpace()) { + mod_union_table_->ClearCards(*it); + } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) { + zygote_mod_union_table_->ClearCards(space); + } else { + card_table_->ClearSpaceCards(space); + } + } + timings.AddSplit("ClearCards"); + +#if VERIFY_MOD_UNION + mod_union_table_->Verify(); + zygote_mod_union_table_->Verify(); +#endif + + if (partial_gc) { + // Copy the mark bits over from the live bits, do this as early as possible or else we can + // accidentally un-mark roots. + // Needed for scanning dirty objects. + mark_sweep.CopyMarkBits(); + timings.AddSplit("CopyMarkBits"); + } + + { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + mark_sweep.MarkRoots(); + timings.AddSplit("MarkRoots"); + } + + // Roots are marked on the bitmap and the mark_stack is empty. + DCHECK(mark_sweep.IsMarkStackEmpty()); + + // Allow mutators to go again, acquire share on mutator_lock_ to continue. 
+ thread_list->ResumeAll(); + { + ReaderMutexLock reader_lock(*GlobalSynchronization::mutator_lock_); + root_end = NanoTime(); + timings.AddSplit("RootEnd"); + + { + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + // Update zygote mod union table. + if (partial_gc) { + zygote_mod_union_table_->Update(); + timings.AddSplit("UpdateZygoteModUnionTable"); + + zygote_mod_union_table_->MarkReferences(); + timings.AddSplit("ZygoteMarkReferences"); + } + + // Processes the cards we cleared earlier and adds their objects into the mod-union table. + mod_union_table_->Update(); + timings.AddSplit("UpdateModUnionTable"); + } + { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + // Scans all objects in the mod-union table. + mod_union_table_->MarkReferences(); + timings.AddSplit("MarkImageToAllocSpaceReferences"); + + // Recursively mark all the non-image bits set in the mark bitmap. + mark_sweep.RecursiveMark(partial_gc); + timings.AddSplit(partial_gc ? "PartialMark" : "RecursiveMark"); + } + } + // Release share on mutator_lock_ and then get exclusive access. + dirty_begin = NanoTime(); + thread_list->SuspendAll(); + timings.AddSplit("ReSuspend"); + GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + + { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + // Re-mark root set. + mark_sweep.ReMarkRoots(); + timings.AddSplit("ReMarkRoots"); + + // Scan dirty objects, this is only required if we are not doing concurrent GC. + mark_sweep.RecursiveMarkDirtyObjects(); + timings.AddSplit("RecursiveMarkDirtyObjects"); + } + { + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + mark_sweep.ProcessReferences(clear_soft_references); + timings.AddSplit("ProcessReferences"); + } + // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps + // these bitmaps. 
Doing this enables us to sweep with the heap unlocked since new allocations + // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark + // bit instead, resulting in no new allocated objects being incorrectly freed by sweep. + { + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) { + Space* space = *it; + // We never allocate into zygote spaces. + if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) { + live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap()); + mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap()); + space->AsAllocSpace()->SwapBitmaps(); + } + } + } + + if (kIsDebugBuild) { + // Verify that we only reach marked objects from the image space. + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + mark_sweep.VerifyImageRoots(); + timings.AddSplit("VerifyImageRoots"); + } thread_list->ResumeAll(); dirty_end = NanoTime(); + GlobalSynchronization::mutator_lock_->AssertNotHeld(); + + { + // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above). + WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + mark_sweep.Sweep(partial_gc); + timings.AddSplit("Sweep"); + } + + cleared_references = mark_sweep.GetClearedReferences(); } + GrowForUtilization(); + timings.AddSplit("GrowForUtilization"); + EnqueueClearedReferences(&cleared_references); RequestHeapTrim(); timings.AddSplit("Finish"); @@ -859,6 +1064,7 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s if (VLOG_IS_ON(gc)) { uint64_t t1 = NanoTime(); + MutexLock mu(*statistics_lock_); // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging. // Reason: For CMS sometimes initial_size < num_bytes_allocated_ results in overflow (3GB freed message). 
size_t bytes_freed = initial_size - num_bytes_allocated_; @@ -866,61 +1072,55 @@ void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_s duration_ns -= duration_ns % 1000; // If the GC was slow, then print timings in the log. - if (concurrent) { - uint64_t pause_roots = (root_end - t0) / 1000 * 1000; - uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000; - if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) { - LOG(INFO) << (partial_gc ? "Partial " : "") - << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, " - << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", " - << "paused " << PrettyDuration(pause_roots) << "+" << PrettyDuration(pause_dirty) - << ", total " << PrettyDuration(duration_ns); - } - } else { - if (duration_ns > MsToNs(50)) { - uint64_t markSweepTime = (dirty_end - t0) / 1000 * 1000; - LOG(INFO) << (partial_gc ? "Partial " : "") - << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, " - << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", " - << "paused " << PrettyDuration(markSweepTime) - << ", total " << PrettyDuration(duration_ns); - } + uint64_t pause_roots = (root_end - t0) / 1000 * 1000; + uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000; + if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) { + LOG(INFO) << (partial_gc ? "Partial " : "") + << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, " + << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", " + << "paused " << PrettyDuration(pause_roots) << "+" << PrettyDuration(pause_dirty) + << ", total " << PrettyDuration(duration_ns); } } Dbg::GcDidFinish(); if (VLOG_IS_ON(heap)) { timings.Dump(); } - - is_gc_running_ = false; - - // Wake anyone who may have been waiting for the GC to complete. 
- condition_->Broadcast(); } bool Heap::WaitForConcurrentGcToComplete() { - lock_->AssertHeld(); - - // Busy wait for GC to finish - if (is_gc_running_) { - uint64_t wait_start = NanoTime(); - - do { - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); - ScopedThreadListLockReleaser list_lock_releaser; - condition_->Wait(*lock_); - } while (is_gc_running_); - uint64_t wait_time = NanoTime() - wait_start; - if (wait_time > MsToNs(5)) { - LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time); + if (concurrent_gc_) { + bool do_wait = false; + uint64_t wait_start; + { + // Check if GC is running holding gc_complete_lock_. + MutexLock mu(*gc_complete_lock_); + if (is_gc_running_) { + wait_start = NanoTime(); + do_wait = true; + } + } + if (do_wait) { + // We must wait, change thread state then sleep on gc_complete_cond_; + ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete); + { + MutexLock mu(*gc_complete_lock_); + while (is_gc_running_) { + gc_complete_cond_->Wait(*gc_complete_lock_); + } + } + uint64_t wait_time = NanoTime() - wait_start; + if (wait_time > MsToNs(5)) { + LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time); + } + return true; } - DCHECK(!is_gc_running_); - return true; } return false; } void Heap::DumpForSigQuit(std::ostream& os) { + MutexLock mu(*statistics_lock_); os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << "; " << num_objects_allocated_ << " objects\n"; @@ -950,56 +1150,42 @@ static const size_t kHeapIdealFree = 2 * MB; static const size_t kHeapMinFree = kHeapIdealFree / 4; void Heap::GrowForUtilization() { - lock_->AssertHeld(); - - // We know what our utilization is at this moment. - // This doesn't actually resize any memory. It just lets the heap grow more - // when necessary. 
- size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization()); + size_t target_size; + bool use_footprint_limit = false; + { + MutexLock mu(*statistics_lock_); + // We know what our utilization is at this moment. + // This doesn't actually resize any memory. It just lets the heap grow more when necessary. + target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization(); + + if (target_size > num_bytes_allocated_ + kHeapIdealFree) { + target_size = num_bytes_allocated_ + kHeapIdealFree; + } else if (target_size < num_bytes_allocated_ + kHeapMinFree) { + target_size = num_bytes_allocated_ + kHeapMinFree; + } - if (target_size > num_bytes_allocated_ + kHeapIdealFree) { - target_size = num_bytes_allocated_ + kHeapIdealFree; - } else if (target_size < num_bytes_allocated_ + kHeapMinFree) { - target_size = num_bytes_allocated_ + kHeapMinFree; + // Calculate when to perform the next ConcurrentGC. + if (GetTotalMemory() - num_bytes_allocated_ < concurrent_min_free_) { + // Not enough free memory to perform concurrent GC. + concurrent_start_bytes_ = std::numeric_limits::max(); + } else { + // Compute below to avoid holding both the statistics and the alloc space lock + use_footprint_limit = true; + } } - - // Calculate when to perform the next ConcurrentGC. - if (GetTotalMemory() - num_bytes_allocated_ < concurrent_min_free_) { - // Not enough free memory to perform concurrent GC. 
- concurrent_start_bytes_ = std::numeric_limits::max(); - } else { - concurrent_start_bytes_ = alloc_space_->GetFootprintLimit() - concurrent_start_size_; + if (use_footprint_limit) { + size_t foot_print_limit = alloc_space_->GetFootprintLimit(); + MutexLock mu(*statistics_lock_); + concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_; } - SetIdealFootprint(target_size); } void Heap::ClearGrowthLimit() { - ScopedHeapLock heap_lock; WaitForConcurrentGcToComplete(); alloc_space_->ClearGrowthLimit(); } -pid_t Heap::GetLockOwner() { - return lock_->GetOwner(); -} - -void Heap::Lock() { - // Grab the lock, but put ourselves into kVmWait if it looks - // like we're going to have to wait on the mutex. This prevents - // deadlock if another thread is calling CollectGarbageInternal, - // since they will have the heap lock and be waiting for mutators to - // suspend. - if (!lock_->TryLock()) { - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); - lock_->Lock(); - } -} - -void Heap::Unlock() { - lock_->Unlock(); -} - void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset, MemberOffset reference_queue_offset, MemberOffset reference_queueNext_offset, @@ -1076,19 +1262,41 @@ Object* Heap::DequeuePendingReference(Object** list) { } void Heap::AddFinalizerReference(Thread* self, Object* object) { - ScopedJniThreadState ts(self); + ScopedObjectAccess soa(self); JValue args[1]; args[0].SetL(object); - ts.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args, NULL); + soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, + NULL, args, NULL); +} + +size_t Heap::GetBytesAllocated() const { + MutexLock mu(*statistics_lock_); + return num_bytes_allocated_; +} + +size_t Heap::GetObjectsAllocated() const { + MutexLock mu(*statistics_lock_); + return num_objects_allocated_; +} + +size_t Heap::GetConcurrentStartSize() const { + MutexLock mu(*statistics_lock_); + return 
concurrent_start_size_; +} + +size_t Heap::GetConcurrentMinFree() const { + MutexLock mu(*statistics_lock_); + return concurrent_min_free_; } void Heap::EnqueueClearedReferences(Object** cleared) { DCHECK(cleared != NULL); if (*cleared != NULL) { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); JValue args[1]; args[0].SetL(*cleared); - ts.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(ts.Self(), NULL, args, NULL); + soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), + NULL, args, NULL); *cleared = NULL; } } @@ -1106,29 +1314,27 @@ void Heap::RequestConcurrentGC() { JNIEnv* env = Thread::Current()->GetJniEnv(); DCHECK(WellKnownClasses::java_lang_Daemons != NULL); DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL); - env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_requestGC); + env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, + WellKnownClasses::java_lang_Daemons_requestGC); CHECK(!env->ExceptionCheck()); requesting_gc_ = false; } void Heap::ConcurrentGC() { - if (Runtime::Current()->IsShuttingDown()) { + if (Runtime::Current()->IsShuttingDown() || !concurrent_gc_) { return; } - ScopedHeapLock heap_lock; - // We shouldn't need a WaitForConcurrentGcToComplete here since only - // concurrent GC resumes threads before the GC is completed and this function - // is only called within the GC daemon thread. - CHECK(!is_gc_running_); - // Current thread needs to be runnable or else we can't suspend all threads. - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + // TODO: We shouldn't need a WaitForConcurrentGcToComplete here since only + // concurrent GC resumes threads before the GC is completed and this function + // is only called within the GC daemon thread. 
if (!WaitForConcurrentGcToComplete()) { - CollectGarbageInternal(have_zygote_space_, true, false); + // Start a concurrent GC as one wasn't in progress + ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc); + CollectGarbageInternal(have_zygote_space_, false); } } void Heap::Trim(AllocSpace* alloc_space) { - lock_->AssertHeld(); WaitForConcurrentGcToComplete(); alloc_space->Trim(); } @@ -1140,12 +1346,15 @@ void Heap::RequestHeapTrim() { // to utilization (which is probably inversely proportional to how much benefit we can expect). // We could try mincore(2) but that's only a measure of how many pages we haven't given away, // not how much use we're making of those pages. - float utilization = static_cast(num_bytes_allocated_) / alloc_space_->Size(); uint64_t ms_time = NsToMs(NanoTime()); - if (utilization > 0.75f || ms_time - last_trim_time_ < 2 * 1000) { - // Don't bother trimming the heap if it's more than 75% utilized, or if a - // heap trim occurred in the last two seconds. - return; + { + MutexLock mu(*statistics_lock_); + float utilization = static_cast(num_bytes_allocated_) / alloc_space_->Size(); + if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) { + // Don't bother trimming the heap if it's more than 75% utilized, or if a + // heap trim occurred in the last two seconds. 
+ return; + } } if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) { // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time) @@ -1156,7 +1365,8 @@ void Heap::RequestHeapTrim() { JNIEnv* env = Thread::Current()->GetJniEnv(); DCHECK(WellKnownClasses::java_lang_Daemons != NULL); DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL); - env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_requestHeapTrim); + env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, + WellKnownClasses::java_lang_Daemons_requestHeapTrim); CHECK(!env->ExceptionCheck()); } diff --git a/src/heap.h b/src/heap.h index e908248f93..89b6ac45cf 100644 --- a/src/heap.h +++ b/src/heap.h @@ -62,12 +62,14 @@ class LOCKABLE Heap { // image_file_names names specify Spaces to load based on // ImageWriter output. explicit Heap(size_t starting_size, size_t growth_limit, size_t capacity, - const std::string& image_file_name); + const std::string& image_file_name, bool concurrent_gc); ~Heap(); // Allocates and initializes storage for an object instance. - Object* AllocObject(Class* klass, size_t num_bytes); + Object* AllocObject(Class* klass, size_t num_bytes) + LOCKS_EXCLUDED(statistics_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Check sanity of given reference. Requires the heap lock. #if VERIFY_OBJECT_ENABLED @@ -86,10 +88,12 @@ class LOCKABLE Heap { // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. - bool IsLiveObjectLocked(const Object* obj); + bool IsLiveObjectLocked(const Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Initiates an explicit garbage collection. 
- void CollectGarbage(bool clear_soft_references); + void CollectGarbage(bool clear_soft_references) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); // Does a concurrent GC, should only be called by the GC daemon thread // through runtime. @@ -100,10 +104,12 @@ class LOCKABLE Heap { // Implements java.lang.Runtime.totalMemory. int64_t GetTotalMemory(); // Implements java.lang.Runtime.freeMemory. - int64_t GetFreeMemory(); + int64_t GetFreeMemory() LOCKS_EXCLUDED(statistics_lock_); // Implements VMDebug.countInstancesOfClass. - int64_t CountInstances(Class* c, bool count_assignable); + int64_t CountInstances(Class* c, bool count_assignable) + LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to // implement dalvik.system.VMRuntime.clearGrowthLimit. @@ -130,14 +136,6 @@ class LOCKABLE Heap { // true if we waited for the GC to complete. bool WaitForConcurrentGcToComplete(); - pid_t GetLockOwner(); // For SignalCatcher. - void AssertLockHeld() { - lock_->AssertHeld(); - } - void AssertLockNotHeld() { - lock_->AssertNotHeld(); - } - const Spaces& GetSpaces() { return spaces_; } @@ -178,8 +176,7 @@ class LOCKABLE Heap { verify_objects_ = false; } - // Callers must hold the heap lock. - void RecordFreeLocked(size_t freed_objects, size_t freed_bytes); + void RecordFree(size_t freed_objects, size_t freed_bytes) LOCKS_EXCLUDED(statistics_lock_); // Must be called if a field of an Object in the heap changes, and before any GC safe-point. // The call is not needed if NULL is stored in the field. 
@@ -190,7 +187,8 @@ class LOCKABLE Heap { } // Write barrier for array operations that update many field positions - void WriteBarrierArray(const Object* dst, int /*start_offset*/, size_t /*length TODO: element_count or byte_count?*/) { + void WriteBarrierArray(const Object* dst, int /*start_offset*/, + size_t /*length TODO: element_count or byte_count?*/) { if (UNLIKELY(!card_marking_disabled_)) { card_table_->MarkCard(dst); } @@ -207,34 +205,24 @@ class LOCKABLE Heap { void AddFinalizerReference(Thread* self, Object* object); - size_t GetBytesAllocated() { return num_bytes_allocated_; } - size_t GetObjectsAllocated() { return num_objects_allocated_; } - - size_t GetConcurrentStartSize() const { return concurrent_start_size_; } - - void SetConcurrentStartSize(size_t size) { - concurrent_start_size_ = size; - } - - size_t GetConcurrentMinFree() const { return concurrent_min_free_; } - - void SetConcurrentMinFree(size_t size) { - concurrent_min_free_ = size; - } + size_t GetBytesAllocated() const LOCKS_EXCLUDED(statistics_lock_); + size_t GetObjectsAllocated() const LOCKS_EXCLUDED(statistics_lock_); + size_t GetConcurrentStartSize() const LOCKS_EXCLUDED(statistics_lock_); + size_t GetConcurrentMinFree() const LOCKS_EXCLUDED(statistics_lock_); // Functions for getting the bitmap which corresponds to an object's address. // This is probably slow, TODO: use better data structure like binary tree . 
Space* FindSpaceFromObject(const Object*) const; - void DumpForSigQuit(std::ostream& os); + void DumpForSigQuit(std::ostream& os) LOCKS_EXCLUDED(statistics_lock_); void Trim(AllocSpace* alloc_space); - HeapBitmap* GetLiveBitmap() { + HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { return live_bitmap_.get(); } - HeapBitmap* GetMarkBitmap() { + HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { return mark_bitmap_.get(); } @@ -248,11 +236,11 @@ class LOCKABLE Heap { private: // Allocates uninitialized storage. - Object* AllocateLocked(size_t num_bytes); - Object* AllocateLocked(AllocSpace* space, size_t num_bytes); - - void Lock() EXCLUSIVE_LOCK_FUNCTION(); - void Unlock() UNLOCK_FUNCTION(); + Object* Allocate(size_t num_bytes) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* Allocate(AllocSpace* space, size_t num_bytes) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Pushes a list of cleared references out to the managed heap. void EnqueueClearedReferences(Object** cleared_references); @@ -260,35 +248,42 @@ class LOCKABLE Heap { void RequestHeapTrim(); void RequestConcurrentGC(); - void RecordAllocationLocked(AllocSpace* space, const Object* object); - - // TODO: can we teach GCC to understand the weird locking in here? 
- void CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_soft_references) NO_THREAD_SAFETY_ANALYSIS; + void RecordAllocation(AllocSpace* space, const Object* object) + LOCKS_EXCLUDED(statistics_lock_, GlobalSynchronization::heap_bitmap_lock_); + + void CollectGarbageInternal(bool partial_gc, bool clear_soft_references) + LOCKS_EXCLUDED(gc_complete_lock_, + GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + void CollectGarbageMarkSweepPlan(bool partial_gc, bool clear_soft_references) + LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_); + void CollectGarbageConcurrentMarkSweepPlan(bool partial_gc, bool clear_soft_references) + LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_); // Given the current contents of the alloc space, increase the allowed heap footprint to match // the target utilization ratio. This should only be called immediately after a full garbage // collection. void GrowForUtilization(); - size_t GetPercentFree(); - - void AddSpace(Space* space); + size_t GetPercentFree() EXCLUSIVE_LOCKS_REQUIRED(statistics_lock_); - void VerifyObjectLocked(const Object *obj); + void AddSpace(Space* space) LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_); - void VerifyHeapLocked(); + void VerifyObjectLocked(const Object *obj) + SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_); - static void VerificationCallback(Object* obj, void* arg); - - UniquePtr lock_; - UniquePtr condition_; + static void VerificationCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_); Spaces spaces_; // The alloc space which we are currently allocating into. 
AllocSpace* alloc_space_; - // The mod-union table remembers all of the referneces from the image space to the alloc / + // The mod-union table remembers all of the references from the image space to the alloc / // zygote spaces. UniquePtr mod_union_table_; @@ -297,20 +292,44 @@ class LOCKABLE Heap { UniquePtr card_table_; + // True for concurrent mark sweep GC, false for mark sweep. + const bool concurrent_gc_; + + // If we have a zygote space. + bool have_zygote_space_; + // Used by the image writer to disable card marking on copied objects // TODO: remove bool card_marking_disabled_; + // Guards access to the state of GC, associated conditional variable is used to signal when a GC + // completes. + Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + UniquePtr gc_complete_cond_ GUARDED_BY(gc_complete_lock_); + // True while the garbage collector is running. - volatile bool is_gc_running_; + volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_); + + // Guards access to heap statistics, some used to calculate when concurrent GC should occur. + // TODO: move bytes/objects allocated to thread-locals and remove need for lock? + Mutex* statistics_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; // Bytes until concurrent GC starts. - size_t concurrent_start_bytes_; + size_t concurrent_start_bytes_ GUARDED_BY(statistics_lock_); size_t concurrent_start_size_; size_t concurrent_min_free_; - UniquePtr live_bitmap_; - UniquePtr mark_bitmap_; + // Number of bytes allocated. Adjusted after each allocation and free. + size_t num_bytes_allocated_ GUARDED_BY(statistics_lock_); + + // Number of objects allocated. Adjusted after each allocation and free. 
+ size_t num_objects_allocated_ GUARDED_BY(statistics_lock_); + + // Last trim time + uint64_t last_trim_time_; + + UniquePtr live_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); + UniquePtr mark_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); // True while the garbage collector is trying to signal the GC daemon thread. // This flag is needed to prevent recursion from occurring when the JNI calls @@ -318,20 +337,11 @@ class LOCKABLE Heap { bool try_running_gc_; // Used to ensure that we don't ever recursively request GC. - bool requesting_gc_; + volatile bool requesting_gc_; // Mark stack that we reuse to avoid re-allocating the mark stack UniquePtr mark_stack_; - // Number of bytes allocated. Adjusted after each allocation and free. - size_t num_bytes_allocated_; - - // Number of objects allocated. Adjusted after each allocation and free. - size_t num_objects_allocated_; - - // Last trim time - uint64_t last_trim_time_; - // offset of java.lang.ref.Reference.referent MemberOffset reference_referent_offset_; @@ -347,9 +357,6 @@ class LOCKABLE Heap { // offset of java.lang.ref.FinalizerReference.zombie MemberOffset finalizer_reference_zombie_offset_; - // If we have a zygote space. 
- bool have_zygote_space_; - // Target ideal heap utilization ratio float target_utilization_; diff --git a/src/heap_bitmap.h b/src/heap_bitmap.h index 433319913a..98b42b3e89 100644 --- a/src/heap_bitmap.h +++ b/src/heap_bitmap.h @@ -25,13 +25,15 @@ namespace art { class HeapBitmap { public: - bool Test(const Object* obj) { + bool Test(const Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL); return bitmap->Test(obj); } - void Clear(const Object* obj) { + void Clear(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to clear object " @@ -40,7 +42,8 @@ namespace art { return bitmap->Clear(obj); } - void Set(const Object* obj) { + void Set(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to mark object " @@ -59,7 +62,8 @@ namespace art { return NULL; } - void Walk(SpaceBitmap::Callback* callback, void* arg) { + void Walk(SpaceBitmap::Callback* callback, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { // TODO: C++0x auto for (Bitmaps::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) { (*cur)->Walk(callback, arg); @@ -67,7 +71,8 @@ namespace art { } // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. 
- void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap); + void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); HeapBitmap(Heap* heap) : heap_(heap) { diff --git a/src/heap_test.cc b/src/heap_test.cc index 48aa42535a..d846db5b7d 100644 --- a/src/heap_test.cc +++ b/src/heap_test.cc @@ -32,16 +32,18 @@ TEST_F(HeapTest, ClearGrowthLimit) { } TEST_F(HeapTest, GarbageCollectClassLinkerInit) { - // garbage is created during ClassLinker::Init - - Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); - for (size_t i = 0; i < 1024; ++i) { - SirtRef > array(ObjectArray::Alloc(c, 2048)); - for (size_t j = 0; j < 2048; ++j) { - array->Set(j, String::AllocFromModifiedUtf8("hello, world!")); + { + ScopedObjectAccess soa(Thread::Current()); + // garbage is created during ClassLinker::Init + + Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); + for (size_t i = 0; i < 1024; ++i) { + SirtRef > array(ObjectArray::Alloc(c, 2048)); + for (size_t j = 0; j < 2048; ++j) { + array->Set(j, String::AllocFromModifiedUtf8("hello, world!")); + } } } - Runtime::Current()->GetHeap()->CollectGarbage(false); } diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc index d806d71cc7..d0c87bee17 100644 --- a/src/hprof/hprof.cc +++ b/src/hprof/hprof.cc @@ -47,7 +47,7 @@ #include "object_utils.h" #include "os.h" #include "safe_map.h" -#include "scoped_heap_lock.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "stringprintf.h" #include "thread_list.h" @@ -165,8 +165,8 @@ typedef uint32_t HprofId; typedef HprofId HprofStringId; typedef HprofId HprofObjectId; typedef HprofId HprofClassObjectId; -typedef std::set ClassSet; -typedef std::set::iterator ClassSetIterator; +typedef std::set ClassSet; +typedef std::set::iterator ClassSetIterator; typedef SafeMap StringMap; typedef SafeMap::iterator StringMapIterator; @@ -401,11 +401,16 @@ class Hprof { 
free(body_data_ptr_); } - void Dump() { + void Dump() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) { // Walk the roots and the heap. current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME); Runtime::Current()->VisitRoots(RootVisitor, this); - Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(HeapBitmapCallback, this); + { + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(HeapBitmapCallback, this); + } current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_END, HPROF_TIME); current_record_.Flush(); fflush(body_fp_); @@ -464,27 +469,29 @@ class Hprof { } private: - static void RootVisitor(const Object* obj, void* arg) { + static void RootVisitor(const Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(arg != NULL); Hprof* hprof = reinterpret_cast(arg); hprof->VisitRoot(obj); } - static void HeapBitmapCallback(Object* obj, void* arg) { + static void HeapBitmapCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(obj != NULL); CHECK(arg != NULL); Hprof* hprof = reinterpret_cast(arg); hprof->DumpHeapObject(obj); } - void VisitRoot(const Object* obj); + void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - int DumpHeapObject(const Object* obj); + int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void Finish() { } - int WriteClassTable() { + int WriteClassTable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { HprofRecord* rec = ¤t_record_; uint32_t nextSerialNumber = 1; @@ -551,7 +558,8 @@ class Hprof { int MarkRootObject(const Object* obj, jobject jniObj); - HprofClassObjectId LookupClassId(const Class* c) { + HprofClassObjectId LookupClassId(Class* c) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (c == NULL) { // c is the superclass of java.lang.Object or a primitive return (HprofClassObjectId)0; @@ -585,7 +593,8 @@ class Hprof { return id; } - HprofStringId LookupClassNameId(const Class* c) { + HprofStringId LookupClassNameId(const Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } @@ -807,7 +816,7 @@ static int StackTraceSerialNumber(const Object* /*obj*/) { return HPROF_NULL_STACK_TRACE; } -int Hprof::DumpHeapObject(const Object* obj) { +int Hprof::DumpHeapObject(Object* obj) { HprofRecord* rec = ¤t_record_; HprofHeapId desiredHeap = false ? HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP; // TODO: zygote objects? @@ -847,7 +856,7 @@ int Hprof::DumpHeapObject(const Object* obj) { // allocated which hasn't been initialized yet. } else { if (obj->IsClass()) { - const Class* thisClass = obj->AsClass(); + Class* thisClass = obj->AsClass(); // obj is a ClassObject. size_t sFieldCount = thisClass->NumStaticFields(); if (sFieldCount != 0) { @@ -1053,15 +1062,11 @@ void Hprof::VisitRoot(const Object* obj) { // Otherwise, "filename" is used to create an output file. 
void DumpHeap(const char* filename, int fd, bool direct_to_ddms) { CHECK(filename != NULL); - ScopedHeapLock heap_lock; - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - - ThreadList* thread_list = Runtime::Current()->GetThreadList(); - thread_list->SuspendAll(); + Runtime::Current()->GetThreadList()->SuspendAll(); Hprof hprof(filename, fd, direct_to_ddms); hprof.Dump(); - thread_list->ResumeAll(); + Runtime::Current()->GetThreadList()->ResumeAll(); } } // namespace hprof diff --git a/src/image.h b/src/image.h index 6286411076..253b762e54 100644 --- a/src/image.h +++ b/src/image.h @@ -94,7 +94,8 @@ class PACKED ImageHeader { kImageRootsMax, }; - Object* GetImageRoot(ImageRoot image_root) const { + Object* GetImageRoot(ImageRoot image_root) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetImageRoots()->Get(image_root); } diff --git a/src/image_test.cc b/src/image_test.cc index f9c2d1c6d1..9c947c1a9a 100644 --- a/src/image_test.cc +++ b/src/image_test.cc @@ -32,19 +32,21 @@ class ImageTest : public CommonTest {}; TEST_F(ImageTest, WriteRead) { ScratchFile tmp_oat; - std::vector dex_files; - dex_files.push_back(java_lang_dex_file_); - bool success_oat = OatWriter::Create(tmp_oat.GetFile(), NULL, dex_files, 0, "", *compiler_.get()); - ASSERT_TRUE(success_oat); - - // Force all system classes into memory - for (size_t i = 0; i < java_lang_dex_file_->NumClassDefs(); ++i) { - const DexFile::ClassDef& class_def = java_lang_dex_file_->GetClassDef(i); - const char* descriptor = java_lang_dex_file_->GetClassDescriptor(class_def); - Class* klass = class_linker_->FindSystemClass(descriptor); - EXPECT_TRUE(klass != NULL) << descriptor; + { + ScopedObjectAccess soa(Thread::Current()); + std::vector dex_files; + dex_files.push_back(java_lang_dex_file_); + bool success_oat = OatWriter::Create(tmp_oat.GetFile(), NULL, dex_files, 0, "", *compiler_.get()); + ASSERT_TRUE(success_oat); + + // Force all system classes into memory + for (size_t 
i = 0; i < java_lang_dex_file_->NumClassDefs(); ++i) { + const DexFile::ClassDef& class_def = java_lang_dex_file_->GetClassDef(i); + const char* descriptor = java_lang_dex_file_->GetClassDescriptor(class_def); + Class* klass = class_linker_->FindSystemClass(descriptor); + EXPECT_TRUE(klass != NULL) << descriptor; + } } - ImageWriter writer(NULL); ScratchFile tmp_image; const uintptr_t requested_image_base = 0x60000000; @@ -81,7 +83,15 @@ TEST_F(ImageTest, WriteRead) { image.append(tmp_image.GetFilename()); options.push_back(std::make_pair(image.c_str(), reinterpret_cast(NULL))); - runtime_.reset(Runtime::Create(options, false)); + if (!Runtime::Create(options, false)) { + LOG(FATAL) << "Failed to create runtime"; + return; + } + runtime_.reset(Runtime::Current()); + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more managable ScopedObjectAccess. + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + ScopedObjectAccess soa(Thread::Current()); ASSERT_TRUE(runtime_.get() != NULL); class_linker_ = runtime_->GetClassLinker(); diff --git a/src/image_writer.cc b/src/image_writer.cc index 59b7e80417..7c88c955f1 100644 --- a/src/image_writer.cc +++ b/src/image_writer.cc @@ -35,6 +35,7 @@ #include "object.h" #include "object_utils.h" #include "runtime.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "UniquePtr.h" #include "utils.h" @@ -71,9 +72,13 @@ bool ImageWriter::Write(const std::string& image_filename, } class_linker->RegisterOatFile(*oat_file_); - PruneNonImageClasses(); // Remove junk - ComputeLazyFieldsForImageClasses(); // Add useful information - ComputeEagerResolvedStrings(); + { + Thread::Current()->TransitionFromSuspendedToRunnable(); + PruneNonImageClasses(); // Remove junk + ComputeLazyFieldsForImageClasses(); // Add useful information + ComputeEagerResolvedStrings(); + Thread::Current()->TransitionFromRunnableToSuspended(kNative); 
+ } heap->CollectGarbage(false); // Remove garbage // Trim size of alloc spaces // TODO: C++0x auto @@ -90,9 +95,13 @@ bool ImageWriter::Write(const std::string& image_filename, CheckNonImageClassesRemoved(); #endif heap->DisableCardMarking(); - CalculateNewObjectOffsets(); - CopyAndFixupObjects(); - PatchOatCodeAndMethods(compiler); + { + Thread::Current()->TransitionFromSuspendedToRunnable(); + CalculateNewObjectOffsets(); + CopyAndFixupObjects(); + PatchOatCodeAndMethods(compiler); + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + } UniquePtr file(OS::OpenFile(image_filename.c_str(), true)); if (file.get() == NULL) { @@ -145,7 +154,7 @@ bool ImageWriter::AllocMemory() { void ImageWriter::ComputeLazyFieldsForImageClasses() { Runtime* runtime = Runtime::Current(); ClassLinker* class_linker = runtime->GetClassLinker(); - class_linker->VisitClasses(ComputeLazyFieldsForClassesVisitor, NULL); + class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL); } bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) { @@ -178,6 +187,7 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) { void ImageWriter::ComputeEagerResolvedStrings() { // TODO: Check image spaces only? + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this); } @@ -258,6 +268,7 @@ void ImageWriter::CheckNonImageClassesRemoved() { return; } + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(CheckNonImageClassesRemovedCallback, this); } @@ -392,6 +403,7 @@ void ImageWriter::CopyAndFixupObjects() { // TODO: heap validation can't handle this fix up pass heap->DisableObjectValidation(); // TODO: Image spaces only? 
+ ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this); } @@ -568,38 +580,43 @@ void ImageWriter::FixupFields(const Object* orig, } } -static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) { +static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ScopedObjectAccessUnchecked soa(Thread::Current()); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); Method* method = class_linker->ResolveMethod(patch->GetDexFile(), patch->GetReferrerMethodIdx(), - patch->GetDexCache(), + dex_cache, NULL, patch->GetReferrerIsDirect()); CHECK(method != NULL) << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx(); CHECK(!method->IsRuntimeMethod()) << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx(); - CHECK(patch->GetDexCache()->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx()) == method) + CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx()) == method) << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " " - << PrettyMethod(patch->GetDexCache()->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx())) << " " + << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx())) << " " << PrettyMethod(method); return method; } -static Method* GetTargetMethod(const Compiler::PatchInformation* patch) { +static Method* GetTargetMethod(const Compiler::PatchInformation* patch) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); Method* method = class_linker->ResolveMethod(patch->GetDexFile(), patch->GetTargetMethodIdx(), - patch->GetDexCache(), + dex_cache, NULL, 
patch->GetTargetIsDirect()); CHECK(method != NULL) << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx(); CHECK(!method->IsRuntimeMethod()) << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx(); - CHECK(patch->GetDexCache()->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method) + CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method) << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " " - << PrettyMethod(patch->GetDexCache()->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " " + << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " " << PrettyMethod(method); return method; } diff --git a/src/image_writer.h b/src/image_writer.h index 07d55dc42b..f768d8707d 100644 --- a/src/image_writer.h +++ b/src/image_writer.h @@ -39,7 +39,8 @@ namespace art { class ImageWriter { public: explicit ImageWriter(const std::set* image_classes) - : image_end_(0), image_begin_(NULL), image_classes_(image_classes), oat_begin_(NULL) {} + : oat_file_(NULL), image_end_(0), image_begin_(NULL), image_classes_(image_classes), + oat_begin_(NULL) {} ~ImageWriter() {} @@ -47,13 +48,15 @@ class ImageWriter { uintptr_t image_begin, const std::string& oat_filename, const std::string& oat_location, - const Compiler& compiler); + const Compiler& compiler) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); private: bool AllocMemory(); // we use the lock word to store the offset of the object in the image - void AssignImageOffset(Object* object) { + void AssignImageOffset(Object* object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(object != NULL); SetImageOffset(object, image_end_); image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment @@ -105,38 +108,55 @@ class ImageWriter { return oat_begin_ + offset; } - bool IsImageClass(const Class* klass); + bool IsImageClass(const Class* klass) 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void DumpImageClasses(); - void ComputeLazyFieldsForImageClasses(); - static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg); + void ComputeLazyFieldsForImageClasses() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution void ComputeEagerResolvedStrings(); - static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg); + static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void PruneNonImageClasses(); - static bool NonImageClassesVisitor(Class* c, void* arg); + void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static bool NonImageClassesVisitor(Class* c, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void CheckNonImageClassesRemoved(); - static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg); + static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void CalculateNewObjectOffsets(); - ObjectArray* CreateImageRoots() const; - static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg); + void CalculateNewObjectOffsets() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + ObjectArray* CreateImageRoots() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void CopyAndFixupObjects(); - static void CopyAndFixupObjectsCallback(Object* obj, void* arg); - void FixupClass(const Class* orig, Class* copy); - void FixupMethod(const Method* orig, 
Method* copy); - void FixupObject(const Object* orig, Object* copy); - void FixupObjectArray(const ObjectArray* orig, ObjectArray* copy); - void FixupInstanceFields(const Object* orig, Object* copy); - void FixupStaticFields(const Class* orig, Class* copy); - void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static); - - void PatchOatCodeAndMethods(const Compiler& compiler); - void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value); + static void CopyAndFixupObjectsCallback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupClass(const Class* orig, Class* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupMethod(const Method* orig, Method* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupObject(const Object* orig, Object* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupObjectArray(const ObjectArray* orig, ObjectArray* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupInstanceFields(const Object* orig, Object* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupStaticFields(const Class* orig, Class* copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + void PatchOatCodeAndMethods(const Compiler& compiler) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); SafeMap offsets_; diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc index 81b87ef01a..958531df56 100644 --- a/src/indirect_reference_table.cc +++ b/src/indirect_reference_table.cc @@ -18,6 +18,7 @@ #include 
"jni_internal.h" #include "reference_table.h" #include "runtime.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "utils.h" @@ -91,7 +92,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { if (topIndex == max_entries_) { LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow " << "(max=" << max_entries_ << ")\n" - << Dumpable(*this); + << MutatorLockedDumpable(*this); } size_t newSize = alloc_entries_ * 2; @@ -101,13 +102,14 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { DCHECK_GT(newSize, alloc_entries_); table_ = reinterpret_cast(realloc(table_, newSize * sizeof(const Object*))); - slot_data_ = reinterpret_cast(realloc(slot_data_, newSize * sizeof(IndirectRefSlot))); + slot_data_ = reinterpret_cast(realloc(slot_data_, + newSize * sizeof(IndirectRefSlot))); if (table_ == NULL || slot_data_ == NULL) { LOG(FATAL) << "JNI ERROR (app bug): unable to expand " << kind_ << " table (from " << alloc_entries_ << " to " << newSize << ", max=" << max_entries_ << ")\n" - << Dumpable(*this); + << MutatorLockedDumpable(*this); } // Clear the newly-allocated slot_data_ elements. @@ -150,9 +152,10 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { } void IndirectReferenceTable::AssertEmpty() { - if (begin() != end()) { + if (UNLIKELY(begin() != end())) { + ScopedObjectAccess soa(Thread::Current()); LOG(FATAL) << "Internal Error: non-empty local reference table\n" - << Dumpable(*this); + << MutatorLockedDumpable(*this); } } diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h index 710e43f980..c3e17b0f89 100644 --- a/src/indirect_reference_table.h +++ b/src/indirect_reference_table.h @@ -257,7 +257,8 @@ class IndirectReferenceTable { * Returns NULL if the table is full (max entries reached, or alloc * failed during expansion). 
*/ - IndirectRef Add(uint32_t cookie, const Object* obj); + IndirectRef Add(uint32_t cookie, const Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Given an IndirectRef in the table, return the Object it refers to. @@ -287,7 +288,7 @@ class IndirectReferenceTable { void AssertEmpty(); - void Dump(std::ostream& os) const; + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Return the #of entries in the entire table. This includes holes, and diff --git a/src/indirect_reference_table_test.cc b/src/indirect_reference_table_test.cc index 387a2cd38b..1698f18612 100644 --- a/src/indirect_reference_table_test.cc +++ b/src/indirect_reference_table_test.cc @@ -24,6 +24,7 @@ class IndirectReferenceTableTest : public CommonTest { }; TEST_F(IndirectReferenceTableTest, BasicTest) { + ScopedObjectAccess soa(Thread::Current()); static const size_t kTableInitial = 10; static const size_t kTableMax = 20; IndirectReferenceTable irt(kTableInitial, kTableMax, kGlobal); diff --git a/src/intern_table.h b/src/intern_table.h index 04c75d05ff..0d9e0971ab 100644 --- a/src/intern_table.h +++ b/src/intern_table.h @@ -41,24 +41,27 @@ class InternTable { InternTable(); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(int32_t utf16_length, const char* utf8_data); + String* InternStrong(int32_t utf16_length, const char* utf8_data) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(const char* utf8_data); + String* InternStrong(const char* utf8_data) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(String* s); + String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Interns a potentially new string in the 'weak' table. 
(See above.) - String* InternWeak(String* s); + String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Register a String trusting that it is safe to intern. // Used when reinitializing InternTable from an image. - void RegisterStrong(String* s); + void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg); + void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); - bool ContainsWeak(String* s); + bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t Size() const; @@ -69,9 +72,11 @@ class InternTable { private: typedef std::multimap Table; - String* Insert(String* s, bool is_strong); + String* Insert(String* s, bool is_strong) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - String* Lookup(Table& table, String* s, uint32_t hash_code); + String* Lookup(Table& table, String* s, uint32_t hash_code) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); String* Insert(Table& table, String* s, uint32_t hash_code); void Remove(Table& table, const String* s, uint32_t hash_code); diff --git a/src/intern_table_test.cc b/src/intern_table_test.cc index 52531cee32..0d46076382 100644 --- a/src/intern_table_test.cc +++ b/src/intern_table_test.cc @@ -24,6 +24,7 @@ namespace art { class InternTableTest : public CommonTest {}; TEST_F(InternTableTest, Intern) { + ScopedObjectAccess soa(Thread::Current()); InternTable intern_table; SirtRef foo_1(intern_table.InternStrong(3, "foo")); SirtRef foo_2(intern_table.InternStrong(3, "foo")); @@ -41,6 +42,7 @@ TEST_F(InternTableTest, Intern) { } TEST_F(InternTableTest, Size) { + ScopedObjectAccess soa(Thread::Current()); InternTable t; EXPECT_EQ(0U, t.Size()); t.InternStrong(3, "foo"); @@ -84,6 +86,7 @@ bool IsMarked(const Object* object, void* arg) { } 
TEST_F(InternTableTest, SweepInternTableWeaks) { + ScopedObjectAccess soa(Thread::Current()); InternTable t; t.InternStrong(3, "foo"); t.InternStrong(3, "bar"); @@ -98,7 +101,10 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { TestPredicate p; p.Expect(s0.get()); p.Expect(s1.get()); - t.SweepInternTableWeaks(IsMarked, &p); + { + ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + t.SweepInternTableWeaks(IsMarked, &p); + } EXPECT_EQ(2U, t.Size()); @@ -109,6 +115,7 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { } TEST_F(InternTableTest, ContainsWeak) { + ScopedObjectAccess soa(Thread::Current()); { // Strongs are never weak. InternTable t; diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h index 8534c8e5de..b80033c434 100644 --- a/src/jdwp/jdwp.h +++ b/src/jdwp/jdwp.h @@ -78,7 +78,8 @@ struct JdwpLocation { MethodId method_id; uint64_t dex_pc; }; -std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs); +std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs); bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs); @@ -118,7 +119,8 @@ struct JdwpState { * * Returns a newly-allocated JdwpState struct on success, or NULL on failure. */ - static JdwpState* Create(const JdwpOptions* options) NO_THREAD_SAFETY_ANALYSIS; // TODO: make GCC understand. + static JdwpState* Create(const JdwpOptions* options) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); ~JdwpState(); @@ -178,7 +180,7 @@ struct JdwpState { * The VM has finished initializing. Only called when the debugger is * connected at the time initialization completes. */ - bool PostVMStart(); + bool PostVMStart() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * A location of interest has been reached. 
This is used for breakpoints, @@ -190,24 +192,30 @@ struct JdwpState { * * "eventFlags" indicates the types of events that have occurred. */ - bool PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags); + bool PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * An exception has been thrown. * * Pass in a zeroed-out "*pCatchLoc" if the exception wasn't caught. */ - bool PostException(const JdwpLocation* pThrowLoc, ObjectId excepId, RefTypeId excepClassId, const JdwpLocation* pCatchLoc, ObjectId thisPtr); + bool PostException(const JdwpLocation* pThrowLoc, ObjectId excepId, RefTypeId excepClassId, + const JdwpLocation* pCatchLoc, ObjectId thisPtr) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * A thread has started or stopped. */ - bool PostThreadChange(ObjectId threadId, bool start); + bool PostThreadChange(ObjectId threadId, bool start) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Class has been prepared. */ - bool PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, int status); + bool PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, + int status) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * The VM is about to stop. @@ -215,12 +223,13 @@ struct JdwpState { bool PostVMDeath(); // Called if/when we realize we're talking to DDMS. - void NotifyDdmsActive(); + void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Send up a chunk of DDM data. */ - void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count); + void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Process a request from the debugger. 
@@ -237,13 +246,17 @@ struct JdwpState { */ bool SendRequest(ExpandBuf* pReq); - void ResetState(); + void ResetState() + LOCKS_EXCLUDED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* atomic ops to get next serial number */ uint32_t NextRequestSerial(); uint32_t NextEventSerial(); - void Run(); + void Run() + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_suspend_count_lock_); /* * Register an event by adding it to the event list. @@ -251,44 +264,60 @@ struct JdwpState { * "*pEvent" must be storage allocated with jdwpEventAlloc(). The caller * may discard its pointer after calling this. */ - JdwpError RegisterEvent(JdwpEvent* pEvent); + JdwpError RegisterEvent(JdwpEvent* pEvent) + LOCKS_EXCLUDED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Unregister an event, given the requestId. */ - void UnregisterEventById(uint32_t requestId); + void UnregisterEventById(uint32_t requestId) + LOCKS_EXCLUDED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Unregister all events. 
*/ - void UnregisterAll(); + void UnregisterAll() + LOCKS_EXCLUDED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: explicit JdwpState(const JdwpOptions* options); bool InvokeInProgress(); bool IsConnected(); - void SuspendByPolicy(JdwpSuspendPolicy suspend_policy); + void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, + ObjectId threadId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void CleanupMatchList(JdwpEvent** match_list, - int match_count) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_); + int match_count) + EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void EventFinish(ExpandBuf* pReq); void FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** match_list, - int* pMatchCount) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_); - void UnregisterEvent(JdwpEvent* pEvent) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_); + int* pMatchCount) + EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void UnregisterEvent(JdwpEvent* pEvent) + EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); public: // TODO: fix privacy const JdwpOptions* options_; private: /* wait for creation of the JDWP thread */ - Mutex thread_start_lock_; - ConditionVariable thread_start_cond_; + Mutex thread_start_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + ConditionVariable thread_start_cond_ GUARDED_BY(thread_start_lock_); pthread_t pthread_; Thread* thread_; - volatile int32_t debug_thread_started_; + volatile int32_t debug_thread_started_ GUARDED_BY(thread_start_lock_); ObjectId debug_thread_id_; private: @@ -300,14 +329,14 @@ struct JdwpState { private: // For wait-for-debugger. 
- Mutex attach_lock_; - ConditionVariable attach_cond_; + Mutex attach_lock_ ACQUIRED_AFTER(thread_start_lock_); + ConditionVariable attach_cond_ GUARDED_BY(attach_lock_); // Time of last debugger activity, in milliseconds. int64_t last_activity_time_ms_; // Global counters and a mutex to protect them. - Mutex serial_lock_; + Mutex serial_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; uint32_t request_serial_ GUARDED_BY(serial_lock_); uint32_t event_serial_ GUARDED_BY(serial_lock_); @@ -318,8 +347,8 @@ struct JdwpState { // Used to synchronize suspension of the event thread (to avoid receiving "resume" // events before the thread has finished suspending itself). - Mutex event_thread_lock_; - ConditionVariable event_thread_cond_; + Mutex event_thread_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + ConditionVariable event_thread_cond_ GUARDED_BY(event_thread_lock_); ObjectId event_thread_id_; bool ddm_is_active_; diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index 891ba537f9..0e7bb99f76 100644 --- a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -374,7 +374,8 @@ static bool PatternMatch(const char* pattern, const std::string& target) { * If we find a Count mod before rejecting an event, we decrement it. We * need to do this even if later mods cause us to ignore the event. */ -static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket) { +static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { JdwpEventMod* pMod = pEvent->mods; for (int i = pEvent->modCount; i > 0; i--, pMod++) { @@ -452,7 +453,8 @@ static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket) { * DO NOT call this multiple times for the same eventKind, as Count mods are * decremented during the scan. 
*/ -void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** match_list, int* pMatchCount) { +void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, + JdwpEvent** match_list, int* pMatchCount) { /* start after the existing entries */ match_list += *pMatchCount; @@ -490,7 +492,7 @@ static JdwpSuspendPolicy scanSuspendPolicy(JdwpEvent** match_list, int match_cou * SP_EVENT_THREAD - suspend ourselves * SP_ALL - suspend everybody except JDWP support thread */ -void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy) { +void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) { VLOG(jdwp) << "SuspendByPolicy(" << suspend_policy << ")"; if (suspend_policy == SP_NONE) { return; @@ -503,7 +505,7 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy) { } /* this is rare but possible -- see CLASS_PREPARE handling */ - if (Dbg::GetThreadSelfId() == debug_thread_id_) { + if (thread_self_id == debug_thread_id_) { LOG(INFO) << "NOTE: SuspendByPolicy not suspending JDWP thread"; return; } @@ -524,7 +526,7 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy) { } /* grab this before posting/suspending again */ - SetWaitForEventThread(Dbg::GetThreadSelfId()); + SetWaitForEventThread(thread_self_id); /* leave pReq->invoke_needed_ raised so we can check reentrancy */ Dbg::ExecuteMethod(pReq); @@ -540,6 +542,23 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy) { } } +void JdwpState::SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, + ObjectId threadId) { + Thread* self = Thread::Current(); + self->AssertThreadSuspensionIsAllowable(); + /* send request and possibly suspend ourselves */ + if (pReq != NULL) { + JDWP::ObjectId thread_self_id = Dbg::GetThreadSelfId(); + self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); + if (suspend_policy != SP_NONE) { + SetWaitForEventThread(threadId); + } 
+ EventFinish(pReq); + SuspendByPolicy(suspend_policy, thread_self_id); + self->TransitionFromSuspendedToRunnable(); + } +} + /* * Determine if there is a method invocation in progress in the current * thread. @@ -670,17 +689,7 @@ bool JdwpState::PostVMStart() { } /* send request and possibly suspend ourselves */ - if (pReq != NULL) { - int old_state = Dbg::ThreadWaiting(); - if (suspend_policy != SP_NONE) { - SetWaitForEventThread(threadId); - } - - EventFinish(pReq); - - SuspendByPolicy(suspend_policy); - Dbg::ThreadContinuing(old_state); - } + SendRequestAndPossiblySuspend(pReq, suspend_policy, threadId); return true; } @@ -787,18 +796,7 @@ bool JdwpState::PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, in CleanupMatchList(match_list, match_count); } - /* send request and possibly suspend ourselves */ - if (pReq != NULL) { - int old_state = Dbg::ThreadWaiting(); - if (suspend_policy != SP_NONE) { - SetWaitForEventThread(basket.threadId); - } - - EventFinish(pReq); - - SuspendByPolicy(suspend_policy); - Dbg::ThreadContinuing(old_state); - } + SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId); return match_count != 0; } @@ -859,17 +857,7 @@ bool JdwpState::PostThreadChange(ObjectId threadId, bool start) { CleanupMatchList(match_list, match_count); } - /* send request and possibly suspend ourselves */ - if (pReq != NULL) { - int old_state = Dbg::ThreadWaiting(); - if (suspend_policy != SP_NONE) { - SetWaitForEventThread(basket.threadId); - } - EventFinish(pReq); - - SuspendByPolicy(suspend_policy); - Dbg::ThreadContinuing(old_state); - } + SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId); return match_count != 0; } @@ -968,18 +956,7 @@ bool JdwpState::PostException(const JdwpLocation* pThrowLoc, CleanupMatchList(match_list, match_count); } - /* send request and possibly suspend ourselves */ - if (pReq != NULL) { - int old_state = Dbg::ThreadWaiting(); - if (suspend_policy != SP_NONE) { - 
SetWaitForEventThread(basket.threadId); - } - - EventFinish(pReq); - - SuspendByPolicy(suspend_policy); - Dbg::ThreadContinuing(old_state); - } + SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId); return match_count != 0; } @@ -990,7 +967,8 @@ bool JdwpState::PostException(const JdwpLocation* pThrowLoc, * Valid mods: * Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude */ -bool JdwpState::PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, int status) { +bool JdwpState::PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, + int status) { ModBasket basket; memset(&basket, 0, sizeof(basket)); @@ -1049,17 +1027,7 @@ bool JdwpState::PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std CleanupMatchList(match_list, match_count); } - /* send request and possibly suspend ourselves */ - if (pReq != NULL) { - int old_state = Dbg::ThreadWaiting(); - if (suspend_policy != SP_NONE) { - SetWaitForEventThread(basket.threadId); - } - EventFinish(pReq); - - SuspendByPolicy(suspend_policy); - Dbg::ThreadContinuing(old_state); - } + SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId); return match_count != 0; } @@ -1105,9 +1073,10 @@ void JdwpState::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) { /* * Make sure we're in VMWAIT in case the write blocks. 
*/ - int old_state = Dbg::ThreadWaiting(); + Thread* self = Thread::Current(); + self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); (*transport_->sendBufferedRequest)(this, wrapiov, iov_count + 1); - Dbg::ThreadContinuing(old_state); + self->TransitionFromSuspendedToRunnable(); } } // namespace JDWP diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc index 36fbaf1c54..fbe9192ee2 100644 --- a/src/jdwp/jdwp_handler.cc +++ b/src/jdwp/jdwp_handler.cc @@ -91,13 +91,16 @@ static void JdwpWriteValue(ExpandBuf* pReply, int width, uint64_t value) { */ static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, ObjectId thread_id, ObjectId object_id, - RefTypeId class_id, MethodId method_id, bool is_constructor) { + RefTypeId class_id, MethodId method_id, bool is_constructor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(!is_constructor || object_id != 0); uint32_t arg_count = Read4BE(&buf); VLOG(jdwp) << StringPrintf(" --> thread_id=%#llx object_id=%#llx", thread_id, object_id); - VLOG(jdwp) << StringPrintf(" class_id=%#llx method_id=%x %s.%s", class_id, method_id, Dbg::GetClassName(class_id).c_str(), Dbg::GetMethodName(class_id, method_id).c_str()); + VLOG(jdwp) << StringPrintf(" class_id=%#llx method_id=%x %s.%s", class_id, + method_id, Dbg::GetClassName(class_id).c_str(), + Dbg::GetMethodName(class_id, method_id).c_str()); VLOG(jdwp) << StringPrintf(" %d args:", arg_count); UniquePtr argTypes(arg_count > 0 ? new JdwpTag[arg_count] : NULL); @@ -110,7 +113,9 @@ static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR } uint32_t options = Read4BE(&buf); /* enum InvokeOptions bit flags */ - VLOG(jdwp) << StringPrintf(" options=0x%04x%s%s", options, (options & INVOKE_SINGLE_THREADED) ? " (SINGLE_THREADED)" : "", (options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : ""); + VLOG(jdwp) << StringPrintf(" options=0x%04x%s%s", options, + (options & INVOKE_SINGLE_THREADED) ? 
" (SINGLE_THREADED)" : "", + (options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : ""); JdwpTag resultTag; uint64_t resultValue; @@ -155,7 +160,8 @@ static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Request for version info. */ -static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { /* text information on runtime version */ std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion())); expandBufAddUtf8String(pReply, version); @@ -175,7 +181,8 @@ static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) * referenceTypeID. We need to send back more than one if the class has * been loaded by multiple class loaders. */ -static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string classDescriptor(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req for class by signature '" << classDescriptor << "'"; @@ -207,7 +214,8 @@ static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, Expa * We exclude ourselves from the list, because we don't allow ourselves * to be suspended, and that violates some JDWP expectations. */ -static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::vector thread_ids; Dbg::GetThreads(0, thread_ids); @@ -222,7 +230,8 @@ static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl /* * List all thread groups that do not have a parent. 
*/ -static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { /* * TODO: maintain a list of parentless thread groups in the VM. * @@ -244,7 +253,8 @@ static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, Expand * * All IDs are 8 bytes. */ -static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { expandBufAdd4BE(pReply, sizeof(FieldId)); expandBufAdd4BE(pReply, sizeof(MethodId)); expandBufAdd4BE(pReply, sizeof(ObjectId)); @@ -253,7 +263,8 @@ static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) return ERR_NONE; } -static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Dbg::Disposed(); return ERR_NONE; } @@ -264,7 +275,8 @@ static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) { * * This needs to increment the "suspend count" on all threads. */ -static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Dbg::SuspendVM(); return ERR_NONE; } @@ -272,7 +284,8 @@ static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) { /* * Resume execution. Decrements the "suspend count" of all threads. 
*/ -static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Dbg::ResumeVM(); return ERR_NONE; } @@ -280,7 +293,8 @@ static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) { /* * The debugger wants the entire VM to exit. */ -static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint32_t exitCode = Get4BE(buf); LOG(WARNING) << "Debugger is telling the VM to exit with code=" << exitCode; @@ -295,7 +309,8 @@ static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { * (Ctrl-Shift-I in Eclipse on an array of objects causes it to create the * string "java.util.Arrays".) */ -static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string str(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req to create string '" << str << "'"; ObjectId stringId = Dbg::CreateString(str); @@ -309,7 +324,8 @@ static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* /* * Tell the debugger what we are capable of. 
*/ -static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -320,7 +336,8 @@ static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pRe return ERR_NONE; } -static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { expandBufAddUtf8String(pReply, "/"); std::vector class_path; @@ -345,14 +362,16 @@ static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl * * Currently does nothing. */ -static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return ERR_NONE; } /* * Tell the debugger what we are capable of. 
*/ -static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -382,7 +401,8 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* return ERR_NONE; } -static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) { +static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::vector classes; Dbg::GetClassList(classes); @@ -412,15 +432,18 @@ static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status return ERR_NONE; } -static JdwpError VM_AllClasses(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_AllClasses(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return VM_AllClassesImpl(pReply, true, false); } -static JdwpError VM_AllClassesWithGeneric(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) { +static JdwpError VM_AllClassesWithGeneric(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return VM_AllClassesImpl(pReply, true, true); } -static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetModifiers(refTypeId, pReply); } @@ -428,7 +451,8 @@ static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Get 
values from static fields in a reference type. */ -static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); uint32_t field_count = Read4BE(&buf); expandBufAdd4BE(pReply, field_count); @@ -445,7 +469,8 @@ static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Get the name of the source file in which a reference type was declared. */ -static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); std::string source_file; JdwpError status = Dbg::GetSourceFile(refTypeId, source_file); @@ -459,7 +484,8 @@ static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* p /* * Return the current status of the reference type. */ -static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); JDWP::JdwpTypeTag type_tag; uint32_t class_status; @@ -474,7 +500,8 @@ static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl /* * Return interfaces implemented directly by this class. 
*/ -static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for interfaces in %#llx (%s)", refTypeId, Dbg::GetClassName(refTypeId).c_str()); return Dbg::OutputDeclaredInterfaces(refTypeId, pReply); @@ -483,7 +510,8 @@ static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* p /* * Return the class object corresponding to this type. */ -static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); ObjectId classObjectId; JdwpError status = Dbg::GetClassObject(refTypeId, classObjectId); @@ -500,12 +528,15 @@ static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* * * JDB seems interested, but DEX files don't currently support this. 
*/ -static JdwpError RT_SourceDebugExtension(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError RT_SourceDebugExtension(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { /* referenceTypeId in, string out */ return ERR_ABSENT_INFORMATION; } -static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool with_generic) { +static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, + bool with_generic) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for signature of refTypeId=%#llx", refTypeId); @@ -522,11 +553,14 @@ static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR return ERR_NONE; } -static JdwpError RT_Signature(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError RT_Signature(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, false); } -static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, true); } @@ -534,12 +568,14 @@ static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, i * Return the instance of java.lang.ClassLoader that loaded the specified * reference type, or null if it was loaded by the system loader. 
*/ -static JdwpError RT_ClassLoader(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_ClassLoader(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetClassLoader(refTypeId, pReply); } -static std::string Describe(const RefTypeId& refTypeId) { +static std::string Describe(const RefTypeId& refTypeId) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string signature("unknown"); Dbg::GetSignature(refTypeId, signature); return StringPrintf("refTypeId=%#llx (%s)", refTypeId, signature.c_str()); @@ -549,14 +585,16 @@ static std::string Describe(const RefTypeId& refTypeId) { * Given a referenceTypeId, return a block of stuff that describes the * fields declared by a class. */ -static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, true, pReply); } // Obsolete equivalent of FieldsWithGeneric, without the generic type information. -static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, false, pReply); @@ -566,14 +604,16 @@ static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Given a referenceTypeID, return a block of goodies describing the * methods declared by a class. 
*/ -static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, true, pReply); } // Obsolete equivalent of MethodsWithGeneric, without the generic type information. -static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, false, pReply); @@ -582,7 +622,8 @@ static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep /* * Return the immediate superclass of a class. */ -static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); RefTypeId superClassId; JdwpError status = Dbg::GetSuperclass(class_id, superClassId); @@ -596,7 +637,8 @@ static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* p /* * Set static class values. 
*/ -static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) { +static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); uint32_t values = Read4BE(&buf); @@ -624,7 +666,9 @@ static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) * Example: Eclipse sometimes uses java/lang/Class.forName(String s) on * values in the "variables" display. */ -static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -639,7 +683,9 @@ static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL * Example: in IntelliJ, create a watch on "new String(myByteArray)" to * see the contents of a byte[] as a string. */ -static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -659,7 +705,8 @@ static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLe /* * Create a new array object of the requested type and length. 
*/ -static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId arrayTypeId = ReadRefTypeId(&buf); uint32_t length = Read4BE(&buf); @@ -680,7 +727,8 @@ static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* /* * Return line number information for the method, if present. */ -static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -691,11 +739,15 @@ static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRe return ERR_NONE; } -static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool generic) { +static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, + bool generic) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); - VLOG(jdwp) << StringPrintf(" Req for LocalVarTab in class=%s method=%s", Dbg::GetClassName(class_id).c_str(), Dbg::GetMethodName(class_id, method_id).c_str()); + VLOG(jdwp) << StringPrintf(" Req for LocalVarTab in class=%s method=%s", + Dbg::GetClassName(class_id).c_str(), + Dbg::GetMethodName(class_id, method_id).c_str()); // We could return ERR_ABSENT_INFORMATION here if the DEX file was built without local variable // information. 
That will cause Eclipse to make a best-effort attempt at displaying local @@ -705,11 +757,15 @@ static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* return ERR_NONE; } -static JdwpError M_VariableTable(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError M_VariableTable(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, false); } -static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, true); } @@ -720,7 +776,8 @@ static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf * This can get called on different things, e.g. thread_id gets * passed in here. */ -static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for type of object_id=%#llx", object_id); return Dbg::GetReferenceType(object_id, pReply); @@ -729,7 +786,8 @@ static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf /* * Get values from the fields of an object. 
*/ -static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -751,7 +809,8 @@ static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Set values in the fields of an object. */ -static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -785,7 +844,9 @@ static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { * object), it will try to invoke the object's toString() function. This * feature becomes crucial when examining ArrayLists with Eclipse. */ -static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, + ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); ObjectId thread_id = ReadObjectId(&buf); RefTypeId class_id = ReadRefTypeId(&buf); @@ -797,7 +858,8 @@ static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL /* * Disable garbage collection of the specified object. 
*/ -static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -805,7 +867,8 @@ static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf /* * Enable garbage collection of the specified object. */ -static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) { +static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -813,7 +876,8 @@ static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf* /* * Determine whether an object has been garbage collected. */ -static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId object_id; object_id = ReadObjectId(&buf); @@ -828,7 +892,8 @@ static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* /* * Return the string value in a string object. */ -static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId stringObject = ReadObjectId(&buf); std::string str(Dbg::StringToUtf8(stringObject)); @@ -842,7 +907,8 @@ static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply /* * Return a thread's name. 
*/ -static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread %#llx", thread_id); @@ -862,7 +928,8 @@ static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) * It's supposed to remain suspended even if interpreted code wants to * resume it; only the JDI is allowed to resume it. */ -static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -870,14 +937,18 @@ static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { return ERR_THREAD_NOT_SUSPENDED; } VLOG(jdwp) << StringPrintf(" Req to suspend thread %#llx", thread_id); - Dbg::SuspendThread(thread_id); - return ERR_NONE; + Thread* self = Thread::Current(); + self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); + JdwpError result = Dbg::SuspendThread(thread_id); + self->TransitionFromSuspendedToRunnable(); + return result; } /* * Resume the specified thread. */ -static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -892,7 +963,8 @@ static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { /* * Return status of specified thread. 
*/ -static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for status of thread %#llx", thread_id); @@ -914,7 +986,8 @@ static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl /* * Return the thread group that the specified thread is a member of. */ -static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); return Dbg::GetThreadGroup(thread_id, pReply); } @@ -925,7 +998,8 @@ static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* * If the thread isn't suspended, the error code isn't defined, but should * be THREAD_NOT_SUSPENDED. */ -static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); uint32_t start_frame = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -961,7 +1035,8 @@ static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl /* * Returns the #of frames on the specified thread, which must be suspended. 
*/ -static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (!Dbg::ThreadExists(thread_id)) { @@ -984,7 +1059,8 @@ static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* p /* * Get the monitor that the thread is waiting on. */ -static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ReadObjectId(&buf); // thread_id // TODO: create an Object to represent the monitor (we're currently @@ -994,14 +1070,15 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, } /* - * Return the suspend count for the specified thread. + * Return the debug suspend count for the specified thread. * * (The thread *might* still be running -- it might not have examined * its suspend count recently.) */ -static JdwpError TR_SuspendCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TR_DebugSuspendCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); - return Dbg::GetThreadSuspendCount(thread_id, pReply); + return Dbg::GetThreadDebugSuspendCount(thread_id, pReply); } /* @@ -1009,7 +1086,8 @@ static JdwpError TR_SuspendCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* * * The Eclipse debugger recognizes "main" and "system" as special. 
*/ -static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread_group_id=%#llx", thread_group_id); @@ -1022,7 +1100,8 @@ static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply * Returns the thread group -- if any -- that contains the specified * thread group. */ -static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id); @@ -1035,7 +1114,8 @@ static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep * Return the active threads and thread groups that are part of the * specified thread group. */ -static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for threads in thread_group_id=%#llx", thread_group_id); @@ -1059,7 +1139,8 @@ static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Return the #of components in the array. 
*/ -static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for length of array %#llx", arrayId); @@ -1078,7 +1159,8 @@ static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl /* * Return the values from an array. */ -static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -1090,17 +1172,20 @@ static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Set values in an array. */ -static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t values = Read4BE(&buf); - VLOG(jdwp) << StringPrintf(" Req to set array values %#llx first=%d count=%d", arrayId, firstIndex, values); + VLOG(jdwp) << StringPrintf(" Req to set array values %#llx first=%d count=%d", arrayId, + firstIndex, values); return Dbg::SetArrayElements(arrayId, firstIndex, values, buf); } -static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ReadObjectId(&buf); // classLoaderObject // TODO: we should only return classes which have the given class loader as a defining or // initiating 
loader. The former would be easy; the latter is hard, because we don't have @@ -1113,7 +1198,8 @@ static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandB * * Reply with a requestID. */ -static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const uint8_t* origBuf = buf; uint8_t eventKind = Read1(&buf); @@ -1282,7 +1368,8 @@ static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, Expan * Clear an event. Failure to find an event with a matching ID is a no-op * and does not return an error. */ -static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint8_t eventKind; eventKind = Read1(&buf); uint32_t requestId = Read4BE(&buf); @@ -1297,7 +1384,8 @@ static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) /* * Return the values of arguments and local variables. */ -static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1322,7 +1410,8 @@ static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR /* * Set the values of arguments and local variables. 
*/ -static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { +static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1345,7 +1434,8 @@ static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) { /* * Returns the value of "this" for the specified frame. */ -static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); @@ -1361,7 +1451,8 @@ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* p return rc; } - VLOG(jdwp) << StringPrintf(" Req for 'this' in thread_id=%#llx frame=%lld --> %#llx '%c'", thread_id, frame_id, id, static_cast(tag)); + VLOG(jdwp) << StringPrintf(" Req for 'this' in thread_id=%#llx frame=%lld --> %#llx '%c'", + thread_id, frame_id, id, static_cast(tag)); expandBufAdd1(pReply, tag); expandBufAddObjectId(pReply, id); @@ -1375,16 +1466,19 @@ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * reused, whereas ClassIds can be recycled like any other object. (Either * that, or I have no idea what this is for.) 
*/ -static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) { +static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { RefTypeId classObjectId = ReadRefTypeId(&buf); - VLOG(jdwp) << StringPrintf(" Req for refTypeId for class=%#llx (%s)", classObjectId, Dbg::GetClassName(classObjectId).c_str()); + VLOG(jdwp) << StringPrintf(" Req for refTypeId for class=%#llx (%s)", classObjectId, + Dbg::GetClassName(classObjectId).c_str()); return Dbg::GetReflectedType(classObjectId, pReply); } /* * Handle a DDM packet with a single chunk in it. */ -static JdwpError DDM_Chunk(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) { +static JdwpError DDM_Chunk(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint8_t* replyBuf = NULL; int replyLen = -1; @@ -1518,7 +1612,7 @@ static const JdwpHandlerMap gHandlerMap[] = { { 11, 9, TR_CurrentContendedMonitor, "ThreadReference.CurrentContendedMonitor" }, { 11, 10, NULL, "ThreadReference.Stop" }, { 11, 11, NULL, "ThreadReference.Interrupt" }, - { 11, 12, TR_SuspendCount, "ThreadReference.SuspendCount" }, + { 11, 12, TR_DebugSuspendCount, "ThreadReference.SuspendCount" }, { 11, 13, NULL, "ThreadReference.OwnedMonitorsStackDepthInfo" }, { 11, 14, NULL, "ThreadReference.ForceEarlyReturn" }, @@ -1612,7 +1706,8 @@ void JdwpState::ProcessRequest(const JdwpReqHeader* pHeader, const uint8_t* buf, * Tell the VM that we're running and shouldn't be interrupted by GC. * Do this after anything that can stall indefinitely. 
*/ - Dbg::ThreadRunning(); + Thread* self = Thread::Current(); + ThreadState old_state = self->TransitionFromSuspendedToRunnable(); expandBufAddSpace(pReply, kJDWPHeaderLen); @@ -1660,7 +1755,8 @@ void JdwpState::ProcessRequest(const JdwpReqHeader* pHeader, const uint8_t* buf, } /* tell the VM that GC is okay again */ - Dbg::ThreadWaiting(); + self->TransitionFromRunnableToSuspended(old_state); + } } // namespace JDWP diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc index dfe83ff513..beec7afc84 100644 --- a/src/jdwp/jdwp_main.cc +++ b/src/jdwp/jdwp_main.cc @@ -22,6 +22,7 @@ #include "debugger.h" #include "jdwp/jdwp_priv.h" #include "logging.h" +#include "scoped_thread_state_change.h" #include #include @@ -88,6 +89,8 @@ JdwpState::JdwpState(const JdwpOptions* options) : options_(options), thread_start_lock_("JDWP thread start lock"), thread_start_cond_("JDWP thread start condition variable"), + pthread_(0), + thread_(NULL), debug_thread_started_(false), debug_thread_id_(0), run(false), @@ -115,6 +118,7 @@ JdwpState::JdwpState(const JdwpOptions* options) * the thread is accepting network connections. */ JdwpState* JdwpState::Create(const JdwpOptions* options) { + GlobalSynchronization::mutator_lock_->AssertNotHeld(); UniquePtr state(new JdwpState(options)); switch (options->transport) { case kJdwpTransportSocket: @@ -139,53 +143,62 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) { * Grab a mutex or two before starting the thread. This ensures they * won't signal the cond var before we're waiting. */ - state->thread_start_lock_.Lock(); - const bool should_suspend = options->suspend; - if (should_suspend) { - state->attach_lock_.Lock(); - } - - /* - * We have bound to a port, or are trying to connect outbound to a - * debugger. Create the JDWP thread and let it continue the mission. 
- */ - CHECK_PTHREAD_CALL(pthread_create, (&state->pthread_, NULL, StartJdwpThread, state.get()), "JDWP thread"); - - /* - * Wait until the thread finishes basic initialization. - * TODO: cond vars should be waited upon in a loop - */ - state->thread_start_cond_.Wait(state->thread_start_lock_); - state->thread_start_lock_.Unlock(); + { + MutexLock thread_start_locker(state->thread_start_lock_); + const bool should_suspend = options->suspend; + if (!should_suspend) { + /* + * We have bound to a port, or are trying to connect outbound to a + * debugger. Create the JDWP thread and let it continue the mission. + */ + CHECK_PTHREAD_CALL(pthread_create, (&state->pthread_, NULL, StartJdwpThread, state.get()), "JDWP thread"); - /* - * For suspend=y, wait for the debugger to connect to us or for us to - * connect to the debugger. - * - * The JDWP thread will signal us when it connects successfully or - * times out (for timeout=xxx), so we have to check to see what happened - * when we wake up. - */ - if (should_suspend) { - { - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); + /* + * Wait until the thread finishes basic initialization. + * TODO: cond vars should be waited upon in a loop + */ + state->thread_start_cond_.Wait(state->thread_start_lock_); + } else { + { + MutexLock attach_locker(state->attach_lock_); + /* + * We have bound to a port, or are trying to connect outbound to a + * debugger. Create the JDWP thread and let it continue the mission. + */ + CHECK_PTHREAD_CALL(pthread_create, (&state->pthread_, NULL, StartJdwpThread, state.get()), "JDWP thread"); + + /* + * Wait until the thread finishes basic initialization. + * TODO: cond vars should be waited upon in a loop + */ + state->thread_start_cond_.Wait(state->thread_start_lock_); + + /* + * For suspend=y, wait for the debugger to connect to us or for us to + * connect to the debugger. 
+ * + * The JDWP thread will signal us when it connects successfully or + * times out (for timeout=xxx), so we have to check to see what happened + * when we wake up. + */ + { + ScopedThreadStateChange tsc(Thread::Current(), kWaitingForDebuggerToAttach); + state->attach_cond_.Wait(state->attach_lock_); + } + } + if (!state->IsActive()) { + LOG(ERROR) << "JDWP connection failed"; + return NULL; + } - state->attach_cond_.Wait(state->attach_lock_); - state->attach_lock_.Unlock(); - } + LOG(INFO) << "JDWP connected"; - if (!state->IsActive()) { - LOG(ERROR) << "JDWP connection failed"; - return NULL; + /* + * Ordinarily we would pause briefly to allow the debugger to set + * breakpoints and so on, but for "suspend=y" the VM init code will + * pause the VM when it sends the VM_START message. + */ } - - LOG(INFO) << "JDWP connected"; - - /* - * Ordinarily we would pause briefly to allow the debugger to set - * breakpoints and so on, but for "suspend=y" the VM init code will - * pause the VM when it sends the VM_START message. - */ } return state.release(); @@ -280,14 +293,18 @@ void JdwpState::Run() { */ thread_ = Thread::Current(); run = true; - android_atomic_release_store(true, &debug_thread_started_); thread_start_lock_.Lock(); + debug_thread_started_ = true; thread_start_cond_.Broadcast(); thread_start_lock_.Unlock(); - /* set the thread state to VMWAIT so GCs don't wait for us */ - Dbg::ThreadWaiting(); + /* set the thread state to kWaitingInMainDebuggerLoop so GCs don't wait for us */ + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(thread_->GetState(), kNative); + thread_->SetState(kWaitingInMainDebuggerLoop); + } /* * Loop forever if we're in server mode, processing connections. In @@ -327,10 +344,10 @@ void JdwpState::Run() { /* process requests until the debugger drops */ bool first = true; while (!Dbg::IsDisposed()) { - // sanity check -- shouldn't happen? 
- if (Thread::Current()->GetState() != kVmWait) { - LOG(ERROR) << "JDWP thread no longer in VMWAIT (now " << Thread::Current()->GetState() << "); resetting"; - Dbg::ThreadWaiting(); + { + // sanity check -- shouldn't happen? + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); } if (!(*transport_->processIncoming)(this)) { @@ -343,7 +360,10 @@ void JdwpState::Run() { first = false; /* set thread ID; requires object registry to be active */ - debug_thread_id_ = Dbg::GetThreadSelfId(); + { + ScopedObjectAccess soa(thread_); + debug_thread_id_ = Dbg::GetThreadSelfId(); + } /* wake anybody who's waiting for us */ MutexLock mu(attach_lock_); @@ -357,14 +377,16 @@ void JdwpState::Run() { ddm_is_active_ = false; /* broadcast the disconnect; must be in RUNNING state */ - Dbg::ThreadRunning(); + thread_->TransitionFromSuspendedToRunnable(); Dbg::DdmDisconnected(); - Dbg::ThreadWaiting(); + thread_->TransitionFromRunnableToSuspended(kWaitingInMainDebuggerLoop); } /* release session state, e.g. 
remove breakpoint instructions */ - ResetState(); - + { + ScopedObjectAccess soa(thread_); + ResetState(); + } /* tell the interpreter that the debugger is no longer around */ Dbg::Disconnected(); @@ -377,8 +399,12 @@ void JdwpState::Run() { } } - /* back to running, for thread shutdown */ - Dbg::ThreadRunning(); + /* back to native, for thread shutdown */ + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); + thread_->SetState(kNative); + } VLOG(jdwp) << "JDWP: thread detaching and exiting..."; runtime->DetachCurrentThread(); diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc index 22b4b2cc20..e1332d3395 100644 --- a/src/jni_compiler_test.cc +++ b/src/jni_compiler_test.cc @@ -22,7 +22,8 @@ #include "jni_internal.h" #include "mem_map.h" #include "runtime.h" -#include "scoped_jni_thread_state.h" +#include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "UniquePtr.h" @@ -38,10 +39,11 @@ namespace art { class JniCompilerTest : public CommonTest { protected: - void CompileForTest(ClassLoader* class_loader, bool direct, + void CompileForTest(jobject class_loader, bool direct, const char* method_name, const char* method_sig) { + ScopedObjectAccess soa(Thread::Current()); // Compile the native method before starting the runtime - Class* c = class_linker_->FindClass("LMyClassNatives;", class_loader); + Class* c = class_linker_->FindClass("LMyClassNatives;", soa.Decode(class_loader)); Method* method; if (direct) { method = c->FindDirectMethod(method_name, method_sig); @@ -56,15 +58,20 @@ class JniCompilerTest : public CommonTest { ASSERT_TRUE(method->GetCode() != NULL); } - void SetUpForTest(ClassLoader* class_loader, bool direct, - const char* method_name, const char* method_sig, + void SetUpForTest(bool direct, const char* method_name, const char* method_sig, void* native_fnptr) { - CompileForTest(class_loader, direct, method_name, 
method_sig); - if (!runtime_->IsStarted()) { + // Initialize class loader and compile method when runtime not started. + if (!runtime_->IsStarted()){ + { + ScopedObjectAccess soa(Thread::Current()); + class_loader_ = LoadDex("MyClassNatives"); + } + CompileForTest(class_loader_, direct, method_name, method_sig); + // Start runtime. + Thread::Current()->TransitionFromSuspendedToRunnable(); runtime_->Start(); } - - // JNI operations after runtime start + // JNI operations after runtime start. env_ = Thread::Current()->GetJniEnv(); jklass_ = env_->FindClass("MyClassNatives"); ASSERT_TRUE(jklass_ != NULL); @@ -91,6 +98,8 @@ class JniCompilerTest : public CommonTest { public: static jclass jklass_; static jobject jobj_; + static jobject class_loader_; + protected: JNIEnv* env_; @@ -99,12 +108,17 @@ class JniCompilerTest : public CommonTest { jclass JniCompilerTest::jklass_; jobject JniCompilerTest::jobj_; +jobject JniCompilerTest::class_loader_; int gJava_MyClassNatives_foo_calls = 0; void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { - // 2 = SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + GlobalSynchronization::mutator_lock_->AssertNotHeld(); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -112,8 +126,7 @@ void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { } TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "foo", "()V", + SetUpForTest(false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); EXPECT_EQ(0, gJava_MyClassNatives_foo_calls); @@ -124,26 +137,28 @@ 
TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) { } TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "bar", "(I)I", + SetUpForTest(false, "bar", "(I)I", NULL /* calling through stub will link with &Java_MyClassNatives_bar */); + ScopedObjectAccess soa(Thread::Current()); std::string reason; - ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader.get(), reason)) - << reason; + ASSERT_TRUE( + Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), + reason)) << reason; jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); EXPECT_EQ(25, result); } TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "sbar", "(I)I", + SetUpForTest(true, "sbar", "(I)I", NULL /* calling through stub will link with &Java_MyClassNatives_sbar */); + ScopedObjectAccess soa(Thread::Current()); std::string reason; - ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader.get(), reason)) - << reason; + ASSERT_TRUE( + Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), + reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); EXPECT_EQ(43, result); @@ -151,9 +166,12 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) { int gJava_MyClassNatives_fooI_calls = 0; jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { - // 2 = SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); 
EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -162,8 +180,7 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { } TEST_F(JniCompilerTest, CompileAndRunIntMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooI", "(I)I", + SetUpForTest(false, "fooI", "(I)I", reinterpret_cast(&Java_MyClassNatives_fooI)); EXPECT_EQ(0, gJava_MyClassNatives_fooI_calls); @@ -177,9 +194,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethod) { int gJava_MyClassNatives_fooII_calls = 0; jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { - // 2 = SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -188,8 +208,7 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { } TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooII", "(II)I", + SetUpForTest(false, "fooII", "(II)I", reinterpret_cast(&Java_MyClassNatives_fooII)); EXPECT_EQ(0, gJava_MyClassNatives_fooII_calls); @@ -204,9 +223,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) { int gJava_MyClassNatives_fooJJ_calls = 0; jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) { - // 2 = SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock 
mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -215,8 +237,7 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) } TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooJJ", "(JJ)J", + SetUpForTest(false, "fooJJ", "(JJ)J", reinterpret_cast(&Java_MyClassNatives_fooJJ)); EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_calls); @@ -232,9 +253,12 @@ TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) { int gJava_MyClassNatives_fooDD_calls = 0; jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) { - // 2 = SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -243,8 +267,7 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub } TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooDD", "(DD)D", + SetUpForTest(false, "fooDD", "(DD)D", reinterpret_cast(&Java_MyClassNatives_fooDD)); EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls); @@ -261,9 +284,12 @@ TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) { int gJava_MyClassNatives_fooJJ_synchronized_calls = 0; jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong x, jlong y) { - // 2 
= SirtRef + thisObj - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = thisObj + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -272,8 +298,7 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong } TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooJJ_synchronized", "(JJ)J", + SetUpForTest(false, "fooJJ_synchronized", "(JJ)J", reinterpret_cast(&Java_MyClassNatives_fooJJ_synchronized)); EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls); @@ -287,9 +312,12 @@ TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) { int gJava_MyClassNatives_fooIOO_calls = 0; jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y, jobject z) { - // 4 = SirtRef + this + y + z - EXPECT_EQ(4U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 3 = this + y + z + EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); @@ -305,8 +333,7 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject } TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooIOO", + SetUpForTest(false, "fooIOO", 
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", reinterpret_cast(&Java_MyClassNatives_fooIOO)); @@ -338,9 +365,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) { int gJava_MyClassNatives_fooSII_calls = 0; jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { - // 2 = SirtRef + klass - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = klass + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); @@ -349,8 +379,7 @@ jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { } TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "fooSII", "(II)I", + SetUpForTest(true, "fooSII", "(II)I", reinterpret_cast(&Java_MyClassNatives_fooSII)); EXPECT_EQ(0, gJava_MyClassNatives_fooSII_calls); @@ -361,9 +390,12 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) { int gJava_MyClassNatives_fooSDD_calls = 0; jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) { - // 2 = SirtRef + klass - EXPECT_EQ(2U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 1 = klass + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); @@ -372,8 +404,7 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble } 
TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "fooSDD", "(DD)D", + SetUpForTest(true, "fooSDD", "(DD)D", reinterpret_cast(&Java_MyClassNatives_fooSDD)); EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls); @@ -390,9 +421,12 @@ TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) { int gJava_MyClassNatives_fooSIOO_calls = 0; jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) { - // 4 = SirtRef + klass + y + z - EXPECT_EQ(4U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); + // 3 = klass + y + z + EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); @@ -409,8 +443,7 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "fooSIOO", + SetUpForTest(true, "fooSIOO", "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", reinterpret_cast(&Java_MyClassNatives_fooSIOO)); @@ -441,11 +474,13 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) { } int gJava_MyClassNatives_fooSSIOO_calls = 0; -jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, - jobject z) { - // 4 = SirtRef + klass + y + z - EXPECT_EQ(4U, Thread::Current()->NumStackReferences()); - EXPECT_EQ(kNative, Thread::Current()->GetState()); +jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) { + // 3 = klass + y + z + EXPECT_EQ(3U, 
Thread::Current()->NumStackReferences()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); @@ -461,8 +496,7 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject } TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "fooSSIOO", + SetUpForTest(true, "fooSSIOO", "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", reinterpret_cast(&Java_MyClassNatives_fooSSIOO)); @@ -498,34 +532,42 @@ void Java_MyClassNatives_throwException(JNIEnv* env, jobject) { } TEST_F(JniCompilerTest, ExceptionHandling) { - SirtRef class_loader(LoadDex("MyClassNatives")); - - // all compilation needs to happen before SetUpForTest calls Runtime::Start - CompileForTest(class_loader.get(), false, "foo", "()V"); - CompileForTest(class_loader.get(), false, "throwException", "()V"); - CompileForTest(class_loader.get(), false, "foo", "()V"); + { + ASSERT_FALSE(runtime_->IsStarted()); + ScopedObjectAccess soa(Thread::Current()); + class_loader_ = LoadDex("MyClassNatives"); + + // all compilation needs to happen before Runtime::Start + CompileForTest(class_loader_, false, "foo", "()V"); + CompileForTest(class_loader_, false, "throwException", "()V"); + CompileForTest(class_loader_, false, "foo", "()V"); + } + // Start runtime to avoid re-initialization in SetupForTest. 
+ Thread::Current()->TransitionFromSuspendedToRunnable(); + runtime_->Start(); gJava_MyClassNatives_foo_calls = 0; // Check a single call of a JNI method is ok - SetUpForTest(class_loader.get(), false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); + SetUpForTest(false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); EXPECT_EQ(1, gJava_MyClassNatives_foo_calls); EXPECT_FALSE(Thread::Current()->IsExceptionPending()); // Get class for exception we expect to be thrown - Class* jlre = class_linker_->FindClass("Ljava/lang/RuntimeException;", class_loader.get()); - SetUpForTest(class_loader.get(), false, "throwException", "()V", + ScopedLocalRef jlre(env_, env_->FindClass("java/lang/RuntimeException")); + SetUpForTest(false, "throwException", "()V", reinterpret_cast(&Java_MyClassNatives_throwException)); // Call Java_MyClassNatives_throwException (JNI method that throws exception) env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); EXPECT_EQ(1, gJava_MyClassNatives_foo_calls); - EXPECT_TRUE(Thread::Current()->IsExceptionPending()); - EXPECT_TRUE(Thread::Current()->GetException()->InstanceOf(jlre)); - Thread::Current()->ClearException(); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); + ScopedLocalRef exception(env_, env_->ExceptionOccurred()); + env_->ExceptionClear(); + EXPECT_TRUE(env_->IsInstanceOf(exception.get(), jlre.get())); // Check a single call of a JNI method is ok - SetUpForTest(class_loader.get(), false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); + SetUpForTest(false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); EXPECT_EQ(2, gJava_MyClassNatives_foo_calls); } @@ -533,13 +575,13 @@ TEST_F(JniCompilerTest, ExceptionHandling) { jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { if (i <= 0) { // We want to check raw Object*/Array* below - ScopedJniThreadState 
ts(env); + ScopedObjectAccess soa(env); // Build stack trace - jobject internal = Thread::Current()->CreateInternalStackTrace(ts); + jobject internal = Thread::Current()->CreateInternalStackTrace(soa); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); ObjectArray* trace_array = - ts.Decode*>(ste_array); + soa.Decode*>(ste_array); EXPECT_TRUE(trace_array != NULL); EXPECT_EQ(11, trace_array->GetLength()); @@ -569,8 +611,7 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { } TEST_F(JniCompilerTest, NativeStackTraceElement) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooI", "(I)I", + SetUpForTest(false, "fooI", "(I)I", reinterpret_cast(&Java_MyClassNatives_nativeUpCall)); jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10); EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result); @@ -581,8 +622,7 @@ jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) { } TEST_F(JniCompilerTest, ReturnGlobalRef) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;", + SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;", reinterpret_cast(&Java_MyClassNatives_fooO)); jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_); EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(result)); @@ -591,16 +631,15 @@ TEST_F(JniCompilerTest, ReturnGlobalRef) { jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) { // Add 10 local references - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); for (int i = 0; i < 10; i++) { - ts.AddLocalReference(ts.Decode(thisObj)); + soa.AddLocalReference(soa.Decode(thisObj)); } return x+1; } TEST_F(JniCompilerTest, LocalReferenceTableClearingTest) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "fooI", "(I)I", 
reinterpret_cast(&local_ref_test)); + SetUpForTest(false, "fooI", "(I)I", reinterpret_cast(&local_ref_test)); // 1000 invocations of a method that adds 10 local references for (int i = 0; i < 1000; i++) { jint result = env_->CallIntMethod(jobj_, jmethod_, i); @@ -618,8 +657,7 @@ void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject } TEST_F(JniCompilerTest, JavaLangSystemArrayCopy) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V", + SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V", reinterpret_cast(&my_arraycopy)); env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876); } @@ -634,8 +672,7 @@ jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint ex } TEST_F(JniCompilerTest, CompareAndSwapInt) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z", + SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z", reinterpret_cast(&my_casi)); jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_, 0x12345678ABCDEF88ll, 0xCAFEF00D, 0xEBADF00D); EXPECT_EQ(result, JNI_TRUE); @@ -651,8 +688,7 @@ jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, } TEST_F(JniCompilerTest, GetText) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I", + SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I", reinterpret_cast(&my_gettext)); jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_, 0x7FEDCBA987654321ll, jobj_); @@ -670,8 +706,7 @@ jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclas } TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) { - SirtRef 
class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;", + SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;", reinterpret_cast(&Java_MyClassNatives_instanceMethodThatShouldReturnClass)); CheckJniAbortCatcher check_jni_abort_catcher; @@ -688,8 +723,7 @@ TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) { } TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;", + SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;", reinterpret_cast(&Java_MyClassNatives_staticMethodThatShouldReturnClass)); CheckJniAbortCatcher check_jni_abort_catcher; @@ -714,8 +748,7 @@ void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass } TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", reinterpret_cast(&Java_MyClassNatives_instanceMethodThatShouldTakeClass)); CheckJniAbortCatcher check_jni_abort_catcher; @@ -725,8 +758,7 @@ TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) { } TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) { - SirtRef class_loader(LoadDex("MyClassNatives")); - SetUpForTest(class_loader.get(), true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", reinterpret_cast(&Java_MyClassNatives_staticMethodThatShouldTakeClass)); CheckJniAbortCatcher check_jni_abort_catcher; diff --git a/src/jni_internal.cc b/src/jni_internal.cc index 74b740a229..dbdc149d6c 100644 --- a/src/jni_internal.cc +++ 
b/src/jni_internal.cc @@ -26,11 +26,12 @@ #include "class_loader.h" #include "jni.h" #include "logging.h" +#include "mutex.h" #include "object.h" #include "object_utils.h" #include "runtime.h" #include "safe_map.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "stl_util.h" #include "stringpiece.h" @@ -55,7 +56,8 @@ static size_t gGlobalsMax = 51200; // Arbitrary sanity check. static const size_t kWeakGlobalsInitial = 16; // Arbitrary. static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. -void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count) { +void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, + size_t method_count) { ScopedLocalRef c(env, env->FindClass(jni_class_name)); if (c.get() == NULL) { LOG(FATAL) << "Couldn't find class: " << jni_class_name; @@ -91,7 +93,7 @@ size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) { class ArgArray { public: - explicit ArgArray(Method* method) { + explicit ArgArray(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { MethodHelper mh(method); shorty_ = mh.GetShorty(); shorty_len_ = mh.GetShortyLength(); @@ -107,7 +109,8 @@ class ArgArray { return arg_array_; } - void BuildArgArray(const ScopedJniThreadState& ts, va_list ap) { + void BuildArgArray(const ScopedObjectAccess& soa, va_list ap) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -129,7 +132,7 @@ class ArgArray { arg_array_[offset].SetF(va_arg(ap, jdouble)); break; case 'L': - arg_array_[offset].SetL(ts.Decode(va_arg(ap, jobject))); + arg_array_[offset].SetL(soa.Decode(va_arg(ap, jobject))); break; case 'D': arg_array_[offset].SetD(va_arg(ap, jdouble)); @@ -141,7 +144,8 @@ class ArgArray { } } - void BuildArgArray(const 
ScopedJniThreadState& ts, jvalue* args) { + void BuildArgArray(const ScopedObjectAccess& soa, jvalue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -163,7 +167,7 @@ class ArgArray { arg_array_[offset].SetF(args[offset].f); break; case 'L': - arg_array_[offset].SetL(ts.Decode(args[offset].l)); + arg_array_[offset].SetL(soa.Decode(args[offset].l)); break; case 'D': arg_array_[offset].SetD(args[offset].d); @@ -184,18 +188,20 @@ class ArgArray { UniquePtr large_arg_array_; }; -static jweak AddWeakGlobalReference(ScopedJniThreadState& ts, Object* obj) { +static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (obj == NULL) { return NULL; } - JavaVMExt* vm = ts.Vm(); + JavaVMExt* vm = soa.Vm(); IndirectReferenceTable& weak_globals = vm->weak_globals; MutexLock mu(vm->weak_globals_lock); IndirectRef ref = weak_globals.Add(IRT_FIRST_SEGMENT, obj); return reinterpret_cast(ref); } -static void CheckMethodArguments(Method* m, JValue* args) { +static void CheckMethodArguments(Method* m, JValue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { MethodHelper mh(m); ObjectArray* parameter_types = mh.GetParameterTypes(); CHECK(parameter_types != NULL); @@ -219,45 +225,50 @@ static void CheckMethodArguments(Method* m, JValue* args) { } } -static JValue InvokeWithArgArray(const ScopedJniThreadState& ts, Object* receiver, Method* method, - JValue* args) { - if (UNLIKELY(ts.Env()->check_jni)) { +static JValue InvokeWithArgArray(const ScopedObjectAccess& soa, Object* receiver, + Method* method, JValue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + if (UNLIKELY(soa.Env()->check_jni)) { CheckMethodArguments(method, args); } JValue result; - method->Invoke(ts.Self(), receiver, args, &result); + method->Invoke(soa.Self(), receiver, args, 
&result); return result; } -static JValue InvokeWithVarArgs(const ScopedJniThreadState& ts, jobject obj, jmethodID mid, - va_list args) { - Object* receiver = ts.Decode(obj); - Method* method = ts.DecodeMethod(mid); +static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, + jmethodID mid, va_list args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Object* receiver = soa.Decode(obj); + Method* method = soa.DecodeMethod(mid); ArgArray arg_array(method); - arg_array.BuildArgArray(ts, args); - return InvokeWithArgArray(ts, receiver, method, arg_array.get()); + arg_array.BuildArgArray(soa, args); + return InvokeWithArgArray(soa, receiver, method, arg_array.get()); } -static Method* FindVirtualMethod(Object* receiver, Method* method) { +static Method* FindVirtualMethod(Object* receiver, Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method); } -static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedJniThreadState& ts, jobject obj, - jmethodID mid, jvalue* args) { - Object* receiver = ts.Decode(obj); - Method* method = FindVirtualMethod(receiver, ts.DecodeMethod(mid)); +static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa, + jobject obj, jmethodID mid, jvalue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Object* receiver = soa.Decode(obj); + Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); - arg_array.BuildArgArray(ts, args); - return InvokeWithArgArray(ts, receiver, method, arg_array.get()); + arg_array.BuildArgArray(soa, args); + return InvokeWithArgArray(soa, receiver, method, arg_array.get()); } -static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedJniThreadState& ts, jobject obj, - jmethodID mid, va_list args) { - Object* receiver = ts.Decode(obj); - Method* method = FindVirtualMethod(receiver, 
ts.DecodeMethod(mid)); +static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa, + jobject obj, jmethodID mid, va_list args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Object* receiver = soa.Decode(obj); + Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); - arg_array.BuildArgArray(ts, args); - return InvokeWithArgArray(ts, receiver, method, arg_array.get()); + arg_array.BuildArgArray(soa, args); + return InvokeWithArgArray(soa, receiver, method, arg_array.get()); } // Section 12.3.2 of the JNI spec describes JNI class descriptors. They're @@ -284,13 +295,17 @@ static std::string NormalizeJniClassDescriptor(const char* name) { return result; } -static void ThrowNoSuchMethodError(ScopedJniThreadState& ts, Class* c, const char* name, const char* sig, const char* kind) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", +static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, Class* c, + const char* name, const char* sig, const char* kind) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", "no %s method \"%s.%s%s\"", kind, ClassHelper(c).GetDescriptor(), name, sig); } -static jmethodID FindMethodID(ScopedJniThreadState& ts, jclass jni_class, const char* name, const char* sig, bool is_static) { - Class* c = ts.Decode(jni_class); +static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, + const char* name, const char* sig, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* c = soa.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -308,14 +323,15 @@ static jmethodID FindMethodID(ScopedJniThreadState& ts, jclass jni_class, const } if (method == NULL || method->IsStatic() != is_static) { - ThrowNoSuchMethodError(ts, c, name, sig, is_static ? 
"static" : "non-static"); + ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static"); return NULL; } - return ts.EncodeMethod(method); + return soa.EncodeMethod(method); } -static ClassLoader* GetClassLoader(Thread* self) { +static ClassLoader* GetClassLoader(Thread* self) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method = self->GetCurrentMethod(); if (method == NULL || PrettyMethod(method, false) == "java.lang.Runtime.nativeLoad") { return self->GetClassLoaderOverride(); @@ -323,9 +339,10 @@ static ClassLoader* GetClassLoader(Thread* self) { return method->GetDeclaringClass()->GetClassLoader(); } -static jfieldID FindFieldID(const ScopedJniThreadState& ts, jclass jni_class, const char* name, - const char* sig, bool is_static) { - Class* c = ts.Decode(jni_class); +static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name, + const char* sig, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* c = soa.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -334,16 +351,16 @@ static jfieldID FindFieldID(const ScopedJniThreadState& ts, jclass jni_class, co Class* field_type; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); if (sig[1] != '\0') { - ClassLoader* cl = GetClassLoader(ts.Self()); + ClassLoader* cl = GetClassLoader(soa.Self()); field_type = class_linker->FindClass(sig, cl); } else { field_type = class_linker->FindPrimitiveClass(*sig); } if (field_type == NULL) { // Failed to find type from the signature of the field. 
- DCHECK(ts.Self()->IsExceptionPending()); - ts.Self()->ClearException(); - ts.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", + DCHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", "no type \"%s\" found and so no field \"%s\" could be found in class " "\"%s\" or its superclasses", sig, name, ClassHelper(c).GetDescriptor()); return NULL; @@ -354,125 +371,85 @@ static jfieldID FindFieldID(const ScopedJniThreadState& ts, jclass jni_class, co field = c->FindInstanceField(name, ClassHelper(field_type).GetDescriptor()); } if (field == NULL) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", "no \"%s\" field \"%s\" in class \"%s\" or its superclasses", sig, name, ClassHelper(c).GetDescriptor()); return NULL; } - return ts.EncodeField(field); + return soa.EncodeField(field); } -static void PinPrimitiveArray(const ScopedJniThreadState& ts, const Array* array) { - JavaVMExt* vm = ts.Vm(); +static void PinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Add(array); } -static void UnpinPrimitiveArray(const ScopedJniThreadState& ts, const Array* array) { - JavaVMExt* vm = ts.Vm(); +static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Remove(array); } -template -static JniT NewPrimitiveArray(const ScopedJniThreadState& ts, jsize length) { - CHECK_GE(length, 0); // TODO: ReportJniError - ArtT* result = ArtT::Alloc(length); - return ts.AddLocalReference(result); -} - -template -static CArrayT GetPrimitiveArray(ScopedJniThreadState& ts, ArrayT java_array, jboolean* is_copy) { - ArtArrayT* array = 
ts.Decode(java_array); - PinPrimitiveArray(ts, array); - if (is_copy != NULL) { - *is_copy = JNI_FALSE; - } - return array->GetData(); -} - -template -static void ReleasePrimitiveArray(ScopedJniThreadState& ts, ArrayT java_array, jint mode) { - if (mode != JNI_COMMIT) { - Array* array = ts.Decode(java_array); - UnpinPrimitiveArray(ts, array); - } -} - -static void ThrowAIOOBE(ScopedJniThreadState& ts, Array* array, jsize start, jsize length, const char* identifier) { +static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, + jsize length, const char* identifier) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string type(PrettyTypeOf(array)); - ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "%s offset=%d length=%d %s.length=%d", type.c_str(), start, length, identifier, array->GetLength()); } -static void ThrowSIOOBE(ScopedJniThreadState& ts, jsize start, jsize length, jsize array_length) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", +static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, + jsize array_length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", "offset=%d length=%d string.length()=%d", start, length, array_length); } -template -static void GetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_array, jsize start, jsize length, JavaT* buf) { - ArrayT* array = ts.Decode(java_array); - if (start < 0 || length < 0 || start + length > array->GetLength()) { - ThrowAIOOBE(ts, array, start, length, "src"); - } else { - JavaT* data = array->GetData(); - memcpy(buf, data + start, length * sizeof(JavaT)); +int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + 
ScopedObjectAccess soa(env); + + // Turn the const char* into a java.lang.String. + ScopedLocalRef s(env, env->NewStringUTF(msg)); + if (msg != NULL && s.get() == NULL) { + return JNI_ERR; } -} -template -static void SetPrimitiveArrayRegion(ScopedJniThreadState& ts, JavaArrayT java_array, jsize start, jsize length, const JavaT* buf) { - ArrayT* array = ts.Decode(java_array); - if (start < 0 || length < 0 || start + length > array->GetLength()) { - ThrowAIOOBE(ts, array, start, length, "dst"); + // Choose an appropriate constructor and set up the arguments. + jvalue args[2]; + const char* signature; + if (msg == NULL && cause == NULL) { + signature = "()V"; + } else if (msg != NULL && cause == NULL) { + signature = "(Ljava/lang/String;)V"; + args[0].l = s.get(); + } else if (msg == NULL && cause != NULL) { + signature = "(Ljava/lang/Throwable;)V"; + args[0].l = cause; } else { - JavaT* data = array->GetData(); - memcpy(data + start, buf, length * sizeof(JavaT)); + signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; + args[0].l = s.get(); + args[1].l = cause; + } + jmethodID mid = env->GetMethodID(exception_class, "", signature); + if (mid == NULL) { + LOG(ERROR) << "No " << signature << " in " + << PrettyClass(soa.Decode(exception_class)); + return JNI_ERR; } -} - -int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) { - ScopedJniThreadState ts(env); - - // Turn the const char* into a java.lang.String. - ScopedLocalRef s(env, env->NewStringUTF(msg)); - if (msg != NULL && s.get() == NULL) { - return JNI_ERR; - } - // Choose an appropriate constructor and set up the arguments. 
- jvalue args[2]; - const char* signature; - if (msg == NULL && cause == NULL) { - signature = "()V"; - } else if (msg != NULL && cause == NULL) { - signature = "(Ljava/lang/String;)V"; - args[0].l = s.get(); - } else if (msg == NULL && cause != NULL) { - signature = "(Ljava/lang/Throwable;)V"; - args[0].l = cause; - } else { - signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; - args[0].l = s.get(); - args[1].l = cause; - } - jmethodID mid = env->GetMethodID(exception_class, "", signature); - if (mid == NULL) { - LOG(ERROR) << "No " << signature << " in " - << PrettyClass(ts.Decode(exception_class)); - return JNI_ERR; - } - - ScopedLocalRef exception(env, reinterpret_cast(env->NewObjectA(exception_class, mid, args))); - if (exception.get() == NULL) { - return JNI_ERR; - } + ScopedLocalRef exception(env, reinterpret_cast(env->NewObjectA(exception_class, mid, args))); + if (exception.get() == NULL) { + return JNI_ERR; + } - ts.Self()->SetException(ts.Decode(exception.get())); + soa.Self()->SetException(soa.Decode(exception.get())); - return JNI_OK; + return JNI_OK; } static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) { @@ -533,32 +510,36 @@ class SharedLibrary { * Check the result of an earlier call to JNI_OnLoad on this library. * If the call has not yet finished in another thread, wait for it. */ - bool CheckOnLoadResult() { - MutexLock mu(jni_on_load_lock_); - + bool CheckOnLoadResult() + LOCKS_EXCLUDED(jni_on_load_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Thread* self = Thread::Current(); - if (jni_on_load_thread_id_ == self->GetThinLockId()) { - // Check this so we don't end up waiting for ourselves. We need - // to return "true" so the caller can continue. 
- LOG(INFO) << *self << " recursive attempt to load library " - << "\"" << path_ << "\""; - return true; - } + self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad); + bool okay; + { + MutexLock mu(jni_on_load_lock_); + + if (jni_on_load_thread_id_ == self->GetThinLockId()) { + // Check this so we don't end up waiting for ourselves. We need to return "true" so the + // caller can continue. + LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\""; + okay = true; + } else { + while (jni_on_load_result_ == kPending) { + VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]"; + jni_on_load_cond_.Wait(jni_on_load_lock_); + } - while (jni_on_load_result_ == kPending) { - VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " - << "JNI_OnLoad...]"; - ScopedThreadStateChange tsc(self, kVmWait); - jni_on_load_cond_.Wait(jni_on_load_lock_); + okay = (jni_on_load_result_ == kOkay); + VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" " + << (okay ? "succeeded" : "failed") << "]"; + } } - - bool okay = (jni_on_load_result_ == kOkay); - VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" " - << (okay ? "succeeded" : "failed") << "]"; + self->TransitionFromSuspendedToRunnable(); return okay; } - void SetResult(bool result) { + void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) { MutexLock mu(jni_on_load_lock_); jni_on_load_result_ = result ? kOkay : kFailed; @@ -589,7 +570,7 @@ class SharedLibrary { Object* class_loader_; // Guards remaining items. - Mutex jni_on_load_lock_; + Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; // Wait for JNI_OnLoad in other thread. ConditionVariable jni_on_load_cond_; // Recursive invocation guard. @@ -633,7 +614,8 @@ class Libraries { } // See section 11.3 "Linking Native Methods" of the JNI spec. 
- void* FindNativeMethod(const Method* m, std::string& detail) { + void* FindNativeMethod(const Method* m, std::string& detail) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string jni_short_name(JniShortName(m)); std::string jni_long_name(JniLongName(m)); const ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader(); @@ -667,109 +649,112 @@ class Libraries { SafeMap libraries_; }; -JValue InvokeWithJValues(const ScopedJniThreadState& ts, jobject obj, jmethodID mid, jvalue* args) { - Object* receiver = ts.Decode(obj); - Method* method = ts.DecodeMethod(mid); +JValue InvokeWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, + jvalue* args) { + Object* receiver = soa.Decode(obj); + Method* method = soa.DecodeMethod(mid); ArgArray arg_array(method); - arg_array.BuildArgArray(ts, args); - return InvokeWithArgArray(ts, receiver, method, arg_array.get()); + arg_array.BuildArgArray(soa, args); + return InvokeWithArgArray(soa, receiver, method, arg_array.get()); } -JValue InvokeWithJValues(const ScopedJniThreadState& ts, Object* receiver, Method* m, JValue* args) { - return InvokeWithArgArray(ts, receiver, m, args); +JValue InvokeWithJValues(const ScopedObjectAccess& soa, Object* receiver, Method* m, + JValue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return InvokeWithArgArray(soa, receiver, m, args); } class JNI { public: static jint GetVersion(JNIEnv* env) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); return JNI_VERSION_1_6; } static jclass DefineClass(JNIEnv* env, const char*, jobject, const jbyte*, jsize) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); LOG(WARNING) << "JNI DefineClass is not supported"; return NULL; } static jclass FindClass(JNIEnv* env, const char* name) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); Runtime* runtime = Runtime::Current(); ClassLinker* class_linker = runtime->GetClassLinker(); std::string 
descriptor(NormalizeJniClassDescriptor(name)); Class* c = NULL; if (runtime->IsStarted()) { - ClassLoader* cl = GetClassLoader(ts.Self()); + ClassLoader* cl = GetClassLoader(soa.Self()); c = class_linker->FindClass(descriptor.c_str(), cl); } else { c = class_linker->FindSystemClass(descriptor.c_str()); } - return ts.AddLocalReference(c); + return soa.AddLocalReference(c); } static jmethodID FromReflectedMethod(JNIEnv* env, jobject java_method) { - ScopedJniThreadState ts(env); - Method* method = ts.Decode(java_method); - return ts.EncodeMethod(method); + ScopedObjectAccess soa(env); + Method* method = soa.Decode(java_method); + return soa.EncodeMethod(method); } static jfieldID FromReflectedField(JNIEnv* env, jobject java_field) { - ScopedJniThreadState ts(env); - Field* field = ts.Decode(java_field); - return ts.EncodeField(field); + ScopedObjectAccess soa(env); + Field* field = soa.Decode(java_field); + return soa.EncodeField(field); } static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) { - ScopedJniThreadState ts(env); - Method* method = ts.DecodeMethod(mid); - return ts.AddLocalReference(method); + ScopedObjectAccess soa(env); + Method* method = soa.DecodeMethod(mid); + return soa.AddLocalReference(method); } static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) { - ScopedJniThreadState ts(env); - Field* field = ts.DecodeField(fid); - return ts.AddLocalReference(field); + ScopedObjectAccess soa(env); + Field* field = soa.DecodeField(fid); + return soa.AddLocalReference(field); } static jclass GetObjectClass(JNIEnv* env, jobject java_object) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(java_object); - return ts.AddLocalReference(o->GetClass()); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(java_object); + return soa.AddLocalReference(o->GetClass()); } static jclass GetSuperclass(JNIEnv* env, jclass java_class) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); - return 
ts.AddLocalReference(c->GetSuperClass()); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); + return soa.AddLocalReference(c->GetSuperClass()); } static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) { - ScopedJniThreadState ts(env); - Class* c1 = ts.Decode(java_class1); - Class* c2 = ts.Decode(java_class2); + ScopedObjectAccess soa(env); + Class* c1 = soa.Decode(java_class1); + Class* c2 = soa.Decode(java_class2); return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE; } static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); CHECK_NE(static_cast(NULL), java_class); // TODO: ReportJniError if (jobj == NULL) { // Note: JNI is different from regular Java instanceof in this respect return JNI_TRUE; } else { - Object* obj = ts.Decode(jobj); - Class* c = ts.Decode(java_class); + Object* obj = soa.Decode(jobj); + Class* c = soa.Decode(java_class); return obj->InstanceOf(c) ? JNI_TRUE : JNI_FALSE; } } static jint Throw(JNIEnv* env, jthrowable java_exception) { - ScopedJniThreadState ts(env); - Throwable* exception = ts.Decode(java_exception); + ScopedObjectAccess soa(env); + Throwable* exception = soa.Decode(java_exception); if (exception == NULL) { return JNI_ERR; } - ts.Self()->SetException(exception); + soa.Self()->SetException(exception); return JNI_OK; } @@ -778,23 +763,23 @@ class JNI { } static jboolean ExceptionCheck(JNIEnv* env) { - ScopedJniThreadState ts(env); - return ts.Self()->IsExceptionPending() ? JNI_TRUE : JNI_FALSE; + ScopedObjectAccess soa(env); + return soa.Self()->IsExceptionPending() ? 
JNI_TRUE : JNI_FALSE; } static void ExceptionClear(JNIEnv* env) { - ScopedJniThreadState ts(env); - ts.Self()->ClearException(); + ScopedObjectAccess soa(env); + soa.Self()->ClearException(); } static void ExceptionDescribe(JNIEnv* env) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); - Thread* self = ts.Self(); + Thread* self = soa.Self(); Throwable* original_exception = self->GetException(); self->ClearException(); - ScopedLocalRef exception(env, ts.AddLocalReference(original_exception)); + ScopedLocalRef exception(env, soa.AddLocalReference(original_exception)); ScopedLocalRef exception_class(env, env->GetObjectClass(exception.get())); jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V"); if (mid == NULL) { @@ -813,72 +798,58 @@ class JNI { } static jthrowable ExceptionOccurred(JNIEnv* env) { - ScopedJniThreadState ts(env); - Object* exception = ts.Self()->GetException(); - return ts.AddLocalReference(exception); + ScopedObjectAccess soa(env); + Object* exception = soa.Self()->GetException(); + return soa.AddLocalReference(exception); } static void FatalError(JNIEnv* env, const char* msg) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); LOG(FATAL) << "JNI FatalError called: " << msg; } static jint PushLocalFrame(JNIEnv* env, jint capacity) { - ScopedJniThreadState ts(env); - if (EnsureLocalCapacity(ts, capacity, "PushLocalFrame") != JNI_OK) { + ScopedObjectAccess soa(env); + if (EnsureLocalCapacity(soa, capacity, "PushLocalFrame") != JNI_OK) { return JNI_ERR; } - ts.Env()->PushFrame(capacity); + soa.Env()->PushFrame(capacity); return JNI_OK; } static jobject PopLocalFrame(JNIEnv* env, jobject java_survivor) { - ScopedJniThreadState ts(env); - Object* survivor = ts.Decode(java_survivor); - ts.Env()->PopFrame(); - return ts.AddLocalReference(survivor); + ScopedObjectAccess soa(env); + Object* survivor = soa.Decode(java_survivor); + soa.Env()->PopFrame(); + return soa.AddLocalReference(survivor); } static 
jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity) { - ScopedJniThreadState ts(env); - return EnsureLocalCapacity(ts, desired_capacity, "EnsureLocalCapacity"); - } - - static jint EnsureLocalCapacity(const ScopedJniThreadState& ts, jint desired_capacity, const char* caller) { - // TODO: we should try to expand the table if necessary. - if (desired_capacity < 1 || desired_capacity > static_cast(kLocalsMax)) { - LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; - return JNI_ERR; - } - // TODO: this isn't quite right, since "capacity" includes holes. - size_t capacity = ts.Env()->locals.Capacity(); - bool okay = (static_cast(kLocalsMax - capacity) >= desired_capacity); - if (!okay) { - ts.Self()->ThrowOutOfMemoryError(caller); - } - return okay ? JNI_OK : JNI_ERR; + ScopedObjectAccess soa(env); + return EnsureLocalCapacity(soa, desired_capacity, "EnsureLocalCapacity"); } static jobject NewGlobalRef(JNIEnv* env, jobject obj) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (obj == NULL) { return NULL; } - JavaVMExt* vm = ts.Vm(); + JavaVMExt* vm = soa.Vm(); IndirectReferenceTable& globals = vm->globals; + Object* decoded_obj = soa.Decode(obj); MutexLock mu(vm->globals_lock); - IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, ts.Decode(obj)); + IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, decoded_obj); return reinterpret_cast(ref); } static void DeleteGlobalRef(JNIEnv* env, jobject obj) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (obj == NULL) { return; } - JavaVMExt* vm = ts.Vm(); + JavaVMExt* vm = soa.Vm(); IndirectReferenceTable& globals = vm->globals; MutexLock mu(vm->globals_lock); @@ -889,17 +860,17 @@ class JNI { } static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) { - ScopedJniThreadState ts(env); - return AddWeakGlobalReference(ts, ts.Decode(obj)); + ScopedObjectAccess soa(env); + return AddWeakGlobalReference(soa, soa.Decode(obj)); } static void 
DeleteWeakGlobalRef(JNIEnv* env, jweak obj) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (obj == NULL) { return; } - JavaVMExt* vm = ts.Vm(); + JavaVMExt* vm = soa.Vm(); IndirectReferenceTable& weak_globals = vm->weak_globals; MutexLock mu(vm->weak_globals_lock); @@ -910,27 +881,27 @@ class JNI { } static jobject NewLocalRef(JNIEnv* env, jobject obj) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (obj == NULL) { return NULL; } - IndirectReferenceTable& locals = ts.Env()->locals; + IndirectReferenceTable& locals = soa.Env()->locals; - uint32_t cookie = ts.Env()->local_ref_cookie; - IndirectRef ref = locals.Add(cookie, ts.Decode(obj)); + uint32_t cookie = soa.Env()->local_ref_cookie; + IndirectRef ref = locals.Add(cookie, soa.Decode(obj)); return reinterpret_cast(ref); } static void DeleteLocalRef(JNIEnv* env, jobject obj) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (obj == NULL) { return; } - IndirectReferenceTable& locals = ts.Env()->locals; + IndirectReferenceTable& locals = soa.Env()->locals; - uint32_t cookie = ts.Env()->local_ref_cookie; + uint32_t cookie = soa.Env()->local_ref_cookie; if (!locals.Remove(cookie, obj)) { // Attempting to delete a local reference that is not in the // topmost local reference frame is a no-op. DeleteLocalRef returns @@ -943,22 +914,22 @@ class JNI { } static jboolean IsSameObject(JNIEnv* env, jobject obj1, jobject obj2) { - ScopedJniThreadState ts(env); - return (ts.Decode(obj1) == ts.Decode(obj2)) + ScopedObjectAccess soa(env); + return (soa.Decode(obj1) == soa.Decode(obj2)) ? 
JNI_TRUE : JNI_FALSE; } static jobject AllocObject(JNIEnv* env, jclass java_class) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } - return ts.AddLocalReference(c->AllocObject()); + return soa.AddLocalReference(c->AllocObject()); } static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list args; va_start(args, mid); jobject result = NewObjectV(env, c, mid, args); @@ -967,8 +938,8 @@ class JNI { } static jobject NewObjectV(JNIEnv* env, jclass java_class, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -976,9 +947,9 @@ class JNI { if (result == NULL) { return NULL; } - jobject local_result = ts.AddLocalReference(result); + jobject local_result = soa.AddLocalReference(result); CallNonvirtualVoidMethodV(env, local_result, java_class, mid, args); - if (!ts.Self()->IsExceptionPending()) { + if (!soa.Self()->IsExceptionPending()) { return local_result; } else { return NULL; @@ -986,8 +957,8 @@ class JNI { } static jobject NewObjectA(JNIEnv* env, jclass java_class, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; } @@ -995,9 +966,9 @@ class JNI { if (result == NULL) { return NULL; } - jobject local_result = ts.AddLocalReference(result); + jobject local_result = soa.AddLocalReference(result); CallNonvirtualVoidMethodA(env, local_result, java_class, mid, args); - if 
(!ts.Self()->IsExceptionPending()) { + if (!soa.Self()->IsExceptionPending()) { return local_result; } else { return NULL; @@ -1005,468 +976,468 @@ class JNI { } static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) { - ScopedJniThreadState ts(env); - return FindMethodID(ts, c, name, sig, false); + ScopedObjectAccess soa(env); + return FindMethodID(soa, c, name, sig, false); } static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) { - ScopedJniThreadState ts(env); - return FindMethodID(ts, c, name, sig, true); + ScopedObjectAccess soa(env); + return FindMethodID(soa, c, name, sig, true); } static jobject CallObjectMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); - return ts.AddLocalReference(result.GetL()); + return soa.AddLocalReference(result.GetL()); } static jobject CallObjectMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args)); + return soa.AddLocalReference(result.GetL()); } static jobject CallObjectMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - JValue result(InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args)); + return soa.AddLocalReference(result.GetL()); } static jboolean CallBooleanMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetZ(); } static jboolean CallBooleanMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetZ(); } static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetZ(); } static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetB(); } static jbyte CallByteMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetB(); } static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetB(); } static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetC(); } static jchar CallCharMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetC(); } static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetC(); } static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetD(); } static jdouble CallDoubleMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetD(); } static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetD(); } static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetF(); } static jfloat CallFloatMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetF(); } static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetF(); } static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetI(); } static jint CallIntMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetI(); } static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetI(); } static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetJ(); } static jlong CallLongMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetJ(); } static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetJ(); } static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetS(); } static jshort CallShortMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetS(); } static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetS(); } static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap)); va_end(ap); } static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - InvokeVirtualOrInterfaceWithVarArgs(ts, obj, mid, args); + ScopedObjectAccess soa(env); + InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args); } static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - InvokeVirtualOrInterfaceWithJValues(ts, obj, mid, args); + ScopedObjectAccess soa(env); + InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args); } static jobject CallNonvirtualObjectMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); - jobject local_result = ts.AddLocalReference(result.GetL()); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); + jobject local_result = soa.AddLocalReference(result.GetL()); va_end(ap); return local_result; } static jobject CallNonvirtualObjectMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - JValue result(InvokeWithVarArgs(ts, obj, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeWithVarArgs(soa, obj, mid, args)); + return soa.AddLocalReference(result.GetL()); } static jobject CallNonvirtualObjectMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - JValue result(InvokeWithJValues(ts, obj, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeWithJValues(soa, obj, mid, args)); + return 
soa.AddLocalReference(result.GetL()); } static jboolean CallNonvirtualBooleanMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetZ(); } static jboolean CallNonvirtualBooleanMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetZ(); } static jboolean CallNonvirtualBooleanMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetZ(); } static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetB(); } static jbyte CallNonvirtualByteMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetB(); } static jbyte CallNonvirtualByteMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetB(); } static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetC(); } static jchar CallNonvirtualCharMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetC(); } static jchar CallNonvirtualCharMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetC(); } static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetS(); } static jshort CallNonvirtualShortMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetS(); } static jshort CallNonvirtualShortMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetS(); } static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetI(); } static jint CallNonvirtualIntMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetI(); } static jint CallNonvirtualIntMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetI(); } static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetJ(); } static jlong CallNonvirtualLongMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetJ(); } static jlong CallNonvirtualLongMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetJ(); } static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetF(); } static jfloat CallNonvirtualFloatMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetF(); } static jfloat CallNonvirtualFloatMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetF(); } static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, obj, mid, ap)); + JValue result(InvokeWithVarArgs(soa, obj, mid, ap)); va_end(ap); return result.GetD(); } static jdouble CallNonvirtualDoubleMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, obj, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, obj, mid, args).GetD(); } static jdouble CallNonvirtualDoubleMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, obj, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, obj, mid, args).GetD(); } static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - InvokeWithVarArgs(ts, obj, mid, ap); + InvokeWithVarArgs(soa, obj, mid, ap); va_end(ap); } static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - InvokeWithVarArgs(ts, obj, mid, args); + ScopedObjectAccess soa(env); + InvokeWithVarArgs(soa, obj, mid, args); } static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - InvokeWithJValues(ts, obj, mid, args); + ScopedObjectAccess soa(env); + InvokeWithJValues(soa, obj, mid, args); } static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) { - ScopedJniThreadState ts(env); - return FindFieldID(ts, c, name, sig, false); + ScopedObjectAccess soa(env); + return FindFieldID(soa, c, name, sig, false); } static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) { - ScopedJniThreadState ts(env); - return FindFieldID(ts, c, name, sig, true); + ScopedObjectAccess soa(env); + return FindFieldID(soa, c, name, sig, true); } static jobject GetObjectField(JNIEnv* env, jobject obj, jfieldID fid) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(obj); - Field* f = ts.DecodeField(fid); - return ts.AddLocalReference(f->GetObject(o)); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(obj); + Field* f = soa.DecodeField(fid); + return soa.AddLocalReference(f->GetObject(o)); } static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) { - ScopedJniThreadState ts(env); - Field* f = ts.DecodeField(fid); - return ts.AddLocalReference(f->GetObject(NULL)); + ScopedObjectAccess soa(env); + Field* f = soa.DecodeField(fid); + return soa.AddLocalReference(f->GetObject(NULL)); } static void SetObjectField(JNIEnv* env, jobject java_object, jfieldID fid, jobject java_value) { - ScopedJniThreadState ts(env); - 
Object* o = ts.Decode(java_object); - Object* v = ts.Decode(java_value); - Field* f = ts.DecodeField(fid); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(java_object); + Object* v = soa.Decode(java_value); + Field* f = soa.DecodeField(fid); f->SetObject(o, v); } static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) { - ScopedJniThreadState ts(env); - Object* v = ts.Decode(java_value); - Field* f = ts.DecodeField(fid); + ScopedObjectAccess soa(env); + Object* v = soa.Decode(java_value); + Field* f = soa.DecodeField(fid); f->SetObject(NULL, v); } #define GET_PRIMITIVE_FIELD(fn, instance) \ - ScopedJniThreadState ts(env); \ - Object* o = ts.Decode(instance); \ - Field* f = ts.DecodeField(fid); \ + ScopedObjectAccess soa(env); \ + Object* o = soa.Decode(instance); \ + Field* f = soa.DecodeField(fid); \ return f->fn(o) #define SET_PRIMITIVE_FIELD(fn, instance, value) \ - ScopedJniThreadState ts(env); \ - Object* o = ts.Decode(instance); \ - Field* f = ts.DecodeField(fid); \ + ScopedObjectAccess soa(env); \ + Object* o = soa.Decode(instance); \ + Field* f = soa.DecodeField(fid); \ f->fn(o, value) static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) { @@ -1598,227 +1569,227 @@ class JNI { } static jobject CallStaticObjectMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); - jobject local_result = ts.AddLocalReference(result.GetL()); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); + jobject local_result = soa.AddLocalReference(result.GetL()); va_end(ap); return local_result; } static jobject CallStaticObjectMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - JValue result(InvokeWithVarArgs(ts, NULL, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeWithVarArgs(soa, NULL, mid, args)); + return soa.AddLocalReference(result.GetL()); } static jobject CallStaticObjectMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - JValue result(InvokeWithJValues(ts, NULL, mid, args)); - return ts.AddLocalReference(result.GetL()); + ScopedObjectAccess soa(env); + JValue result(InvokeWithJValues(soa, NULL, mid, args)); + return soa.AddLocalReference(result.GetL()); } static jboolean CallStaticBooleanMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetZ(); } static jboolean CallStaticBooleanMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetZ(); } static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetZ(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetZ(); } static jbyte CallStaticByteMethod(JNIEnv* env, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetB(); } static jbyte CallStaticByteMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetB(); } static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetB(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetB(); } static jchar CallStaticCharMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetC(); } static jchar CallStaticCharMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetC(); } static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetC(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetC(); } static jshort CallStaticShortMethod(JNIEnv* env, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetS(); } static jshort CallStaticShortMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetS(); } static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetS(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetS(); } static jint CallStaticIntMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetI(); } static jint CallStaticIntMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetI(); } static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetI(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetI(); } static jlong CallStaticLongMethod(JNIEnv* env, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetJ(); } static jlong CallStaticLongMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetJ(); } static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetJ(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetJ(); } static jfloat CallStaticFloatMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetF(); } static jfloat CallStaticFloatMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetF(); } static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetF(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetF(); } static jdouble CallStaticDoubleMethod(JNIEnv* env, jclass, jmethodID mid, ...) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - JValue result(InvokeWithVarArgs(ts, NULL, mid, ap)); + JValue result(InvokeWithVarArgs(soa, NULL, mid, ap)); va_end(ap); return result.GetD(); } static jdouble CallStaticDoubleMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - return InvokeWithVarArgs(ts, NULL, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeWithVarArgs(soa, NULL, mid, args).GetD(); } static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - return InvokeWithJValues(ts, NULL, mid, args).GetD(); + ScopedObjectAccess soa(env); + return InvokeWithJValues(soa, NULL, mid, args).GetD(); } static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) 
{ - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); va_list ap; va_start(ap, mid); - InvokeWithVarArgs(ts, NULL, mid, ap); + InvokeWithVarArgs(soa, NULL, mid, ap); va_end(ap); } static void CallStaticVoidMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) { - ScopedJniThreadState ts(env); - InvokeWithVarArgs(ts, NULL, mid, args); + ScopedObjectAccess soa(env); + InvokeWithVarArgs(soa, NULL, mid, args); } static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) { - ScopedJniThreadState ts(env); - InvokeWithJValues(ts, NULL, mid, args); + ScopedObjectAccess soa(env); + InvokeWithJValues(soa, NULL, mid, args); } static jstring NewString(JNIEnv* env, const jchar* chars, jsize char_count) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); String* result = String::AllocFromUtf16(char_count, chars); - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static jstring NewStringUTF(JNIEnv* env, const char* utf) { if (utf == NULL) { return NULL; } - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); String* result = String::AllocFromModifiedUtf8(utf); - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static jsize GetStringLength(JNIEnv* env, jstring java_string) { - ScopedJniThreadState ts(env); - return ts.Decode(java_string)->GetLength(); + ScopedObjectAccess soa(env); + return soa.Decode(java_string)->GetLength(); } static jsize GetStringUTFLength(JNIEnv* env, jstring java_string) { - ScopedJniThreadState ts(env); - return ts.Decode(java_string)->GetUtfLength(); + ScopedObjectAccess soa(env); + return soa.Decode(java_string)->GetUtfLength(); } static void GetStringRegion(JNIEnv* env, jstring java_string, jsize start, jsize length, jchar* buf) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(java_string); + ScopedObjectAccess soa(env); + String* s = soa.Decode(java_string); if (start < 0 || length < 0 || start + length > s->GetLength()) { 
- ThrowSIOOBE(ts, start, length, s->GetLength()); + ThrowSIOOBE(soa, start, length, s->GetLength()); } else { const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset(); memcpy(buf, chars + start, length * sizeof(jchar)); @@ -1826,10 +1797,10 @@ class JNI { } static void GetStringUTFRegion(JNIEnv* env, jstring java_string, jsize start, jsize length, char* buf) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(java_string); + ScopedObjectAccess soa(env); + String* s = soa.Decode(java_string); if (start < 0 || length < 0 || start + length > s->GetLength()) { - ThrowSIOOBE(ts, start, length, s->GetLength()); + ThrowSIOOBE(soa, start, length, s->GetLength()); } else { const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset(); ConvertUtf16ToModifiedUtf8(buf, chars + start, length); @@ -1837,10 +1808,10 @@ class JNI { } static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* is_copy) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(java_string); + ScopedObjectAccess soa(env); + String* s = soa.Decode(java_string); const CharArray* chars = s->GetCharArray(); - PinPrimitiveArray(ts, chars); + PinPrimitiveArray(soa, chars); if (is_copy != NULL) { *is_copy = JNI_FALSE; } @@ -1848,29 +1819,29 @@ class JNI { } static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar*) { - ScopedJniThreadState ts(env); - UnpinPrimitiveArray(ts, ts.Decode(java_string)->GetCharArray()); + ScopedObjectAccess soa(env); + UnpinPrimitiveArray(soa, soa.Decode(java_string)->GetCharArray()); } static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); return GetStringChars(env, java_string, is_copy); } static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); return ReleaseStringChars(env, java_string, chars); } static const char* 
GetStringUTFChars(JNIEnv* env, jstring java_string, jboolean* is_copy) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); if (java_string == NULL) { return NULL; } if (is_copy != NULL) { *is_copy = JNI_TRUE; } - String* s = ts.Decode(java_string); + String* s = soa.Decode(java_string); size_t byte_count = s->GetUtfLength(); char* bytes = new char[byte_count + 1]; CHECK(bytes != NULL); // bionic aborts anyway. @@ -1881,73 +1852,73 @@ class JNI { } static void ReleaseStringUTFChars(JNIEnv* env, jstring, const char* chars) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); delete[] chars; } static jsize GetArrayLength(JNIEnv* env, jarray java_array) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(java_array); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(java_array); CHECK(obj->IsArrayInstance()); // TODO: ReportJniError Array* array = obj->AsArray(); return array->GetLength(); } static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index) { - ScopedJniThreadState ts(env); - ObjectArray* array = ts.Decode*>(java_array); - return ts.AddLocalReference(array->Get(index)); + ScopedObjectAccess soa(env); + ObjectArray* array = soa.Decode*>(java_array); + return soa.AddLocalReference(array->Get(index)); } static void SetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index, jobject java_value) { - ScopedJniThreadState ts(env); - ObjectArray* array = ts.Decode*>(java_array); - Object* value = ts.Decode(java_value); + ScopedObjectAccess soa(env); + ObjectArray* array = soa.Decode*>(java_array); + Object* value = soa.Decode(java_value); array->Set(index, value); } static jbooleanArray NewBooleanArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jbyteArray NewByteArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return 
NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jcharArray NewCharArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jdoubleArray NewDoubleArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jfloatArray NewFloatArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jintArray NewIntArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jlongArray NewLongArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_jclass, jobject initial_element) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); CHECK_GE(length, 0); // TODO: ReportJniError // Compute the array class corresponding to the given element class. - Class* element_class = ts.Decode(element_jclass); + Class* element_class = soa.Decode(element_jclass); std::string descriptor; descriptor += "["; descriptor += ClassHelper(element_class).GetDescriptor(); @@ -1959,26 +1930,26 @@ class JNI { } // Allocate and initialize if necessary. 
- Class* array_class = ts.Decode(java_array_class.get()); + Class* array_class = soa.Decode(java_array_class.get()); ObjectArray* result = ObjectArray::Alloc(array_class, length); if (initial_element != NULL) { - Object* initial_object = ts.Decode(initial_element); + Object* initial_object = soa.Decode(initial_element); for (jsize i = 0; i < length; ++i) { result->Set(i, initial_object); } } - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static jshortArray NewShortArray(JNIEnv* env, jsize length) { - ScopedJniThreadState ts(env); - return NewPrimitiveArray(ts, length); + ScopedObjectAccess soa(env); + return NewPrimitiveArray(soa, length); } static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray java_array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - Array* array = ts.Decode(java_array); - PinPrimitiveArray(ts, array); + ScopedObjectAccess soa(env); + Array* array = soa.Decode(java_array); + PinPrimitiveArray(soa, array); if (is_copy != NULL) { *is_copy = JNI_FALSE; } @@ -1986,173 +1957,173 @@ class JNI { } static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static jboolean* GetBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jbyte* GetByteArrayElements(JNIEnv* env, jbyteArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jchar* GetCharArrayElements(JNIEnv* env, jcharArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + 
ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jdouble* GetDoubleArrayElements(JNIEnv* env, jdoubleArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jfloat* GetFloatArrayElements(JNIEnv* env, jfloatArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jint* GetIntArrayElements(JNIEnv* env, jintArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jlong* GetLongArrayElements(JNIEnv* env, jlongArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static jshort* GetShortArrayElements(JNIEnv* env, jshortArray array, jboolean* is_copy) { - ScopedJniThreadState ts(env); - return GetPrimitiveArray(ts, array, is_copy); + ScopedObjectAccess soa(env); + return GetPrimitiveArray(soa, array, is_copy); } static void ReleaseBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseByteArrayElements(JNIEnv* env, jbyteArray array, jbyte*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseCharArrayElements(JNIEnv* env, jcharArray array, jchar*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + 
ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseDoubleArrayElements(JNIEnv* env, jdoubleArray array, jdouble*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseFloatArrayElements(JNIEnv* env, jfloatArray array, jfloat*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseIntArrayElements(JNIEnv* env, jintArray array, jint*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseLongArrayElements(JNIEnv* env, jlongArray array, jlong*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void ReleaseShortArrayElements(JNIEnv* env, jshortArray array, jshort*, jint mode) { - ScopedJniThreadState ts(env); - ReleasePrimitiveArray(ts, array, mode); + ScopedObjectAccess soa(env); + ReleasePrimitiveArray(soa, array, mode); } static void GetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length, jboolean* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetByteArrayRegion(JNIEnv* env, jbyteArray array, jsize start, jsize length, jbyte* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetCharArrayRegion(JNIEnv* env, jcharArray array, jsize start, jsize length, jchar* buf) { - ScopedJniThreadState ts(env); - 
GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetDoubleArrayRegion(JNIEnv* env, jdoubleArray array, jsize start, jsize length, jdouble* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetFloatArrayRegion(JNIEnv* env, jfloatArray array, jsize start, jsize length, jfloat* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetIntArrayRegion(JNIEnv* env, jintArray array, jsize start, jsize length, jint* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetLongArrayRegion(JNIEnv* env, jlongArray array, jsize start, jsize length, jlong* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void GetShortArrayRegion(JNIEnv* env, jshortArray array, jsize start, jsize length, jshort* buf) { - ScopedJniThreadState ts(env); - GetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + GetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length, const jboolean* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetByteArrayRegion(JNIEnv* env, jbyteArray array, jsize start, jsize length, const jbyte* buf) { - 
ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetCharArrayRegion(JNIEnv* env, jcharArray array, jsize start, jsize length, const jchar* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetDoubleArrayRegion(JNIEnv* env, jdoubleArray array, jsize start, jsize length, const jdouble* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetFloatArrayRegion(JNIEnv* env, jfloatArray array, jsize start, jsize length, const jfloat* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetIntArrayRegion(JNIEnv* env, jintArray array, jsize start, jsize length, const jint* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetLongArrayRegion(JNIEnv* env, jlongArray array, jsize start, jsize length, const jlong* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static void SetShortArrayRegion(JNIEnv* env, jshortArray array, jsize start, jsize length, const jshort* buf) { - ScopedJniThreadState ts(env); - SetPrimitiveArrayRegion(ts, array, start, length, buf); + ScopedObjectAccess soa(env); + SetPrimitiveArrayRegion(soa, array, start, length, buf); } static jint RegisterNatives(JNIEnv* env, jclass java_class, 
const JNINativeMethod* methods, jint method_count) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); for (int i = 0; i < method_count; i++) { const char* name = methods[i].name; @@ -2169,67 +2140,69 @@ class JNI { } if (m == NULL) { LOG(INFO) << "Failed to register native method " << name << sig; - ThrowNoSuchMethodError(ts, c, name, sig, "static or non-static"); + ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static"); return JNI_ERR; } else if (!m->IsNative()) { LOG(INFO) << "Failed to register non-native method " << name << sig << " as native"; - ThrowNoSuchMethodError(ts, c, name, sig, "native"); + ThrowNoSuchMethodError(soa, c, name, sig, "native"); return JNI_ERR; } VLOG(jni) << "[Registering JNI native method " << PrettyMethod(m) << "]"; - m->RegisterNative(ts.Self(), methods[i].fnPtr); + m->RegisterNative(soa.Self(), methods[i].fnPtr); } return JNI_OK; } static jint UnregisterNatives(JNIEnv* env, jclass java_class) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(java_class); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(java_class); VLOG(jni) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]"; for (size_t i = 0; i < c->NumDirectMethods(); ++i) { Method* m = c->GetDirectMethod(i); if (m->IsNative()) { - m->UnregisterNative(ts.Self()); + m->UnregisterNative(soa.Self()); } } for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { Method* m = c->GetVirtualMethod(i); if (m->IsNative()) { - m->UnregisterNative(ts.Self()); + m->UnregisterNative(soa.Self()); } } return JNI_OK; } - static jint MonitorEnter(JNIEnv* env, jobject java_object) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(java_object); - o->MonitorEnter(ts.Self()); - if (ts.Self()->IsExceptionPending()) { + static jint MonitorEnter(JNIEnv* env, jobject java_object) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + ScopedObjectAccess soa(env); + Object* o = 
soa.Decode(java_object); + o->MonitorEnter(soa.Self()); + if (soa.Self()->IsExceptionPending()) { return JNI_ERR; } - ts.Env()->monitors.Add(o); + soa.Env()->monitors.Add(o); return JNI_OK; } - static jint MonitorExit(JNIEnv* env, jobject java_object) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(java_object); - o->MonitorExit(ts.Self()); - if (ts.Self()->IsExceptionPending()) { + static jint MonitorExit(JNIEnv* env, jobject java_object) + UNLOCK_FUNCTION(monitor_lock_) { + ScopedObjectAccess soa(env); + Object* o = soa.Decode(java_object); + o->MonitorExit(soa.Self()); + if (soa.Self()->IsExceptionPending()) { return JNI_ERR; } - ts.Env()->monitors.Remove(o); + soa.Env()->monitors.Remove(o); return JNI_OK; } static jint GetJavaVM(JNIEnv* env, JavaVM** vm) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); Runtime* runtime = Runtime::Current(); if (runtime != NULL) { *vm = runtime->GetJavaVM(); @@ -2240,13 +2213,13 @@ class JNI { } static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); // The address may not be NULL, and the capacity must be > 0. CHECK(address != NULL); // TODO: ReportJniError CHECK_GT(capacity, 0); // TODO: ReportJniError - // At the moment, the Java side is limited to 32 bits. + // At the moment, the Java side is limited to 32 bisoa. CHECK_LE(reinterpret_cast(address), 0xffffffff); CHECK_LE(capacity, 0xffffffff); jint address_arg = reinterpret_cast(address); @@ -2255,21 +2228,21 @@ class JNI { jobject result = env->NewObject(WellKnownClasses::java_nio_ReadWriteDirectByteBuffer, WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_init, address_arg, capacity_arg); - return ts.Self()->IsExceptionPending() ? NULL : result; + return soa.Self()->IsExceptionPending() ? 
NULL : result; } static void* GetDirectBufferAddress(JNIEnv* env, jobject java_buffer) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); return reinterpret_cast(env->GetIntField(java_buffer, WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_effectiveDirectAddress)); } static jlong GetDirectBufferCapacity(JNIEnv* env, jobject java_buffer) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); return static_cast(env->GetIntField(java_buffer, WellKnownClasses::java_nio_ReadWriteDirectByteBuffer_capacity)); } static jobjectRefType GetObjectRefType(JNIEnv* env, jobject java_object) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); CHECK(java_object != NULL); // TODO: ReportJniError @@ -2278,7 +2251,7 @@ class JNI { IndirectRefKind kind = GetIndirectRefKind(ref); switch (kind) { case kLocal: - if (ts.Env()->locals.Get(ref) != kInvalidIndirectRefObject) { + if (soa.Env()->locals.Get(ref) != kInvalidIndirectRefObject) { return JNILocalRefType; } return JNIInvalidRefType; @@ -2288,18 +2261,18 @@ class JNI { return JNIWeakGlobalRefType; case kSirtOrInvalid: // Is it in a stack IRT? - if (ts.Self()->SirtContains(java_object)) { + if (soa.Self()->SirtContains(java_object)) { return JNILocalRefType; } - if (!ts.Vm()->work_around_app_jni_bugs) { + if (!soa.Vm()->work_around_app_jni_bugs) { return JNIInvalidRefType; } // If we're handing out direct pointers, check whether it's a direct pointer // to a local reference. 
- if (ts.Decode(java_object) == reinterpret_cast(java_object)) { - if (ts.Env()->locals.ContainsDirectPointer(reinterpret_cast(java_object))) { + if (soa.Decode(java_object) == reinterpret_cast(java_object)) { + if (soa.Env()->locals.ContainsDirectPointer(reinterpret_cast(java_object))) { return JNILocalRefType; } } @@ -2309,6 +2282,80 @@ class JNI { LOG(FATAL) << "IndirectRefKind[" << kind << "]"; return JNIInvalidRefType; } + + private: + static jint EnsureLocalCapacity(const ScopedObjectAccess& soa, jint desired_capacity, + const char* caller) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + // TODO: we should try to expand the table if necessary. + if (desired_capacity < 1 || desired_capacity > static_cast(kLocalsMax)) { + LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; + return JNI_ERR; + } + // TODO: this isn't quite right, since "capacity" includes holes. + size_t capacity = soa.Env()->locals.Capacity(); + bool okay = (static_cast(kLocalsMax - capacity) >= desired_capacity); + if (!okay) { + soa.Self()->ThrowOutOfMemoryError(caller); + } + return okay ? 
JNI_OK : JNI_ERR; + } + + template + static JniT NewPrimitiveArray(const ScopedObjectAccess& soa, jsize length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + CHECK_GE(length, 0); // TODO: ReportJniError + ArtT* result = ArtT::Alloc(length); + return soa.AddLocalReference(result); + } + + template + static CArrayT GetPrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, + jboolean* is_copy) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ArtArrayT* array = soa.Decode(java_array); + PinPrimitiveArray(soa, array); + if (is_copy != NULL) { + *is_copy = JNI_FALSE; + } + return array->GetData(); + } + + template + static void ReleasePrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, + jint mode) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + if (mode != JNI_COMMIT) { + Array* array = soa.Decode(java_array); + UnpinPrimitiveArray(soa, array); + } + } + + template + static void GetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, + jsize start, jsize length, JavaT* buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ArrayT* array = soa.Decode(java_array); + if (start < 0 || length < 0 || start + length > array->GetLength()) { + ThrowAIOOBE(soa, array, start, length, "src"); + } else { + JavaT* data = array->GetData(); + memcpy(buf, data + start, length * sizeof(JavaT)); + } + } + + template + static void SetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, + jsize start, jsize length, const JavaT* buf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ArrayT* array = soa.Decode(java_array); + if (start < 0 || length < 0 || start + length > array->GetLength()) { + ThrowAIOOBE(soa, array, start, length, "dst"); + } else { + JavaT* data = array->GetData(); + memcpy(data + start, buf, length * sizeof(JavaT)); + } + } }; const JNINativeInterface gJniNativeInterface = { @@ -2603,10 +2650,10 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** 
p_vm, void** p_env, void* vm_args) { options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo)); } bool ignore_unrecognized = args->ignoreUnrecognized; - Runtime* runtime = Runtime::Create(options, ignore_unrecognized); - if (runtime == NULL) { + if (!Runtime::Create(options, ignore_unrecognized)) { return JNI_ERR; } + Runtime* runtime = Runtime::Current(); runtime->Start(); *p_env = Thread::Current()->GetJniEnv(); *p_vm = runtime->GetJavaVM(); @@ -2700,7 +2747,7 @@ JavaVMExt::JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options) globals(gGlobalsInitial, gGlobalsMax, kGlobal), weak_globals_lock("JNI weak global reference table lock"), weak_globals(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal), - libraries_lock("JNI shared libraries map lock"), + libraries_lock("JNI shared libraries map lock", kLoadLibraryLock), libraries(new Libraries) { functions = unchecked_functions = &gJniInvokeInterface; if (options->check_jni_) { @@ -2760,7 +2807,8 @@ void JavaVMExt::DumpReferenceTables(std::ostream& os) { } } -bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail) { +bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, + std::string& detail) { detail.clear(); // See if we've already loaded this library. If we have, and the class loader @@ -2815,18 +2863,18 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_lo // - write a trivial app that calls sleep() then dlopen(), attach // to it with "strace -p " while it sleeps, and watch for // attempts to open nonexistent dependent shared libs - // TODO: automate some of these checks! + // Below we dlopen but there is no paired dlclose, this would be necessary if we supported + // class unloading. Libraries will only be unloaded when the reference count (incremented by + // dlopen) becomes zero from dlclose. 
+ // This can execute slowly for a large library on a busy system, so we - // want to switch from kRunnable to kVmWait while it executes. This allows - // the GC to ignore us. + // want to switch from kRunnable while it executes. This allows the GC to ignore us. Thread* self = Thread::Current(); - void* handle = NULL; - { - ScopedThreadStateChange tsc(self, kVmWait); - handle = dlopen(path.empty() ? NULL : path.c_str(), RTLD_LAZY); - } + self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad); + void* handle = dlopen(path.empty() ? NULL : path.c_str(), RTLD_LAZY); + self->TransitionFromSuspendedToRunnable(); VLOG(jni) << "[Call to dlopen(\"" << path << "\") returned " << handle << "]"; @@ -2836,17 +2884,21 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_lo } // Create a new entry. + // TODO: move the locking (and more of this logic) into Libraries. + bool created_library = false; { - // TODO: move the locking (and more of this logic) into Libraries. 
MutexLock mu(libraries_lock); library = libraries->Get(path); - if (library != NULL) { - LOG(INFO) << "WOW: we lost a race to add shared library: " - << "\"" << path << "\" ClassLoader=" << class_loader; - return library->CheckOnLoadResult(); + if (library == NULL) { // We won race to get libraries_lock + library = new SharedLibrary(path, handle, class_loader); + libraries->Put(path, library); + created_library = true; } - library = new SharedLibrary(path, handle, class_loader); - libraries->Put(path, library); + } + if (!created_library) { + LOG(INFO) << "WOW: we lost a race to add shared library: " + << "\"" << path << "\" ClassLoader=" << class_loader; + return library->CheckOnLoadResult(); } VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]"; diff --git a/src/jni_internal.h b/src/jni_internal.h index b96a4d769b..fad06e1bec 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -43,17 +43,20 @@ class Field; union JValue; class Libraries; class Method; -class ScopedJniThreadState; +class ScopedObjectAccess; class Thread; void SetJniGlobalsMax(size_t max); void JniAbortF(const char* jni_function_name, const char* fmt, ...); void* FindNativeMethod(Thread* thread); -void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count); +void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, + size_t method_count); size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len); -JValue InvokeWithJValues(const ScopedJniThreadState&, jobject obj, jmethodID mid, jvalue* args); -JValue InvokeWithJValues(const ScopedJniThreadState&, Object* receiver, Method* m, JValue* args); +JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +JValue InvokeWithJValues(const ScopedObjectAccess&, Object* receiver, Method* m, JValue* args) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); @@ -67,17 +70,20 @@ struct JavaVMExt : public JavaVM { * Returns 'true' on success. On failure, sets 'detail' to a * human-readable description of the error. */ - bool LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail); + bool LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /** * Returns a pointer to the code for the native method 'm', found * using dlsym(3) on every native library that's been loaded so far. */ - void* FindCodeForNativeMethod(Method* m); + void* FindCodeForNativeMethod(Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void DumpForSigQuit(std::ostream& os); - void DumpReferenceTables(std::ostream& os); + void DumpReferenceTables(std::ostream& os) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void SetCheckJniEnabled(bool enabled); @@ -100,18 +106,18 @@ struct JavaVMExt : public JavaVM { bool work_around_app_jni_bugs; // Used to hold references to pinned primitive arrays. - Mutex pins_lock; + Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER; ReferenceTable pin_table GUARDED_BY(pins_lock); // JNI global references. - Mutex globals_lock; + Mutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER; IndirectReferenceTable globals GUARDED_BY(globals_lock); // JNI weak global references. - Mutex weak_globals_lock; + Mutex weak_globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER; IndirectReferenceTable weak_globals GUARDED_BY(weak_globals_lock); - Mutex libraries_lock; + Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER; Libraries* libraries GUARDED_BY(libraries_lock); // Used by -Xcheck:jni. 
@@ -122,7 +128,8 @@ struct JNIEnvExt : public JNIEnv { JNIEnvExt(Thread* self, JavaVMExt* vm); ~JNIEnvExt(); - void DumpReferenceTables(std::ostream& os); + void DumpReferenceTables(std::ostream& os) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void SetCheckJniEnabled(bool enabled); diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc index daca1b540c..64461b010f 100644 --- a/src/jni_internal_test.cc +++ b/src/jni_internal_test.cc @@ -20,7 +20,6 @@ #include "common_test.h" #include "ScopedLocalRef.h" -#include "scoped_jni_thread_state.h" namespace art { @@ -69,9 +68,12 @@ class JniInternalTest : public CommonTest { CommonTest::TearDown(); } - Method::InvokeStub* DoCompile(Method*& method, Object*& receiver, bool is_static, const char* method_name, const char* method_signature) { + Method::InvokeStub* DoCompile(Method*& method, Object*& receiver, bool is_static, + const char* method_name, const char* method_signature) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods"; - SirtRef class_loader(LoadDex(class_name)); + jobject jclass_loader(LoadDex(class_name)); + SirtRef class_loader(ScopedObjectAccessUnchecked(Thread::Current()).Decode(jclass_loader)); if (is_static) { CompileDirectMethod(class_loader.get(), class_name, method_name, method_signature); } else { @@ -83,7 +85,8 @@ class JniInternalTest : public CommonTest { Class* c = class_linker_->FindClass(DotToDescriptor(class_name).c_str(), class_loader.get()); CHECK(c != NULL); - method = is_static ? c->FindDirectMethod(method_name, method_signature) : c->FindVirtualMethod(method_name, method_signature); + method = is_static ? c->FindDirectMethod(method_name, method_signature) + : c->FindVirtualMethod(method_name, method_signature); CHECK(method != NULL); receiver = (is_static ? 
NULL : c->AllocObject()); @@ -94,14 +97,15 @@ class JniInternalTest : public CommonTest { return stub; } - void InvokeNopMethod(bool is_static) { + void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "nop", "()V"); (*stub)(method, receiver, Thread::Current(), NULL, NULL); } - void InvokeIdentityByteMethod(bool is_static) { + void InvokeIdentityByteMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(B)B"); @@ -130,7 +134,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(SCHAR_MIN, result.GetB()); } - void InvokeIdentityIntMethod(bool is_static) { + void InvokeIdentityIntMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(I)I"); @@ -159,7 +164,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(INT_MIN, result.GetI()); } - void InvokeIdentityDoubleMethod(bool is_static) { + void InvokeIdentityDoubleMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(D)D"); @@ -188,7 +194,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(DBL_MIN, result.GetD()); } - void InvokeSumIntIntMethod(bool is_static) { + void InvokeSumIntIntMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(II)I"); @@ -226,7 +233,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(-2, result.GetI()); } - void InvokeSumIntIntIntMethod(bool is_static) { + 
void InvokeSumIntIntIntMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(III)I"); @@ -269,7 +277,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(2147483645, result.GetI()); } - void InvokeSumIntIntIntIntMethod(bool is_static) { + void InvokeSumIntIntIntIntMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIII)I"); @@ -317,7 +326,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(-4, result.GetI()); } - void InvokeSumIntIntIntIntIntMethod(bool is_static) { + void InvokeSumIntIntIntIntIntMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIIII)I"); @@ -370,7 +380,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(2147483643, result.GetI()); } - void InvokeSumDoubleDoubleMethod(bool is_static) { + void InvokeSumDoubleDoubleMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DD)D"); @@ -409,7 +420,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(INFINITY, result.GetD()); } - void InvokeSumDoubleDoubleDoubleMethod(bool is_static) { + void InvokeSumDoubleDoubleDoubleMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDD)D"); @@ -439,7 +451,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(2.0, result.GetD()); } - void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) { + void 
InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDD)D"); @@ -472,7 +485,8 @@ class JniInternalTest : public CommonTest { EXPECT_EQ(-2.0, result.GetD()); } - void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) { + void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDDD)D"); @@ -1143,7 +1157,8 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) { #if !defined(ART_USE_LLVM_COMPILER) TEST_F(JniInternalTest, GetPrimitiveField_SetPrimitiveField) { - SirtRef class_loader(LoadDex("AllFields")); + Thread::Current()->TransitionFromSuspendedToRunnable(); + LoadDex("AllFields"); runtime_->Start(); jclass c = env_->FindClass("AllFields"); @@ -1171,7 +1186,8 @@ TEST_F(JniInternalTest, GetPrimitiveField_SetPrimitiveField) { } TEST_F(JniInternalTest, GetObjectField_SetObjectField) { - SirtRef class_loader(LoadDex("AllFields")); + Thread::Current()->TransitionFromSuspendedToRunnable(); + LoadDex("AllFields"); runtime_->Start(); jclass c = env_->FindClass("AllFields"); @@ -1228,7 +1244,7 @@ TEST_F(JniInternalTest, DeleteLocalRef) { { CheckJniAbortCatcher check_jni_abort_catcher; env_->DeleteLocalRef(s); - check_jni_abort_catcher.Check("native code passing in reference to invalid local reference: 0x200001"); + check_jni_abort_catcher.Check("native code passing in reference to invalid local reference: 0x1400001"); } s = env_->NewStringUTF(""); @@ -1246,7 +1262,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { jobject outer; jobject inner1, inner2; - ScopedJniThreadState ts(env_); + ScopedObjectAccess soa(env_); Object* inner2_direct_pointer; { env_->PushLocalFrame(4); @@ 
-1256,7 +1272,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { env_->PushLocalFrame(4); inner1 = env_->NewLocalRef(outer); inner2 = env_->NewStringUTF("survivor"); - inner2_direct_pointer = ts.Decode(inner2); + inner2_direct_pointer = soa.Decode(inner2); env_->PopLocalFrame(inner2); } @@ -1309,7 +1325,7 @@ TEST_F(JniInternalTest, DeleteGlobalRef) { { CheckJniAbortCatcher check_jni_abort_catcher; env_->DeleteGlobalRef(o); - check_jni_abort_catcher.Check("native code passing in reference to invalid global reference: 0x10000e"); + check_jni_abort_catcher.Check("native code passing in reference to invalid global reference: 0x100056"); } jobject o1 = env_->NewGlobalRef(s); @@ -1364,7 +1380,9 @@ TEST_F(JniInternalTest, DeleteWeakGlobalRef) { } TEST_F(JniInternalTest, StaticMainMethod) { - SirtRef class_loader(LoadDex("Main")); + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("Main"); + SirtRef class_loader(soa.Decode(jclass_loader)); CompileDirectMethod(class_loader.get(), "Main", "main", "([Ljava/lang/String;)V"); Class* klass = class_linker_->FindClass("LMain;", class_loader.get()); @@ -1382,98 +1400,122 @@ TEST_F(JniInternalTest, StaticMainMethod) { } TEST_F(JniInternalTest, StaticNopMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeNopMethod(true); } TEST_F(JniInternalTest, NonStaticNopMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeNopMethod(false); } TEST_F(JniInternalTest, StaticIdentityByteMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityByteMethod(true); } TEST_F(JniInternalTest, NonStaticIdentityByteMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityByteMethod(false); } TEST_F(JniInternalTest, StaticIdentityIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityIntMethod(true); } TEST_F(JniInternalTest, NonStaticIdentityIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityIntMethod(false); } TEST_F(JniInternalTest, 
StaticIdentityDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityDoubleMethod(true); } TEST_F(JniInternalTest, NonStaticIdentityDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeIdentityDoubleMethod(false); } TEST_F(JniInternalTest, StaticSumIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntMethod(true); } TEST_F(JniInternalTest, NonStaticSumIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntMethod(false); } TEST_F(JniInternalTest, StaticSumIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntMethod(true); } TEST_F(JniInternalTest, NonStaticSumIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntMethod(false); } TEST_F(JniInternalTest, StaticSumIntIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntIntMethod(true); } TEST_F(JniInternalTest, NonStaticSumIntIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntIntMethod(false); } TEST_F(JniInternalTest, StaticSumIntIntIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntIntIntMethod(true); } TEST_F(JniInternalTest, NonStaticSumIntIntIntIntIntMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumIntIntIntIntIntMethod(false); } TEST_F(JniInternalTest, StaticSumDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleMethod(true); } TEST_F(JniInternalTest, NonStaticSumDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleMethod(false); } TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleMethod(true); } TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleMethod(false); } TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleDoubleMethod) { + 
ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleDoubleMethod(true); } TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleDoubleMethod(false); } TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(true); } TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleDoubleDoubleMethod) { + ScopedObjectAccess soa(Thread::Current()); InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(false); } diff --git a/src/jobject_comparator.cc b/src/jobject_comparator.cc new file mode 100644 index 0000000000..edd072738a --- /dev/null +++ b/src/jobject_comparator.cc @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jobject_comparator.h" + +#include "object.h" +#include "scoped_thread_state_change.h" + +namespace art { + +bool JobjectComparator::operator()(jobject jobj1, jobject jobj2) const { + // Ensure null references and cleared jweaks appear at the end. + if (jobj1 == NULL) { + return true; + } else if (jobj2 == NULL) { + return false; + } + ScopedObjectAccess soa(Thread::Current()); + Object* obj1 = soa.Decode(jobj1); + Object* obj2 = soa.Decode(jobj2); + if (obj1 == NULL) { + return true; + } else if (obj2 == NULL) { + return false; + } + // Sort by class... 
+ if (obj1->GetClass() != obj2->GetClass()) { + return obj1->GetClass()->IdentityHashCode() < obj2->IdentityHashCode(); + } else { + // ...then by size... + size_t count1 = obj1->SizeOf(); + size_t count2 = obj2->SizeOf(); + if (count1 != count2) { + return count1 < count2; + } else { + // ...and finally by identity hash code. + return obj1->IdentityHashCode() < obj2->IdentityHashCode(); + } + } +} + +} // namespace art diff --git a/src/jobject_comparator.h b/src/jobject_comparator.h new file mode 100644 index 0000000000..17098aaebb --- /dev/null +++ b/src/jobject_comparator.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_JOBJECT_COMPARATOR_H_ +#define ART_SRC_JOBJECT_COMPARATOR_H_ + +#include + +namespace art { + +struct JobjectComparator { + bool operator()(jobject jobj1, jobject jobj2) const; +}; + +} // namespace art + +#endif // ART_SRC_JOBJECT_COMPARATOR_H_ diff --git a/src/logging.cc b/src/logging.cc index 30063a17dd..712c02bcd7 100644 --- a/src/logging.cc +++ b/src/logging.cc @@ -29,11 +29,6 @@ static std::string* gCmdLine; static std::string* gProgramInvocationName; static std::string* gProgramInvocationShortName; -static Mutex& GetLoggingLock() { - static Mutex logging_lock("LogMessage lock"); - return logging_lock; -} - const char* GetCmdLine() { return (gCmdLine != NULL) ? 
gCmdLine->c_str() : NULL; } @@ -55,6 +50,9 @@ const char* ProgramInvocationShortName() { // and a letter indicating the minimum priority level we're expected to log. // This can be used to reveal or conceal logs with specific tags. void InitLogging(char* argv[]) { + // TODO: Move this to a more obvious InitART... + GlobalSynchronization::Init(); + // Stash the command line for later use. We can use /proc/self/cmdline on Linux to recover this, // but we don't have that luxury on the Mac, and there are a couple of argv[0] variants that are // commonly used. @@ -106,7 +104,7 @@ LogMessage::~LogMessage() { // Do the actual logging with the lock held. { - MutexLock mu(GetLoggingLock()); + MutexLock mu(*GlobalSynchronization::logging_lock_); if (msg.find('\n') == std::string::npos) { LogLine(msg.c_str()); } else { diff --git a/src/logging.h b/src/logging.h index 94dd2c6f1d..ce86a72cbb 100644 --- a/src/logging.h +++ b/src/logging.h @@ -187,7 +187,7 @@ struct LogMessageData { class LogMessage { public: LogMessage(const char* file, int line, LogSeverity severity, int error); - ~LogMessage(); + ~LogMessage() LOCKS_EXCLUDED(GlobalSynchronization::logging_lock_); std::ostream& stream(); private: @@ -249,6 +249,35 @@ std::ostream& operator<<(std::ostream& os, const Dumpable& rhs) { return os; } +template +class MutatorLockedDumpable { + public: + explicit MutatorLockedDumpable(T& value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : value_(value) { + } + + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + value_.Dump(os); + } + + private: + T& value_; + +// TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*. 
+#if GCC_VERSION >= 40300 + DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable); +#endif +}; + +template +std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable& rhs) +// TODO: should be SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) however annotalysis +// currently fails for this. + NO_THREAD_SAFETY_ANALYSIS { + rhs.Dump(os); + return os; +} + // Helps you use operator<< in a const char*-like context such as our various 'F' methods with // format strings. template diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc index 7adc34436f..227614dd5e 100644 --- a/src/mark_sweep.cc +++ b/src/mark_sweep.cc @@ -30,7 +30,6 @@ #include "monitor.h" #include "object.h" #include "runtime.h" -#include "scoped_heap_lock.h" #include "space.h" #include "timing_logger.h" #include "thread.h" @@ -133,7 +132,9 @@ class CheckObjectVisitor { } - void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const { + void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { mark_sweep_->CheckReference(obj, ref, offset, is_static); } @@ -171,10 +172,11 @@ void MarkSweep::CopyMarkBits() { class ScanImageRootVisitor { public: ScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) { - } - void operator ()(const Object* root) const { + void operator ()(const Object* root) const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanObject(root); } @@ -225,7 +227,9 @@ class CheckBitmapVisitor { } - void operator ()(const Object* obj) const { + void operator ()(const Object* obj) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); mark_sweep_->CheckObject(obj); } @@ -322,7 +326,7 
@@ struct SweepCallbackContext { }; void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - ScopedHeapLock lock; + GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); size_t freed_objects = num_ptrs; size_t freed_bytes = 0; @@ -348,11 +352,12 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { space->Free(obj); } } - heap->RecordFreeLocked(freed_objects, freed_bytes); + heap->RecordFree(freed_objects, freed_bytes); } void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - ScopedHeapLock lock; + GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); + SweepCallbackContext* context = static_cast(arg); Heap* heap = context->heap; // We don't free any actual memory to avoid dirtying the shared zygote pages. diff --git a/src/mark_sweep.h b/src/mark_sweep.h index 189462f5d3..bb48b7a3da 100644 --- a/src/mark_sweep.h +++ b/src/mark_sweep.h @@ -44,36 +44,46 @@ class MarkSweep { void Init(); // Marks the root set at the start of a garbage collection. - void MarkRoots(); + void MarkRoots() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Marks the roots in the image space on dirty cards. - void ScanDirtyImageRoots(); + void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Verify that image roots point to only marked objects within the alloc space. - void VerifyImageRoots(); + void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); bool IsMarkStackEmpty() const { return mark_stack_->IsEmpty(); } // Builds a mark stack and recursively mark until it empties. 
- void RecursiveMark(bool partial); + void RecursiveMark(bool partial) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Copies mark bits from live bitmap of zygote space to mark bitmap for partial GCs. void CopyMarkBits(); // Builds a mark stack with objects on dirty cards and recursively mark // until it empties. - void RecursiveMarkDirtyObjects(); + void RecursiveMarkDirtyObjects() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Remarks the root set after completing the concurrent mark. - void ReMarkRoots(); + void ReMarkRoots() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Heap* GetHeap() { return heap_; } - void ProcessReferences(bool clear_soft_references) { + void ProcessReferences(bool clear_soft_references) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ProcessReferences(&soft_reference_list_, clear_soft_references, &weak_reference_list_, &finalizer_reference_list_, @@ -81,59 +91,83 @@ class MarkSweep { } // Sweeps unmarked objects to complete the garbage collection. - void Sweep(bool partial); + void Sweep(bool partial) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); Object* GetClearedReferences() { return cleared_reference_list_; } // Blackens an object. - void ScanObject(const Object* obj); + void ScanObject(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: // Returns true if the object has its bit set in the mark bitmap. 
- bool IsMarked(const Object* object) const { + bool IsMarked(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { if (current_mark_bitmap_->HasAddress(object)) { return current_mark_bitmap_->Test(object); } return heap_->GetMarkBitmap()->Test(object); } - static bool IsMarkedCallback(const Object* object, void* arg) { + static bool IsMarkedCallback(const Object* object, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { return reinterpret_cast(arg)->IsMarked(object); } - static bool IsLiveCallback(const Object* object, void* arg) { + static bool IsLiveCallback(const Object* object, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { return reinterpret_cast(arg)->GetHeap()->GetLiveBitmap()->Test(object); } - static void MarkObjectVisitor(const Object* root, void* arg); + static void MarkObjectVisitor(const Object* root, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); - static void ReMarkObjectVisitor(const Object* root, void* arg); + static void ReMarkObjectVisitor(const Object* root, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); - static void VerifyImageRootVisitor(Object* root, void* arg); + static void VerifyImageRootVisitor(Object* root, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_); - static void ScanDirtyCardCallback(Object* obj, void* arg); + static void ScanDirtyCardCallback(Object* obj, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Marks an object. - void MarkObject(const Object* obj); + void MarkObject(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Yuck. 
- void MarkObject0(const Object* obj, bool check_finger); + void MarkObject0(const Object* obj, bool check_finger) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); - static void ScanBitmapCallback(Object* obj, void* finger, void* arg); + static void ScanBitmapCallback(Object* obj, void* finger, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg); + + static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Special sweep for zygote that just marks objects / dirties cards. - static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg); + static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); - void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static); + void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_); - void CheckObject(const Object* obj); + void CheckObject(const Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_); template - void VisitObjectReferences(const Object* obj, const Visitor& visitor) { + void VisitObjectReferences(const Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); DCHECK(obj->GetClass() != NULL); if (obj->IsClass()) { @@ -146,10 +180,14 @@ class MarkSweep { } // Grays references in instance fields. 
- void ScanInstanceFields(const Object* obj); + void ScanInstanceFields(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); template - void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor) { + void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); Class* klass = obj->GetClass(); DCHECK(klass != NULL); @@ -157,28 +195,42 @@ class MarkSweep { } // Blackens a class object. - void ScanClass(const Object* obj); + void ScanClass(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + template - void VisitClassReferences(const Object* obj, const Visitor& visitor) { + void VisitClassReferences(const Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { VisitInstanceFieldsReferences(obj, visitor); VisitStaticFieldsReferences(obj->AsClass(), visitor); } // Grays references in static fields. 
- void ScanStaticFields(const Class* klass); + void ScanStaticFields(const Class* klass) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); template - void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor) { + void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) {\ DCHECK(klass != NULL); VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor); } // Used by ScanInstanceFields and ScanStaticFields - void ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static); + void ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); template - void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static, const Visitor& visitor) { + void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static, + const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { if (ref_offsets != CLASS_WALK_SUPER) { // Found a reference offset bitmap. Mark the specified offsets. while (ref_offsets != 0) { @@ -212,10 +264,14 @@ class MarkSweep { } // Grays references in an array. 
- void ScanArray(const Object* obj); + void ScanArray(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); template - void VisitArrayReferences(const Object* obj, const Visitor& visitor) { + void VisitArrayReferences(const Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { visitor(obj, obj->GetClass(), Object::ClassOffset(), false); if (obj->IsObjectArray()) { const ObjectArray* array = obj->AsObjectArray(); @@ -227,35 +283,51 @@ class MarkSweep { } } - void ScanOther(const Object* obj); + void ScanOther(const Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); template - void VisitOtherReferences(const Object* obj, const Visitor& visitor) { + void VisitOtherReferences(const Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { return VisitInstanceFieldsReferences(obj, visitor); } // Blackens objects grayed during a garbage collection. - void ScanGrayObjects(); + void ScanGrayObjects() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Schedules an unmarked object for reference processing. - void DelayReferenceReferent(Object* reference); + void DelayReferenceReferent(Object* reference) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Recursively blackens objects on the mark stack. 
- void ProcessMarkStack(); + void ProcessMarkStack() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void EnqueueFinalizerReferences(Object** ref); + void EnqueueFinalizerReferences(Object** ref) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void PreserveSomeSoftReferences(Object** ref); + void PreserveSomeSoftReferences(Object** ref) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void ClearWhiteReferences(Object** list); + void ClearWhiteReferences(Object** list) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); void ProcessReferences(Object** soft_references, bool clear_soft_references, Object** weak_references, Object** finalizer_references, - Object** phantom_references); - - void SweepSystemWeaks(bool swap_bitmaps); - void SweepJniWeakGlobals(HeapBitmap* bitmap); + Object** phantom_references) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + void SweepSystemWeaks(bool swap_bitmaps) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void SweepJniWeakGlobals(HeapBitmap* bitmap) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Current space, we check this space first to avoid searching for the appropriate space for an object. 
SpaceBitmap* current_mark_bitmap_; diff --git a/src/mod_union_table.cc b/src/mod_union_table.cc index 410bf62887..3b928e3858 100644 --- a/src/mod_union_table.cc +++ b/src/mod_union_table.cc @@ -56,7 +56,9 @@ class ModUnionVisitor { bitmap_(bitmap) { } - void operator ()(Object* obj) const { + void operator ()(Object* obj) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. @@ -136,10 +138,11 @@ void ModUnionTableBitmap::Update() { class ModUnionScanImageRootVisitor { public: ModUnionScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) { - } - void operator ()(const Object* root) const { + void operator ()(const Object* root) const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanObject(root); } @@ -208,7 +211,9 @@ class ModUnionReferenceVisitor { references_(references) { } - void operator ()(Object* obj) const { + void operator ()(Object* obj) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, + GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. diff --git a/src/mod_union_table.h b/src/mod_union_table.h index f44004bd7e..424f2f340e 100644 --- a/src/mod_union_table.h +++ b/src/mod_union_table.h @@ -84,10 +84,12 @@ class ModUnionTableBitmap : public ModUnionTable { void ClearCards(Space* space); // Update table based on cleared cards. - void Update(); + void Update() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Mark all references to the alloc space(s). 
- void MarkReferences(); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); protected: // Cleared card array, used to update the mod-union table. @@ -111,10 +113,12 @@ class ModUnionTableReferenceCache : public ModUnionTable { void ClearCards(Space* space); // Update table based on cleared cards. - void Update(); + void Update() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Mark all references to the alloc space(s). - void MarkReferences(); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); // Verify the mod-union table. void Verify(); @@ -146,7 +150,9 @@ class ModUnionTableCardCache : public ModUnionTable { void Update() {} // Mark all references to the alloc space(s). - void MarkReferences(); + void MarkReferences() + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Nothing to verify. void Verify() {} diff --git a/src/monitor.cc b/src/monitor.cc index 149babf0b2..0e6735d1d3 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -31,8 +31,7 @@ #include "mutex.h" #include "object.h" #include "object_utils.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" +#include "scoped_thread_state_change.h" #include "stl_util.h" #include "thread.h" #include "thread_list.h" @@ -117,14 +116,26 @@ void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread is_sensitive_thread_hook_ = is_sensitive_thread_hook; } -Monitor::Monitor(Object* obj) - : owner_(NULL), +Monitor::Monitor(Thread* owner, Object* obj) + : monitor_lock_("a monitor lock", kMonitorLock), + owner_(owner), lock_count_(0), obj_(obj), wait_set_(NULL), - lock_("a monitor lock"), locking_method_(NULL), locking_dex_pc_(0) { + monitor_lock_.Lock(); + // Propagate the lock state. 
+ uint32_t thin = *obj->GetRawLockWordAddress(); + lock_count_ = LW_LOCK_COUNT(thin); + thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT; + thin |= reinterpret_cast(this) | LW_SHAPE_FAT; + // Publish the updated lock word. + android_atomic_release_store(thin, obj->GetRawLockWordAddress()); + // Lock profiling. + if (lock_profiling_threshold_ != 0) { + locking_method_ = owner->GetCurrentMethod(&locking_dex_pc_); + } } Monitor::~Monitor() { @@ -190,7 +201,7 @@ void Monitor::Lock(Thread* self) { return; } - if (!lock_.TryLock()) { + if (!monitor_lock_.TryLock()) { uint64_t waitStart = 0; uint64_t waitEnd = 0; uint32_t wait_threshold = lock_profiling_threshold_; @@ -204,7 +215,7 @@ void Monitor::Lock(Thread* self) { current_locking_method = locking_method_; current_locking_dex_pc = locking_dex_pc_; - lock_.Lock(); + monitor_lock_.Lock(); if (wait_threshold != 0) { waitEnd = NanoTime() / 1000; } @@ -240,7 +251,8 @@ void Monitor::Lock(Thread* self) { static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) __attribute__((format(printf, 1, 2))); -static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) { +static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { va_list args; va_start(args, fmt); Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args); @@ -272,7 +284,7 @@ void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owne { // TODO: isn't this too late to prevent threads from disappearing? // Acquire thread list lock so threads won't disappear from under us. - ScopedThreadListLock thread_list_lock; + MutexLock mu(*GlobalSynchronization::thread_list_lock_); // Re-read owner now that we hold lock. current_owner = (monitor != NULL) ? monitor->owner_ : NULL; // Get short descriptions of the threads involved. 
@@ -322,7 +334,7 @@ void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owne } } -bool Monitor::Unlock(Thread* self) { +bool Monitor::Unlock(Thread* self, bool for_wait) { DCHECK(self != NULL); Thread* owner = owner_; if (owner == self) { @@ -331,10 +343,17 @@ bool Monitor::Unlock(Thread* self) { owner_ = NULL; locking_method_ = NULL; locking_dex_pc_ = 0; - lock_.Unlock(); + monitor_lock_.Unlock(); } else { --lock_count_; } + } else if (for_wait) { + // Wait should have already cleared the fields. + DCHECK_EQ(lock_count_, 0); + DCHECK(owner == NULL); + DCHECK(locking_method_ == NULL); + DCHECK_EQ(locking_dex_pc_, 0u); + monitor_lock_.Unlock(); } else { // We don't own this, so we're not allowed to unlock it. // The JNI spec says that we should throw IllegalMonitorStateException @@ -346,7 +365,8 @@ bool Monitor::Unlock(Thread* self) { } // Converts the given waiting time (relative to "now") into an absolute time in 'ts'. -static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) { +static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { int64_t endSec; #ifdef HAVE_TIMEDWAIT_MONOTONIC @@ -407,7 +427,11 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThr ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()"); return; } + monitor_lock_.AssertHeld(); + WaitWithLock(self, ms, ns, interruptShouldThrow); +} +void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) { // Enforce the timeout range. if (ms < 0 || ns < 0 || ns > 999999) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", @@ -447,57 +471,52 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThr * that we won't touch any references in this state, and we'll check * our suspend mode before we transition out. 
*/ - if (timed) { - self->SetState(kTimedWaiting); - } else { - self->SetState(kWaiting); - } + self->TransitionFromRunnableToSuspended(timed ? kTimedWaiting : kWaiting); - self->wait_mutex_->Lock(); + bool wasInterrupted = false; + { + // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock. + MutexLock mu(*self->wait_mutex_); - /* - * Set wait_monitor_ to the monitor object we will be waiting on. - * When wait_monitor_ is non-NULL a notifying or interrupting thread - * must signal the thread's wait_cond_ to wake it up. - */ - DCHECK(self->wait_monitor_ == NULL); - self->wait_monitor_ = this; + // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is + // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it + // up. + DCHECK(self->wait_monitor_ == NULL); + self->wait_monitor_ = this; - /* - * Handle the case where the thread was interrupted before we called - * wait(). - */ - bool wasInterrupted = false; - if (self->interrupted_) { - wasInterrupted = true; + // Release the monitor lock. + Unlock(self, true); + + /* + * Handle the case where the thread was interrupted before we called + * wait(). + */ + if (self->interrupted_) { + wasInterrupted = true; + } else { + // Wait for a notification or a timeout to occur. + if (!timed) { + self->wait_cond_->Wait(*self->wait_mutex_); + } else { + self->wait_cond_->TimedWait(*self->wait_mutex_, ts); + } + if (self->interrupted_) { + wasInterrupted = true; + } + self->interrupted_ = false; + } self->wait_monitor_ = NULL; - self->wait_mutex_->Unlock(); - goto done; } - /* - * Release the monitor lock and wait for a notification or - * a timeout to occur. - */ - lock_.Unlock(); + // Set self->status back to kRunnable, and self-suspend if needed. 
+ self->TransitionFromSuspendedToRunnable(); - if (!timed) { - self->wait_cond_->Wait(*self->wait_mutex_); - } else { - self->wait_cond_->TimedWait(*self->wait_mutex_, ts); - } - if (self->interrupted_) { - wasInterrupted = true; - } + // Re-acquire the monitor lock. + Lock(self); - self->interrupted_ = false; - self->wait_monitor_ = NULL; - self->wait_mutex_->Unlock(); - // Reacquire the monitor lock. - Lock(self); + self->wait_mutex_->AssertNotHeld(); - done: /* * We remove our thread from wait set after restoring the count * and owner fields so the subroutine can check that the calling @@ -510,9 +529,6 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThr locking_dex_pc_ = saved_dex_pc; RemoveFromWaitSet(self); - /* set self->status back to kRunnable, and self-suspend if needed */ - self->SetState(kRunnable); - if (wasInterrupted) { /* * We were interrupted while waiting, or somebody interrupted an @@ -521,7 +537,10 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThr * The doc sayeth: "The interrupted status of the current thread is * cleared when this exception is thrown." */ - self->interrupted_ = false; + { + MutexLock mu(*self->wait_mutex_); + self->interrupted_ = false; + } if (interruptShouldThrow) { Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL); } @@ -530,12 +549,16 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThr void Monitor::Notify(Thread* self) { DCHECK(self != NULL); - // Make sure that we hold the lock. if (owner_ != self) { ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()"); return; } + monitor_lock_.AssertHeld(); + NotifyWithLock(); +} + +void Monitor::NotifyWithLock() { // Signal the first waiting thread in the wait set. 
while (wait_set_ != NULL) { Thread* thread = wait_set_; @@ -553,12 +576,16 @@ void Monitor::Notify(Thread* self) { void Monitor::NotifyAll(Thread* self) { DCHECK(self != NULL); - // Make sure that we hold the lock. if (owner_ != self) { ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()"); return; } + monitor_lock_.AssertHeld(); + NotifyAllWithLock(); +} + +void Monitor::NotifyAllWithLock() { // Signal all threads in the wait set. while (wait_set_ != NULL) { Thread* thread = wait_set_; @@ -579,18 +606,10 @@ void Monitor::Inflate(Thread* self, Object* obj) { DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast(self->GetThinLockId())); // Allocate and acquire a new monitor. - Monitor* m = new Monitor(obj); + Monitor* m = new Monitor(self, obj); VLOG(monitor) << "monitor: thread " << self->GetThinLockId() << " created monitor " << m << " for object " << obj; Runtime::Current()->GetMonitorList()->Add(m); - m->Lock(self); - // Propagate the lock state. - uint32_t thin = *obj->GetRawLockWordAddress(); - m->lock_count_ = LW_LOCK_COUNT(thin); - thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT; - thin |= reinterpret_cast(m) | LW_SHAPE_FAT; - // Publish the updated lock word. - android_atomic_release_store(thin, obj->GetRawLockWordAddress()); } void Monitor::MonitorEnter(Thread* self, Object* obj) { @@ -639,7 +658,7 @@ void Monitor::MonitorEnter(Thread* self, Object* obj) { threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin)); // The lock is owned by another thread. Notify the runtime that we are about to wait. self->monitor_enter_object_ = obj; - ThreadState oldStatus = self->SetState(kBlocked); + self->TransitionFromRunnableToSuspended(kBlocked); // Spin until the thin lock is released or inflated. sleepDelayNs = 0; for (;;) { @@ -677,14 +696,14 @@ void Monitor::MonitorEnter(Thread* self, Object* obj) { // waiting and try again. 
VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp); self->monitor_enter_object_ = NULL; - self->SetState(oldStatus); + self->TransitionFromSuspendedToRunnable(); goto retry; } } VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp); // We have acquired the thin lock. Let the runtime know that we are no longer waiting. self->monitor_enter_object_ = NULL; - self->SetState(oldStatus); + self->TransitionFromSuspendedToRunnable(); // Fatten the lock. Inflate(self, obj); VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp); @@ -750,7 +769,7 @@ bool Monitor::MonitorExit(Thread* self, Object* obj) { * raised any exceptions before continuing. */ DCHECK(LW_MONITOR(*thinp) != NULL); - if (!LW_MONITOR(*thinp)->Unlock(self)) { + if (!LW_MONITOR(*thinp)->Unlock(self, false)) { // An exception has been raised. Do not fall through. return false; } @@ -796,6 +815,7 @@ void Monitor::Notify(Thread* self, Object *obj) { return; } // no-op; there are no waiters to notify. + Inflate(self, obj); } else { // It's a fat lock. LW_MONITOR(thin)->Notify(self); @@ -814,6 +834,7 @@ void Monitor::NotifyAll(Thread* self, Object *obj) { return; } // no-op; there are no waiters to notify. + Inflate(self, obj); } else { // It's a fat lock. 
LW_MONITOR(thin)->NotifyAll(self); @@ -830,17 +851,17 @@ uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) { } static uint32_t LockOwnerFromThreadLock(Object* thread_lock) { - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); if (thread_lock == NULL || - thread_lock->GetClass() != ts.Decode(WellKnownClasses::java_lang_ThreadLock)) { + thread_lock->GetClass() != soa.Decode(WellKnownClasses::java_lang_ThreadLock)) { return ThreadList::kInvalidId; } - Field* thread_field = ts.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread); + Field* thread_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread); Object* managed_thread = thread_field->GetObject(thread_lock); if (managed_thread == NULL) { return ThreadList::kInvalidId; } - Field* vmData_field = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); + Field* vmData_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); uintptr_t vmData = static_cast(vmData_field->GetInt(managed_thread)); Thread* thread = reinterpret_cast(vmData); if (thread == NULL) { @@ -850,13 +871,21 @@ static uint32_t LockOwnerFromThreadLock(Object* thread_lock) { } void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { - ThreadState state = thread->GetState(); + ThreadState state; + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + state = thread->GetState(); + } Object* object = NULL; uint32_t lock_owner = ThreadList::kInvalidId; if (state == kWaiting || state == kTimedWaiting) { os << " - waiting on "; - Monitor* monitor = thread->wait_monitor_; + Monitor* monitor; + { + MutexLock mu(*thread->wait_mutex_); + monitor = thread->wait_monitor_; + } if (monitor != NULL) { object = monitor->obj_; } @@ -883,7 +912,8 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { os << "\n"; } -static void DumpLockedObject(std::ostream& os, Object* o) { +static void DumpLockedObject(std::ostream& os, Object* o) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os << " - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n"; } @@ -968,21 +998,21 @@ void Monitor::TranslateLocation(const Method* method, uint32_t dex_pc, line_number = mh.GetLineNumFromDexPC(dex_pc); } -MonitorList::MonitorList() : lock_("MonitorList lock") { +MonitorList::MonitorList() : monitor_list_lock_("MonitorList lock") { } MonitorList::~MonitorList() { - MutexLock mu(lock_); + MutexLock mu(monitor_list_lock_); STLDeleteElements(&list_); } void MonitorList::Add(Monitor* m) { - MutexLock mu(lock_); + MutexLock mu(monitor_list_lock_); list_.push_front(m); } void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) { - MutexLock mu(lock_); + MutexLock mu(monitor_list_lock_); typedef std::list::iterator It; // TODO: C++0x auto It it = list_.begin(); while (it != list_.end()) { diff --git a/src/monitor.h b/src/monitor.h index d72ff73d4f..b506b39a96 100644 --- a/src/monitor.h +++ b/src/monitor.h @@ -25,6 +25,7 @@ #include "heap.h" #include "mutex.h" +#include "thread.h" namespace art { @@ -67,66 +68,96 @@ class Monitor { static bool IsSensitiveThread(); static void Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()); - static uint32_t GetThinLockId(uint32_t raw_lock_word); - - static void MonitorEnter(Thread* thread, Object* obj); - static bool MonitorExit(Thread* thread, Object* obj); - - static void Notify(Thread* self, Object* obj); - static void NotifyAll(Thread* self, Object* obj); - static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow); - - static void DescribeWait(std::ostream& os, const Thread* thread); - static void DescribeLocks(std::ostream& os, StackVisitor* stack_visitor); + static uint32_t GetThinLockId(uint32_t raw_lock_word) + NO_THREAD_SAFETY_ANALYSIS; // Reading lock owner without holding lock is racy. 
+ + static void MonitorEnter(Thread* thread, Object* obj) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static bool MonitorExit(Thread* thread, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_); + + static void Notify(Thread* self, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void NotifyAll(Thread* self, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + static void DescribeWait(std::ostream& os, const Thread* thread) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Object* GetObject(); private: - explicit Monitor(Object* obj); + explicit Monitor(Thread* owner, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); + void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); - void AppendToWaitSet(Thread* thread); - void RemoveFromWaitSet(Thread* thread); + static void Inflate(Thread* self, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static void Inflate(Thread* self, Object* obj); + void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, + const char* owner_filename, uint32_t owner_line_number) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number); + static void FailedUnlock(Object* 
obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static void FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon); + void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); + bool Unlock(Thread* thread, bool for_wait) UNLOCK_FUNCTION(monitor_lock_); - void Lock(Thread* self) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE. - bool Unlock(Thread* thread) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE. + void Notify(Thread* self) NO_THREAD_SAFETY_ANALYSIS; + void NotifyWithLock() + EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Notify(Thread* self); - void NotifyAll(Thread* self); + void NotifyAll(Thread* self) NO_THREAD_SAFETY_ANALYSIS; + void NotifyAllWithLock() + EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE. + + void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow) + NO_THREAD_SAFETY_ANALYSIS; + void WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) + EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. void TranslateLocation(const Method* method, uint32_t pc, - const char*& source_file, uint32_t& line_number) const; + const char*& source_file, uint32_t& line_number) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static bool (*is_sensitive_thread_hook_)(); static uint32_t lock_profiling_threshold_; + Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + // Which thread currently owns the lock? 
Thread* volatile owner_; // Owner's recursive lock depth. - int lock_count_; + int lock_count_ GUARDED_BY(monitor_lock_); // What object are we part of (for debugging). Object* const obj_; // Threads currently waiting on this monitor. - Thread* wait_set_; - - Mutex lock_; + Thread* wait_set_ GUARDED_BY(monitor_lock_); // Method and dex pc where the lock owner acquired the lock, used when lock // sampling is enabled. locking_method_ may be null if the lock is currently // unlocked, or if the lock is acquired by the system when the stack is empty. - const Method* locking_method_; - uint32_t locking_dex_pc_; + const Method* locking_method_ GUARDED_BY(monitor_lock_); + uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_); friend class MonitorList; friend class Object; @@ -140,11 +171,12 @@ class MonitorList { void Add(Monitor* m); - void SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg); + void SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); private: - Mutex lock_; - std::list list_; + Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + std::list list_ GUARDED_BY(monitor_list_lock_); DISALLOW_COPY_AND_ASSIGN(MonitorList); }; diff --git a/src/monitor_android.cc b/src/monitor_android.cc index 94f86e8085..ce42ee796a 100644 --- a/src/monitor_android.cc +++ b/src/monitor_android.cc @@ -50,7 +50,8 @@ static char* EventLogWriteString(char* dst, const char* value, size_t len) { return dst + len; } -void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number) { +void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, + const char* owner_filename, uint32_t owner_line_number) { // Emit the event list length, 1 byte. 
char eventBuffer[174]; char* cp = eventBuffer; @@ -103,7 +104,8 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample cp = EventLogWriteInt(cp, sample_percent); CHECK_LE((size_t)(cp - eventBuffer), sizeof(eventBuffer)); - android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample, EVENT_TYPE_LIST, eventBuffer, (size_t)(cp - eventBuffer)); + android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample, EVENT_TYPE_LIST, eventBuffer, + (size_t)(cp - eventBuffer)); } } // namespace art diff --git a/src/mutex.cc b/src/mutex.cc index c5551bd26b..182f6f05dd 100644 --- a/src/mutex.cc +++ b/src/mutex.cc @@ -37,44 +37,158 @@ namespace art { // This works on Mac OS 10.7, but hasn't been tested on older releases. struct __attribute__((__may_alias__)) darwin_pthread_mutex_t { - uint32_t padding0[2]; - uint32_t value; - uint32_t padding1[5]; - uint64_t owner_tid; + uint32_t padding0[4]; + intptr_t padding1; + uintptr_t owner_tid; + // ...other stuff we don't care about. +}; + +struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t { + int32_t padding0[4]; + intptr_t padding1[2]; + uintptr_t rw_owner_tid; // ...other stuff we don't care about. }; struct __attribute__((__may_alias__)) glibc_pthread_mutex_t { - int lock; - unsigned int count; + int32_t padding0[2]; int owner; // ...other stuff we don't care about. }; -static inline void CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking) { - if (!kIsDebugBuild) { +struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t { +#ifdef __LP64__ + int32_t padding0[6]; +#else + int32_t padding0[7]; +#endif + int writer; + // ...other stuff we don't care about. 
+}; + +ReaderWriterMutex* GlobalSynchronization::mutator_lock_ = NULL; +Mutex* GlobalSynchronization::thread_list_lock_ = NULL; +Mutex* GlobalSynchronization::classlinker_classes_lock_ = NULL; +ReaderWriterMutex* GlobalSynchronization::heap_bitmap_lock_ = NULL; +Mutex* GlobalSynchronization::abort_lock_ = NULL; +Mutex* GlobalSynchronization::logging_lock_ = NULL; +Mutex* GlobalSynchronization::unexpected_signal_lock_ = NULL; +Mutex* GlobalSynchronization::thread_suspend_count_lock_ = NULL; + +void GlobalSynchronization::Init() { + if (logging_lock_ != NULL) { + // Already initialized. + DCHECK(mutator_lock_ != NULL); + DCHECK(thread_list_lock_ != NULL); + DCHECK(classlinker_classes_lock_ != NULL); + DCHECK(heap_bitmap_lock_ != NULL); + DCHECK(abort_lock_ != NULL); + DCHECK(logging_lock_ != NULL); + DCHECK(unexpected_signal_lock_ != NULL); + DCHECK(thread_suspend_count_lock_ != NULL); + } else { + logging_lock_ = new Mutex("logging lock", kLoggingLock, true); + abort_lock_ = new Mutex("abort lock", kAbortLock, true); + DCHECK(mutator_lock_ == NULL); + mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock); + DCHECK(thread_list_lock_ == NULL); + thread_list_lock_ = new Mutex("thread list lock", kThreadListLock); + DCHECK(classlinker_classes_lock_ == NULL); + classlinker_classes_lock_ = new Mutex("ClassLinker classes lock", kClassLinkerClassesLock); + DCHECK(heap_bitmap_lock_ == NULL); + heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock); + DCHECK(unexpected_signal_lock_ == NULL); + unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true); + DCHECK(thread_suspend_count_lock_ == NULL); + thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock); + } +} + +BaseMutex::BaseMutex(const char* name, MutexLevel level) : level_(level), name_(name) {} + +static void CheckUnattachedThread(MutexLevel level) { + // The check below enumerates the cases where we expect not 
to be able to sanity check locks + // on a thread. TODO: tighten this check. + Runtime* runtime = Runtime::Current(); + CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown() || + level == kDefaultMutexLevel || level == kThreadListLock || + level == kLoggingLock || level == kAbortLock); +} + +void BaseMutex::RegisterAsLockedWithCurrentThread() { + Thread* self = Thread::Current(); + if (self == NULL) { + CheckUnattachedThread(level_); return; } - if (rank == -1) { + // Check if a bad Mutex of this level or lower is held. + bool bad_mutexes_held = false; + for (int i = level_; i >= 0; --i) { + BaseMutex* held_mutex = self->GetHeldMutex(static_cast(i)); + if (UNLIKELY(held_mutex != NULL)) { + LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" (level " << i + << ") while locking \"" << name_ << "\" (level " << static_cast(level_) << ")"; + if (i > kAbortLock) { + // Only abort in the check below if this is more than abort level lock. + bad_mutexes_held = true; + } + } + } + CHECK(!bad_mutexes_held); + // Don't record monitors as they are outside the scope of analysis. They may be inspected off of + // the monitor list. 
+ if (level_ != kMonitorLock) { + self->SetHeldMutex(level_, this); + } +} + +void BaseMutex::RegisterAsUnlockedWithCurrentThread() { + Thread* self = Thread::Current(); + if (self == NULL) { + CheckUnattachedThread(level_); return; } - Thread::Current()->CheckSafeToLockOrUnlock(rank, is_locking); + if (level_ != kMonitorLock) { + CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_; + self->SetHeldMutex(level_, NULL); + } } -static inline void CheckSafeToWait(MutexRank rank) { - if (!kIsDebugBuild) { +void BaseMutex::CheckSafeToWait() { + Thread* self = Thread::Current(); + if (self == NULL) { + CheckUnattachedThread(level_); return; } - Thread::Current()->CheckSafeToWait(rank); + CHECK(self->GetHeldMutex(level_) == this) << "Waiting on unacquired mutex: " << name_; + bool bad_mutexes_held = false; + for (int i = kMaxMutexLevel; i >= 0; --i) { + if (i != level_) { + BaseMutex* held_mutex = self->GetHeldMutex(static_cast(i)); + if (held_mutex != NULL) { + LOG(ERROR) << "Holding " << held_mutex->name_ << " (level " << i + << ") while performing wait on: " + << name_ << " (level " << static_cast(level_) << ")"; + bad_mutexes_held = true; + } + } + } + CHECK(!bad_mutexes_held); } -Mutex::Mutex(const char* name, MutexRank rank) : name_(name), rank_(rank) { - // Like Java, we use recursive mutexes. +Mutex::Mutex(const char* name, MutexLevel level, bool recursive) + : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) { +#if defined(__BIONIC__) + // Use recursive mutexes as Bionic's non-recursive mutexes don't have TIDs to check lock + // ownership of. 
pthread_mutexattr_t attributes; CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes)); CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE)); CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes)); CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes)); +#else + CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL)); +#endif } Mutex::~Mutex() { @@ -89,55 +203,69 @@ Mutex::~Mutex() { } } -void Mutex::Lock() { - CheckSafeToLockOrUnlock(rank_, true); - CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_)); +void Mutex::ExclusiveLock() { + bool is_held = IsExclusiveHeld(); + CHECK(recursive_ || !is_held) + << "Error attempt to recursively lock non-recursive lock \"" << name_ << "\""; + if (!is_held) { + CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_)); + RegisterAsLockedWithCurrentThread(); + } + recursion_count_++; + DCHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: " + << name_ << " " << recursion_count_; AssertHeld(); } -bool Mutex::TryLock() { - int result = pthread_mutex_trylock(&mutex_); - if (result == EBUSY) { - return false; +bool Mutex::ExclusiveTryLock() { + bool is_held = IsExclusiveHeld(); + CHECK(recursive_ || !is_held) + << "Error attempt to recursively lock non-recursive lock \"" << name_ << "\""; + if (!is_held) { + int result = pthread_mutex_trylock(&mutex_); + if (result == EBUSY) { + return false; + } + if (result != 0) { + errno = result; + PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; + } + RegisterAsLockedWithCurrentThread(); } - if (result != 0) { - errno = result; - PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; - } - CheckSafeToLockOrUnlock(rank_, true); + recursion_count_++; AssertHeld(); return true; } -void Mutex::Unlock() { +void Mutex::ExclusiveUnlock() { AssertHeld(); - CheckSafeToLockOrUnlock(rank_, false); - CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); -} - -#if !defined(NDEBUG) -#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED 
< 1060 -// Mac OS 10.5 didn't have anything we could implement GetTid() with. One thing we could try would -// be using pthread_t instead of the actual tid; this would be acceptable in most places, and more -// portable. 10.5 is already obsolete, though, so doing so would probably be all pain for no gain. -void Mutex::AssertHeld() {} -void Mutex::AssertNotHeld() {} -#else -void Mutex::AssertHeld() { - DCHECK_EQ(GetOwner(), static_cast(GetTid())); + recursion_count_--; + if (!recursive_ || recursion_count_ == 0) { + DCHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: " + << name_ << " " << recursion_count_; + RegisterAsUnlockedWithCurrentThread(); + CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); + } } -void Mutex::AssertNotHeld() { - DCHECK_NE(GetOwner(), static_cast(GetTid())); +bool Mutex::IsExclusiveHeld() const { + Thread* self = Thread::Current(); + bool result; + if (self == NULL || level_ == kMonitorLock) { // Handle unattached threads and monitors. + result = (GetExclusiveOwnerTid() == static_cast(GetTid())); + } else { + result = (self->GetHeldMutex(level_) == this); + // Sanity debug check that if we think it is locked, so does the pthread. 
+ DCHECK(result == (GetExclusiveOwnerTid() == static_cast(GetTid()))); + } + return result; } -#endif -#endif -uint64_t Mutex::GetOwner() { +uint64_t Mutex::GetExclusiveOwnerTid() const { #if defined(__BIONIC__) return static_cast((mutex_.value >> 16) & 0xffff); #elif defined(__GLIBC__) - return reinterpret_cast(&mutex_)->owner; + return reinterpret_cast(&mutex_)->owner; #elif defined(__APPLE__) return reinterpret_cast(&mutex_)->owner_tid; #else @@ -145,24 +273,104 @@ uint64_t Mutex::GetOwner() { #endif } -uint32_t Mutex::GetDepth() { - bool held = (GetOwner() == static_cast(GetTid())); - if (!held) { - return 0; +ReaderWriterMutex::ReaderWriterMutex(const char* name, MutexLevel level) : BaseMutex(name, level) { + CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL)); +} + +ReaderWriterMutex::~ReaderWriterMutex() { + // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread + // may still be using locks. + int rc = pthread_rwlock_destroy(&rwlock_); + if (rc != 0) { + errno = rc; + // TODO: should we just not log at all if shutting down? this could be the logging mutex! + bool shutting_down = Runtime::Current()->IsShuttingDown(); + PLOG(shutting_down ? 
WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_; + } +} + +void ReaderWriterMutex::ExclusiveLock() { + AssertNotExclusiveHeld(); + CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); + RegisterAsLockedWithCurrentThread(); + AssertExclusiveHeld(); +} + +void ReaderWriterMutex::ExclusiveUnlock() { + AssertExclusiveHeld(); + RegisterAsUnlockedWithCurrentThread(); + CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); +} + +bool ReaderWriterMutex::ExclusiveLockWithTimeout(const timespec& abs_timeout) { + int result = pthread_rwlock_timedwrlock(&rwlock_, &abs_timeout); + if (result == ETIMEDOUT) { + return false; } - uint32_t depth; + if (result != 0) { + errno = result; + PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; + } + RegisterAsLockedWithCurrentThread(); + AssertExclusiveHeld(); + return true; +} + +void ReaderWriterMutex::SharedLock() { + CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_)); + RegisterAsLockedWithCurrentThread(); + AssertSharedHeld(); +} + +bool ReaderWriterMutex::SharedTryLock() { + int result = pthread_rwlock_tryrdlock(&rwlock_); + if (result == EBUSY) { + return false; + } + if (result != 0) { + errno = result; + PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_; + } + RegisterAsLockedWithCurrentThread(); + AssertSharedHeld(); + return true; +} + +void ReaderWriterMutex::SharedUnlock() { + AssertSharedHeld(); + RegisterAsUnlockedWithCurrentThread(); + CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); +} + +bool ReaderWriterMutex::IsExclusiveHeld() const { + bool result = (GetExclusiveOwnerTid() == static_cast<uint64_t>(GetTid())); + // Sanity that if the pthread thinks we own the lock the Thread agrees. + Thread* self = Thread::Current(); + DCHECK((self == NULL) || !result || (self->GetHeldMutex(level_) == this)); + return result; +} + +bool ReaderWriterMutex::IsSharedHeld() const { + Thread* self = Thread::Current(); + bool result; + if (UNLIKELY(self == NULL)) { // Handle unattached threads.
+ result = IsExclusiveHeld(); // TODO: a better best effort here. + } else { + result = (self->GetHeldMutex(level_) == this); + } + return result; +} + +uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const { #if defined(__BIONIC__) - depth = static_cast((mutex_.value >> 2) & 0x7ff) + 1; + return rwlock_.writerThreadId; #elif defined(__GLIBC__) - depth = reinterpret_cast(&mutex_)->count; + return reinterpret_cast(&rwlock_)->writer; #elif defined(__APPLE__) - darwin_pthread_mutex_t* darwin_mutex = reinterpret_cast(&mutex_); - depth = ((darwin_mutex->value >> 16) & 0xffff); + return reinterpret_cast(&rwlock_)->rw_owner_tid; #else #error unsupported C library #endif - CHECK_NE(depth, 0U) << "owner=" << GetOwner() << " tid=" << GetTid(); - return depth; } ConditionVariable::ConditionVariable(const std::string& name) : name_(name) { @@ -189,10 +397,11 @@ void ConditionVariable::Signal() { } void ConditionVariable::Wait(Mutex& mutex) { - CheckSafeToWait(mutex.rank_); - uint unlock_depth = UnlockBeforeWait(mutex); + mutex.CheckSafeToWait(); + unsigned int old_recursion_count = mutex.recursion_count_; + mutex.recursion_count_ = 0; CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &mutex.mutex_)); - RelockAfterWait(mutex, unlock_depth); + mutex.recursion_count_ = old_recursion_count; } void ConditionVariable::TimedWait(Mutex& mutex, const timespec& ts) { @@ -201,32 +410,15 @@ void ConditionVariable::TimedWait(Mutex& mutex, const timespec& ts) { #else #define TIMEDWAIT pthread_cond_timedwait #endif - CheckSafeToWait(mutex.rank_); - uint unlock_depth = UnlockBeforeWait(mutex); + mutex.CheckSafeToWait(); + unsigned int old_recursion_count = mutex.recursion_count_; + mutex.recursion_count_ = 0; int rc = TIMEDWAIT(&cond_, &mutex.mutex_, &ts); - RelockAfterWait(mutex, unlock_depth); + mutex.recursion_count_ = old_recursion_count; if (rc != 0 && rc != ETIMEDOUT) { errno = rc; PLOG(FATAL) << "TimedWait failed for " << name_; } } -// Unlock a mutex down to depth == 1 so pthread 
conditional waiting can be used. -// After waiting, use RelockAfterWait to restore the lock depth. -uint32_t ConditionVariable::UnlockBeforeWait(Mutex& mutex) { - uint32_t unlock_count = 0; - CHECK_GT(mutex.GetDepth(), 0U); - while (mutex.GetDepth() != 1) { - mutex.Unlock(); - unlock_count++; - } - return unlock_count; -} - -void ConditionVariable::RelockAfterWait(Mutex& mutex, uint32_t unlock_count) { - for (uint32_t i = 0; i < unlock_count; i++) { - mutex.Lock(); - } -} - } // namespace art diff --git a/src/mutex.h b/src/mutex.h index 4c5d537be2..4899382b76 100644 --- a/src/mutex.h +++ b/src/mutex.h @@ -23,71 +23,316 @@ #include #include +#include "globals.h" #include "gtest/gtest.h" #include "logging.h" #include "macros.h" namespace art { -enum MutexRank { - kNoMutexRank = -1, - kHeapLock = 0, - kThreadListLock = 1, +class LOCKABLE Mutex; +class LOCKABLE ReaderWriterMutex; + +// MutexLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or +// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free +// partial ordering and thereby cause deadlock situations to fail checks. +// +// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163 +enum MutexLevel { + kLoggingLock = 0, + kUnexpectedSignalLock = 1, kThreadSuspendCountLock = 2, - kMaxMutexRank = kThreadSuspendCountLock, + kAbortLock = 3, + kDefaultMutexLevel = 4, + kLoadLibraryLock = 5, + kClassLinkerClassesLock = 6, + kThreadListLock = 7, + kHeapBitmapLock = 8, + kZygoteCreationLock = 9, + kMonitorLock = 10, + kMutatorLock = 11, + kMaxMutexLevel = kMutatorLock, }; -std::ostream& operator<<(std::ostream& os, const MutexRank& rhs); +std::ostream& operator<<(std::ostream& os, const MutexLevel& rhs); -class LOCKABLE Mutex { +// Global mutexes corresponding to the levels above. 
+class GlobalSynchronization { public: - explicit Mutex(const char* name, MutexRank rank = kNoMutexRank); - ~Mutex(); + static void Init(); - void Lock() EXCLUSIVE_LOCK_FUNCTION(); + // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block + // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds + // a share on the mutator_lock_. The garbage collector may also execute with shared access but + // at times requires exclusive access to the heap (not to be confused with the heap meta-data + // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks + // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_ + // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition + // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on + // the mutator lock doesn't necessarily allow the exclusive user (e.g the garbage collector) + // chance to acquire the lock. + // + // Thread suspension: + // Shared users | Exclusive user + // (holding mutator lock and in kRunnable state) | .. running .. + // .. running .. | Request thread suspension by: + // .. running .. | - acquiring thread_suspend_count_lock_ + // .. running .. | - incrementing Thread::suspend_count_ on + // .. running .. | all mutator threads + // .. running .. | - releasing thread_suspend_count_lock_ + // .. running .. | Block trying to acquire exclusive mutator lock + // Poll Thread::suspend_count_ and enter full | .. blocked .. + // suspend code. | .. blocked .. + // Change state to kSuspended | .. blocked .. + // x: Release share on mutator_lock_ | Carry out exclusive access + // Acquire thread_suspend_count_lock_ | .. exclusive .. + // while Thread::suspend_count_ > 0 | .. exclusive .. + // - wait on Thread::resume_cond_ | .. exclusive .. + // (releases thread_suspend_count_lock_) | .. 
exclusive .. + // .. waiting .. | Release mutator_lock_ + // .. waiting .. | Request thread resumption by: + // .. waiting .. | - acquiring thread_suspend_count_lock_ + // .. waiting .. | - decrementing Thread::suspend_count_ on + // .. waiting .. | all mutator threads + // .. waiting .. | - notifying on Thread::resume_cond_ + // - re-acquire thread_suspend_count_lock_ | - releasing thread_suspend_count_lock_ + // Release thread_suspend_count_lock_ | .. running .. + // Acquire share on mutator_lock_ | .. running .. + // - This could block but the thread still | .. running .. + // has a state of kSuspended and so this | .. running .. + // isn't an issue. | .. running .. + // Acquire thread_suspend_count_lock_ | .. running .. + // - we poll here as we're transitioning into | .. running .. + // kRunnable and an individual thread suspend | .. running .. + // request (e.g for debugging) won't try | .. running .. + // to acquire the mutator lock (which would | .. running .. + // block as we hold the mutator lock). This | .. running .. + // poll ensures that if the suspender thought | .. running .. + // we were suspended by incrementing our | .. running .. + // Thread::suspend_count_ and then reading | .. running .. + // our state we go back to waiting on | .. running .. + // Thread::resume_cond_. | .. running .. + // can_go_runnable = Thread::suspend_count_ == 0 | .. running .. + // Release thread_suspend_count_lock_ | .. running .. + // if can_go_runnable | .. running .. + // Change state to kRunnable | .. running .. + // else | .. running .. + // Goto x | .. running .. + // .. running .. | .. running .. + static ReaderWriterMutex* mutator_lock_; - bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true); + // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap. + static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_); - void Unlock() UNLOCK_FUNCTION(); + // The thread_list_lock_ guards ThreadList::list_. 
It is also commonly held to stop threads + // attaching and detaching. + static Mutex* thread_list_lock_ ACQUIRED_AFTER(heap_bitmap_lock_); -#if !defined(NDEBUG) - void AssertHeld(); - void AssertNotHeld(); -#else - void AssertHeld() {} - void AssertNotHeld() {} -#endif + // Guards lists of classes within the class linker. + static Mutex* classlinker_classes_lock_ ACQUIRED_AFTER(thread_list_lock_); - uint64_t GetOwner(); + // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code + // doesn't try to hold a higher level Mutex. + #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(classlinker_classes_lock_) - private: - uint32_t GetDepth(); + // Have an exclusive aborting thread. + static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_); - pthread_mutex_t mutex_; + // Allow mutual exclusion when manipulating Thread::suspend_count_. + // TODO: Does the trade-off of a per-thread lock make sense? + static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_); + + // One unexpected signal at a time lock. + static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_); + + // Have an exclusive logging thread. + static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); +}; + +// Base class for all Mutex implementations +class BaseMutex { + public: + const std::string& GetName() const { + return name_; + } + + virtual bool IsMutex() const { return false; } + virtual bool IsReaderWriterMutex() const { return false; } + + protected: + friend class ConditionVariable; + + BaseMutex(const char* name, MutexLevel level); + virtual ~BaseMutex() {} + void RegisterAsLockedWithCurrentThread(); + void RegisterAsUnlockedWithCurrentThread(); + void CheckSafeToWait(); + + const MutexLevel level_; // Support for lock hierarchy. const std::string name_; - const MutexRank rank_; +}; + +// A Mutex is used to achieve mutual exclusion between threads. 
A Mutex can be used to gain +// exclusive access to what it guards. A Mutex can be in one of two states: +// - Free - not owned by any thread, +// - Exclusive - owned by a single thread. +// +// The effect of locking and unlocking operations on the state is: +// State | ExclusiveLock | ExclusiveUnlock +// ------------------------------------------- +// Free | Exclusive | error +// Exclusive | Block* | Free +// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in +// an error. Being non-reentrant simplifies Waiting on ConditionVariables. +class LOCKABLE Mutex : public BaseMutex { + public: + explicit Mutex(const char* name, MutexLevel level = kDefaultMutexLevel, bool recursive = false); + ~Mutex(); + + virtual bool IsMutex() const { return true; } + + // Block until mutex is free then acquire exclusive access. + void ExclusiveLock() EXCLUSIVE_LOCK_FUNCTION(); + void Lock() EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(); } + + // Returns true if acquires exclusive access, false otherwise. + bool ExclusiveTryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true); + bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(); } + + // Release exclusive access. + void ExclusiveUnlock() UNLOCK_FUNCTION(); + void Unlock() UNLOCK_FUNCTION() { ExclusiveUnlock(); } + + // Is the current thread the exclusive holder of the Mutex. + bool IsExclusiveHeld() const; + // Assert that the Mutex is exclusively held by the current thread. + void AssertExclusiveHeld() { + if (kIsDebugBuild) { + CHECK(IsExclusiveHeld()); + } + } + void AssertHeld() { AssertExclusiveHeld(); } + + // Assert that the Mutex is not held by the current thread. + void AssertNotHeldExclusive() { + if (kIsDebugBuild) { + CHECK(!IsExclusiveHeld()); + } + } + void AssertNotHeld() { AssertNotHeldExclusive(); } + + // Id associated with exclusive owner. 
+ uint64_t GetExclusiveOwnerTid() const; + + // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld. + unsigned int GetDepth() const { + return recursion_count_; + } + + private: + pthread_mutex_t mutex_; + const bool recursive_; // Can the lock be recursively held? + unsigned int recursion_count_; friend class ConditionVariable; friend class MutexTester; DISALLOW_COPY_AND_ASSIGN(Mutex); }; -class SCOPED_LOCKABLE MutexLock { +// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex. +// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader) +// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a +// condition variable. A ReaderWriterMutex can be in one of three states: +// - Free - not owned by any thread, +// - Exclusive - owned by a single thread, +// - Shared(n) - shared amongst n threads. +// +// The effect of locking and unlocking operations on the state is: +// +// State | ExclusiveLock | ExclusiveUnlock | SharedLock | SharedUnlock +// ---------------------------------------------------------------------------- +// Free | Exclusive | error | SharedLock(1) | error +// Exclusive | Block | Free | Block | error +// Shared(n) | Block | error | SharedLock(n+1)* | Shared(n-1) or Free +// * for large values of n the SharedLock may block. +class LOCKABLE ReaderWriterMutex : public BaseMutex { public: - explicit MutexLock(Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { - mu_.Lock(); + explicit ReaderWriterMutex(const char* name, MutexLevel level = kDefaultMutexLevel); + ~ReaderWriterMutex(); + + virtual bool IsReaderWriterMutex() const { return true; } + + // Block until ReaderWriterMutex is free then acquire exclusive access. + void ExclusiveLock() EXCLUSIVE_LOCK_FUNCTION(); + void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(); } + + // Release exclusive access. 
+ void ExclusiveUnlock() UNLOCK_FUNCTION(); + void WriterUnlock() UNLOCK_FUNCTION() { ExclusiveUnlock(); } + + // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success + // or false if timeout is reached. + bool ExclusiveLockWithTimeout(const timespec& abs_timeout) EXCLUSIVE_TRYLOCK_FUNCTION(true); + + // Block until ReaderWriterMutex is shared or free then acquire a share on the access. + void SharedLock() SHARED_LOCK_FUNCTION(); + void ReaderLock() SHARED_LOCK_FUNCTION() { SharedLock(); } + + // Try to acquire share of ReaderWriterMutex. + bool SharedTryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true); + + // Release a share of the access. + void SharedUnlock() UNLOCK_FUNCTION(); + void ReaderUnlock() UNLOCK_FUNCTION() { SharedUnlock(); } + + // Is the current thread the exclusive holder of the ReaderWriterMutex. + bool IsExclusiveHeld() const; + + // Assert the current thread has exclusive access to the ReaderWriterMutex. + void AssertExclusiveHeld() { + if (kIsDebugBuild) { + CHECK(IsExclusiveHeld()); + } } + void AssertWriterHeld() { AssertExclusiveHeld(); } - ~MutexLock() UNLOCK_FUNCTION() { - mu_.Unlock(); + // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex. + void AssertNotExclusiveHeld() { + if (kIsDebugBuild) { + CHECK(!IsExclusiveHeld()); + } + } + void AssertNotWriterHeld() { AssertNotExclusiveHeld(); } + + // Is the current thread a shared holder of the ReaderWriterMutex. + bool IsSharedHeld() const; + + // Assert the current thread has shared access to the ReaderWriterMutex. + void AssertSharedHeld() { + if (kIsDebugBuild) { + CHECK(IsSharedHeld()); + } } + void AssertReaderHeld() { AssertSharedHeld(); } + // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive + // mode. + void AssertNotHeld() { + if (kIsDebugBuild) { + CHECK(!IsSharedHeld()); + } + } + + // Id associated with exclusive owner. 
+ uint64_t GetExclusiveOwnerTid() const; private: - Mutex& mu_; - DISALLOW_COPY_AND_ASSIGN(MutexLock); + pthread_rwlock_t rwlock_; + + friend class MutexTester; + DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex); }; -// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)". -#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name) +// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually +// (Signal) or all at once (Broadcast). class ConditionVariable { public: explicit ConditionVariable(const std::string& name); @@ -99,14 +344,91 @@ class ConditionVariable { void TimedWait(Mutex& mutex, const timespec& ts); private: - uint32_t UnlockBeforeWait(Mutex& mutex) NO_THREAD_SAFETY_ANALYSIS; - void RelockAfterWait(Mutex& mutex, uint32_t unlock_count) NO_THREAD_SAFETY_ANALYSIS; - pthread_cond_t cond_; std::string name_; DISALLOW_COPY_AND_ASSIGN(ConditionVariable); }; +// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it +// upon destruction. +class SCOPED_LOCKABLE MutexLock { + public: + explicit MutexLock(Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { + mu_.ExclusiveLock(); + } + + ~MutexLock() UNLOCK_FUNCTION() { + mu_.ExclusiveUnlock(); + } + + private: + Mutex& mu_; + DISALLOW_COPY_AND_ASSIGN(MutexLock); +}; +// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)". +#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name) + +// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon +// construction and releases it upon destruction. 
+class SCOPED_LOCKABLE ReaderMutexLock { + public: + explicit ReaderMutexLock(ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { + mu_.SharedLock(); + } + + ~ReaderMutexLock() UNLOCK_FUNCTION() { + mu_.SharedUnlock(); + } + + private: + ReaderWriterMutex& mu_; + DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock); +}; +// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of +// "ReaderMutexLock mu(lock)". +#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name) + +// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon +// construction and releases it upon destruction. +class SCOPED_LOCKABLE WriterMutexLock { + public: + explicit WriterMutexLock(ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { + mu_.ExclusiveLock(); + } + + ~WriterMutexLock() UNLOCK_FUNCTION() { + mu_.ExclusiveUnlock(); + } + + private: + ReaderWriterMutex& mu_; + DISALLOW_COPY_AND_ASSIGN(WriterMutexLock); +}; +// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of +// "WriterMutexLock mu(lock)". +#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name) + +// Scoped unlocker/locker for a ReaderWriterMutex that releases read access to mu upon +// construction and acquires it again upon destruction. +class ReaderMutexUnlock { + public: + explicit ReaderMutexUnlock(ReaderWriterMutex& mu) UNLOCK_FUNCTION(mu) : mu_(mu) { + mu_.SharedUnlock(); + } + + ~ReaderMutexUnlock() SHARED_LOCK_FUNCTION(mu_) { + mu_.SharedLock(); + } + + private: + ReaderWriterMutex& mu_; + DISALLOW_COPY_AND_ASSIGN(ReaderMutexUnlock); +}; +// Catch bug where variable name is omitted. "ReaderMutexUnlock (lock);" instead of +// "ReaderMutexUnlock mu(lock)". 
+#define ReaderMutexUnlock(x) \ + COMPILE_ASSERT(0, reader_mutex_unlock_declaration_missing_variable_name) + } // namespace art #endif // ART_SRC_MUTEX_H_ diff --git a/src/mutex_test.cc b/src/mutex_test.cc index 69507d11d2..8a40cd60b3 100644 --- a/src/mutex_test.cc +++ b/src/mutex_test.cc @@ -60,7 +60,7 @@ TEST_F(MutexTest, TryLockUnlock) { // GCC has trouble with our mutex tests, so we have to turn off thread safety analysis. static void RecursiveLockUnlockTest() NO_THREAD_SAFETY_ANALYSIS { - Mutex mu("test mutex"); + Mutex mu("test mutex", kDefaultMutexLevel, true); MutexTester::AssertDepth(mu, 0U); mu.Lock(); MutexTester::AssertDepth(mu, 1U); @@ -78,7 +78,7 @@ TEST_F(MutexTest, RecursiveLockUnlock) { // GCC has trouble with our mutex tests, so we have to turn off thread safety analysis. static void RecursiveTryLockUnlockTest() NO_THREAD_SAFETY_ANALYSIS { - Mutex mu("test mutex"); + Mutex mu("test mutex", kDefaultMutexLevel, true); MutexTester::AssertDepth(mu, 0U); ASSERT_TRUE(mu.TryLock()); MutexTester::AssertDepth(mu, 1U); @@ -96,7 +96,9 @@ TEST_F(MutexTest, RecursiveTryLockUnlock) { struct RecursiveLockWait { - explicit RecursiveLockWait() : mu("test mutex"), cv("test condition variable") {} + explicit RecursiveLockWait() + : mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable") { + } static void* Callback(void* arg) { RecursiveLockWait* state = reinterpret_cast(arg); @@ -132,4 +134,38 @@ TEST_F(MutexTest, RecursiveLockWait) { RecursiveLockWaitTest(); } +TEST_F(MutexTest, SharedLockUnlock) { + ReaderWriterMutex mu("test rwmutex"); + mu.AssertNotHeld(); + mu.SharedLock(); + mu.AssertSharedHeld(); + mu.AssertNotExclusiveHeld(); + mu.SharedUnlock(); + mu.AssertNotHeld(); +} + +TEST_F(MutexTest, ExclusiveLockUnlock) { + ReaderWriterMutex mu("test rwmutex"); + mu.AssertNotHeld(); + mu.ExclusiveLock(); + mu.AssertSharedHeld(); + mu.AssertExclusiveHeld(); + mu.ExclusiveUnlock(); + mu.AssertNotHeld(); +} + +// GCC has trouble with our mutex tests, 
so we have to turn off thread safety analysis. +static void SharedTryLockUnlockTest() NO_THREAD_SAFETY_ANALYSIS { + ReaderWriterMutex mu("test rwmutex"); + mu.AssertNotHeld(); + ASSERT_TRUE(mu.SharedTryLock()); + mu.AssertSharedHeld(); + mu.SharedUnlock(); + mu.AssertNotHeld(); +} + +TEST_F(MutexTest, SharedTryLockUnlock) { + SharedTryLockUnlockTest(); +} + } // namespace art diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 3e749e55dc..30f411cc3e 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -24,7 +24,7 @@ #include "logging.h" #include "os.h" #include "runtime.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" #include "space.h" @@ -89,12 +89,14 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst if (env->ExceptionCheck()) { return 0; } + ScopedObjectAccess soa(env); const DexFile* dex_file; if (outputName.c_str() == NULL) { dex_file = Runtime::Current()->GetClassLinker()->FindDexFileInOatFileFromDexLocation(source); } else { std::string output(outputName.c_str()); - dex_file = Runtime::Current()->GetClassLinker()->FindOrCreateOatFileForDexLocation(source, output); + dex_file = + Runtime::Current()->GetClassLinker()->FindOrCreateOatFileForDexLocation(source, output); } if (dex_file == NULL) { LOG(WARNING) << "Failed to open dex file: " << source; @@ -105,7 +107,8 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst return static_cast(reinterpret_cast(dex_file)); } -static const DexFile* toDexFile(int dex_file_address) { +static const DexFile* toDexFile(int dex_file_address) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile* dex_file = reinterpret_cast(static_cast(dex_file_address)); if (dex_file == NULL) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", "dex_file == null"); 
@@ -113,8 +116,12 @@ static const DexFile* toDexFile(int dex_file_address) { return dex_file; } -static void DexFile_closeDexFile(JNIEnv*, jclass, jint cookie) { - const DexFile* dex_file = toDexFile(cookie); +static void DexFile_closeDexFile(JNIEnv* env, jclass, jint cookie) { + const DexFile* dex_file; + { + ScopedObjectAccess soa(env); + dex_file = toDexFile(cookie); + } if (dex_file == NULL) { return; } @@ -126,7 +133,7 @@ static void DexFile_closeDexFile(JNIEnv*, jclass, jint cookie) { static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader, jint cookie) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); const DexFile* dex_file = toDexFile(cookie); if (dex_file == NULL) { return NULL; @@ -142,14 +149,18 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); class_linker->RegisterDexFile(*dex_file); - Object* class_loader_object = ts.Decode(javaLoader); + Object* class_loader_object = soa.Decode(javaLoader); ClassLoader* class_loader = down_cast(class_loader_object); Class* result = class_linker->DefineClass(descriptor, class_loader, *dex_file, *dex_class_def); - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jint cookie) { - const DexFile* dex_file = toDexFile(cookie); + const DexFile* dex_file; + { + ScopedObjectAccess soa(env); + dex_file = toDexFile(cookie); + } if (dex_file == NULL) { return NULL; } @@ -174,6 +185,7 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename if (!OS::FileExists(filename.c_str())) { LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename.c_str() << "' does not exist"; + ScopedObjectAccess soa(env); Thread::Current()->ThrowNewExceptionF("Ljava/io/FileNotFoundException;", "%s", filename.c_str()); return JNI_TRUE; } @@ -205,6 +217,7 @@ static jboolean 
DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename } return JNI_FALSE; } + ScopedObjectAccess soa(env); if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum)) { if (debug_logging) { LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << oat_filename @@ -232,6 +245,7 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename // TODO: Ensure this works with multiple image spaces. const ImageHeader& image_header = (*cur)->AsImageSpace()->GetImageHeader(); if (oat_file->GetOatHeader().GetImageFileLocationChecksum() != image_header.GetOatChecksum()) { + ScopedObjectAccess soa(env); LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location << " has out-of-date checksum compared to " << image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8(); @@ -246,9 +260,10 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename return JNI_TRUE; } + ScopedObjectAccess soa(env); if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum)) { LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location - << " has out-of-date checksum compared to " << filename.c_str(); + << " has out-of-date checksum compared to " << filename.c_str(); return JNI_TRUE; } diff --git a/src/native/dalvik_system_VMDebug.cc b/src/native/dalvik_system_VMDebug.cc index 70067fe016..3799bbe95f 100644 --- a/src/native/dalvik_system_VMDebug.cc +++ b/src/native/dalvik_system_VMDebug.cc @@ -22,7 +22,7 @@ #include "hprof/hprof.h" #include "jni_internal.h" #include "ScopedUtfChars.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "toStringArray.h" #include "trace.h" @@ -57,7 +57,8 @@ static void VMDebug_startMethodTracingDdmsImpl(JNIEnv*, jclass, jint bufferSize, Trace::Start("[DDMS]", -1, bufferSize, flags, true); } -static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring 
javaTraceFilename, jobject javaFd, jint bufferSize, jint flags) { +static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename, + jobject javaFd, jint bufferSize, jint flags) { int originalFd = jniGetFDFromFileDescriptor(env, javaFd); if (originalFd < 0) { return; @@ -65,7 +66,9 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF int fd = dup(originalFd); if (fd < 0) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", "dup(%d) failed: %s", originalFd, strerror(errno)); + ScopedObjectAccess soa(env); + Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", + "dup(%d) failed: %s", originalFd, strerror(errno)); return; } @@ -76,7 +79,8 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, false); } -static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring javaTraceFilename, jint bufferSize, jint flags) { +static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring javaTraceFilename, + jint bufferSize, jint flags) { ScopedUtfChars traceFilename(env, javaTraceFilename); if (traceFilename.c_str() == NULL) { return; @@ -114,23 +118,28 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) { return Dbg::LastDebuggerActivity(); } -static void VMDebug_startInstructionCounting(JNIEnv*, jclass) { +static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) { + ScopedObjectAccess soa(env); Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_stopInstructionCounting(JNIEnv*, jclass) { +static void VMDebug_stopInstructionCounting(JNIEnv* env, jclass) { + ScopedObjectAccess soa(env); Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_getInstructionCount(JNIEnv*, jclass, jintArray /*javaCounts*/) { +static void 
VMDebug_getInstructionCount(JNIEnv* env, jclass, jintArray /*javaCounts*/) { + ScopedObjectAccess soa(env); Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_resetInstructionCount(JNIEnv*, jclass) { +static void VMDebug_resetInstructionCount(JNIEnv* env, jclass) { + ScopedObjectAccess soa(env); Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); } -static void VMDebug_printLoadedClasses(JNIEnv*, jclass, jint flags) { +static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) { + ScopedObjectAccess soa(env); return Runtime::Current()->GetClassLinker()->DumpAllClasses(flags); } @@ -155,7 +164,9 @@ static jlong VMDebug_threadCpuTimeNanos(JNIEnv*, jclass) { static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) { // Only one of these may be NULL. if (javaFilename == NULL && javaFd == NULL) { - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "fileName == null && fd == null"); + ScopedObjectAccess soa(env); + Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", + "fileName == null && fd == null"); return; } @@ -174,7 +185,9 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job if (javaFd != NULL) { fd = jniGetFDFromFileDescriptor(env, javaFd); if (fd < 0) { - Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", "Invalid file descriptor"); + ScopedObjectAccess soa(env); + Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", + "Invalid file descriptor"); return; } } @@ -187,11 +200,11 @@ static void VMDebug_dumpHprofDataDdms(JNIEnv*, jclass) { } static void VMDebug_dumpReferenceTables(JNIEnv* env, jclass) { + ScopedObjectAccess soa(env); LOG(INFO) << "--- reference table dump ---"; - JNIEnvExt* e = reinterpret_cast(env); - e->DumpReferenceTables(LOG(INFO)); - e->vm->DumpReferenceTables(LOG(INFO)); + 
soa.Env()->DumpReferenceTables(LOG(INFO)); + soa.Vm()->DumpReferenceTables(LOG(INFO)); LOG(INFO) << "---"; } @@ -204,9 +217,10 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) { LOG(INFO) << "VMDebug infopoint " << id << " hit"; } -static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass, jboolean countAssignable) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(javaClass); +static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass, + jboolean countAssignable) { + ScopedObjectAccess soa(env); + Class* c = soa.Decode(javaClass); if (c == NULL) { return 0; } diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index 4ec1b92fbd..8dbbc77cf6 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -21,9 +21,7 @@ #include "jni_internal.h" #include "object.h" #include "object_utils.h" -#include "scoped_heap_lock.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "thread.h" #include "thread_list.h" @@ -49,7 +47,7 @@ static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) { } static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaElementClass, jint length) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); #ifdef MOVING_GARBAGE_COLLECTOR // TODO: right now, we don't have a copying collector, so there's no need // to do anything special here, but we ought to pass the non-movability @@ -57,7 +55,7 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle UNIMPLEMENTED(FATAL); #endif - Class* element_class = ts.Decode(javaElementClass); + Class* element_class = soa.Decode(javaElementClass); if (element_class == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "element class == null"); return NULL; @@ -76,15 +74,15 @@ static jobject 
VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle if (result == NULL) { return NULL; } - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) { if (javaArray == NULL) { // Most likely allocation failed return 0; } - ScopedJniThreadState ts(env); - Array* array = ts.Decode(javaArray); + ScopedObjectAccess soa(env); + Array* array = soa.Decode(javaArray); if (!array->IsArrayInstance()) { Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;", "not an array"); return 0; @@ -143,7 +141,7 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv*, jobject, jint targetSdkVersio #if !defined(ART_USE_LLVM_COMPILER) if (vm->check_jni) { LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds..."; - ScopedThreadListLock thread_list_lock; + MutexLock mu(*GlobalSynchronization::thread_list_lock_); vm->SetCheckJniEnabled(false); runtime->GetThreadList()->ForEach(DisableCheckJniCallback, NULL); } @@ -160,8 +158,6 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv*, jobject, jint targetSdkVersio } static void VMRuntime_trimHeap(JNIEnv*, jobject) { - ScopedHeapLock heap_lock; - // Trim the managed heap. 
Heap* heap = Runtime::Current()->GetHeap(); const Spaces& spaces = heap->GetSpaces(); diff --git a/src/native/dalvik_system_VMStack.cc b/src/native/dalvik_system_VMStack.cc index 12fa8db904..3284c979d8 100644 --- a/src/native/dalvik_system_VMStack.cc +++ b/src/native/dalvik_system_VMStack.cc @@ -18,19 +18,34 @@ #include "jni_internal.h" #include "nth_caller_visitor.h" #include "object.h" -#include "scoped_heap_lock.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" +#include "scoped_thread_state_change.h" #include "thread_list.h" namespace art { -static jobject GetThreadStack(JNIEnv* env, jobject javaThread) { - ScopedJniThreadState ts(env); - ScopedHeapLock heap_lock; - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, javaThread); - return (thread != NULL) ? GetThreadStack(ts, thread) : NULL; +static jobject GetThreadStack(JNIEnv* env, jobject peer) { + bool timeout; + { + ScopedObjectAccess soa(env); + Thread* self = Thread::Current(); + if (soa.Decode(peer) == self->GetPeer()) { + return self->CreateInternalStackTrace(soa); + } + } + // Suspend thread to build stack trace. + Thread* thread = Thread::SuspendForDebugger(peer, true, &timeout); + if (thread != NULL) { + jobject trace; + { + ScopedObjectAccess soa(env); + trace = thread->CreateInternalStackTrace(soa); + } + // Restart suspended thread. + Runtime::Current()->GetThreadList()->Resume(thread, true); + return trace; + } else { + return NULL; + } } static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThread, jobjectArray javaSteArray) { @@ -45,10 +60,10 @@ static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThre // Returns the defining class loader of the caller's caller. 
static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env); - NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 2); + ScopedObjectAccess soa(env); + NthCallerVisitor visitor(soa.Self()->GetManagedStack(), soa.Self()->GetTraceStack(), 2); visitor.WalkStack(); - return ts.AddLocalReference(visitor.caller->GetDeclaringClass()->GetClassLoader()); + return soa.AddLocalReference(visitor.caller->GetDeclaringClass()->GetClassLoader()); } static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap, jobject javaSystem) { @@ -74,21 +89,21 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject ja Object* system; Object* class_loader; }; - ScopedJniThreadState ts(env); - Object* bootstrap = ts.Decode(javaBootstrap); - Object* system = ts.Decode(javaSystem); - ClosestUserClassLoaderVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), + ScopedObjectAccess soa(env); + Object* bootstrap = soa.Decode(javaBootstrap); + Object* system = soa.Decode(javaSystem); + ClosestUserClassLoaderVisitor visitor(soa.Self()->GetManagedStack(), soa.Self()->GetTraceStack(), bootstrap, system); visitor.WalkStack(); - return ts.AddLocalReference(visitor.class_loader); + return soa.AddLocalReference(visitor.class_loader); } // Returns the class of the caller's caller's caller. 
static jclass VMStack_getStackClass2(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env); - NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 3); + ScopedObjectAccess soa(env); + NthCallerVisitor visitor(soa.Self()->GetManagedStack(), soa.Self()->GetTraceStack(), 3); visitor.WalkStack(); - return ts.AddLocalReference(visitor.caller->GetDeclaringClass()); + return soa.AddLocalReference(visitor.caller->GetDeclaringClass()); } static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) { diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index bc1d0def39..e63cf1aece 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -20,15 +20,16 @@ #include "nth_caller_visitor.h" #include "object.h" #include "object_utils.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" #include "well_known_classes.h" namespace art { -static Class* DecodeClass(const ScopedJniThreadState& ts, jobject java_class) { - Class* c = ts.Decode(java_class); +static Class* DecodeClass(const ScopedObjectAccess& soa, jobject java_class) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* c = soa.Decode(java_class); DCHECK(c != NULL); DCHECK(c->IsClass()); // TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke . @@ -39,7 +40,7 @@ static Class* DecodeClass(const ScopedJniThreadState& ts, jobject java_class) { // "name" is in "binary name" format, e.g. "dalvik.system.Debug$1". 
static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean initialize, jobject javaLoader) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); ScopedUtfChars name(env, javaName); if (name.c_str() == NULL) { return NULL; @@ -55,7 +56,7 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean } std::string descriptor(DotToDescriptor(name.c_str())); - ClassLoader* class_loader = ts.Decode(javaLoader); + ClassLoader* class_loader = soa.Decode(javaLoader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Class* c = class_linker->FindClass(descriptor.c_str(), class_loader); if (c == NULL) { @@ -70,12 +71,12 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean if (initialize) { class_linker->EnsureInitialized(c, true, true); } - return ts.AddLocalReference(c); + return soa.AddLocalReference(c); } static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); if (c->IsPrimitive() || c->IsArrayClass() || c->IsProxyClass()) { return 0; // primitive, array and proxy classes don't have class definitions } @@ -87,17 +88,22 @@ static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { } } +// TODO: Remove this redundant struct when GCC annotalysis works correctly on top-level functions. 
+struct WorkAroundGccAnnotalysisBug { template -static jobjectArray ToArray(const ScopedJniThreadState& ts, const char* array_class_name, - const std::vector& objects) { - ScopedLocalRef array_class(ts.Env(), ts.Env()->FindClass(array_class_name)); - jobjectArray result = ts.Env()->NewObjectArray(objects.size(), array_class.get(), NULL); +static jobjectArray ToArray(const ScopedObjectAccessUnchecked& soa, const char* array_class_name, + const std::vector& objects) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ScopedLocalRef array_class(soa.Env(), soa.Env()->FindClass(array_class_name)); + jobjectArray result = soa.Env()->NewObjectArray(objects.size(), array_class.get(), NULL); for (size_t i = 0; i < objects.size(); ++i) { - ScopedLocalRef object(ts.Env(), ts.AddLocalReference(objects[i])); - ts.Env()->SetObjectArrayElement(result, i, object.get()); + ScopedLocalRef object(soa.Env(), soa.AddLocalReference(objects[i])); + soa.Env()->SetObjectArrayElement(result, i, object.get()); } return result; } +}; +#define ToArray(a, b, c) WorkAroundGccAnnotalysisBug::ToArray(a, b, c) static bool IsVisibleConstructor(Method* m, bool public_only) { if (public_only && !m->IsPublic()) { @@ -110,8 +116,8 @@ static bool IsVisibleConstructor(Method* m, bool public_only) { } static jobjectArray Class_getDeclaredConstructors(JNIEnv* env, jclass javaClass, jboolean publicOnly) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); std::vector constructors; for (size_t i = 0; i < c->NumDirectMethods(); ++i) { Method* m = c->GetDirectMethod(i); @@ -120,7 +126,7 @@ static jobjectArray Class_getDeclaredConstructors(JNIEnv* env, jclass javaClass, } } - return ToArray(ts, "java/lang/reflect/Constructor", constructors); + return ToArray(soa, "java/lang/reflect/Constructor", constructors); } static bool IsVisibleField(Field* f, bool public_only) { @@ -131,8 +137,8 @@ static bool 
IsVisibleField(Field* f, bool public_only) { } static jobjectArray Class_getDeclaredFields(JNIEnv* env, jclass javaClass, jboolean publicOnly) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); std::vector fields; FieldHelper fh; for (size_t i = 0; i < c->NumInstanceFields(); ++i) { @@ -164,7 +170,7 @@ static jobjectArray Class_getDeclaredFields(JNIEnv* env, jclass javaClass, jbool } } - return ToArray(ts, "java/lang/reflect/Field", fields); + return ToArray(soa, "java/lang/reflect/Field", fields); } static bool IsVisibleMethod(Method* m, bool public_only) { @@ -181,8 +187,8 @@ static bool IsVisibleMethod(Method* m, bool public_only) { } static jobjectArray Class_getDeclaredMethods(JNIEnv* env, jclass javaClass, jboolean publicOnly) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); if (c == NULL) { return NULL; } @@ -218,12 +224,12 @@ static jobjectArray Class_getDeclaredMethods(JNIEnv* env, jclass javaClass, jboo } } - return ToArray(ts, "java/lang/reflect/Method", methods); + return ToArray(soa, "java/lang/reflect/Method", methods); } static jobject Class_getDex(JNIEnv* env, jobject javaClass) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); DexCache* dex_cache = c->GetDexCache(); if (dex_cache == NULL) { @@ -233,7 +239,8 @@ static jobject Class_getDex(JNIEnv* env, jobject javaClass) { return Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache).GetDexObject(env); } -static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray* arg_array) { +static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray* arg_array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (name != mh->GetName()) { return false; } @@ 
-254,7 +261,8 @@ static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray } static Method* FindConstructorOrMethodInArray(ObjectArray* methods, const std::string& name, - ObjectArray* arg_array) { + ObjectArray* arg_array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (methods == NULL) { return NULL; } @@ -282,10 +290,10 @@ static Method* FindConstructorOrMethodInArray(ObjectArray* methods, cons static jobject Class_getDeclaredConstructorOrMethod(JNIEnv* env, jclass javaClass, jstring javaName, jobjectArray javaArgs) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaClass); - std::string name(ts.Decode(javaName)->ToModifiedUtf8()); - ObjectArray* arg_array = ts.Decode*>(javaArgs); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaClass); + std::string name(soa.Decode(javaName)->ToModifiedUtf8()); + ObjectArray* arg_array = soa.Decode*>(javaArgs); Method* m = FindConstructorOrMethodInArray(c->GetDirectMethods(), name, arg_array); if (m == NULL) { @@ -293,16 +301,16 @@ static jobject Class_getDeclaredConstructorOrMethod(JNIEnv* env, jclass javaClas } if (m != NULL) { - return ts.AddLocalReference(m); + return soa.AddLocalReference(m); } else { return NULL; } } static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobject jname) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, java_class); - String* name = ts.Decode(jname); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, java_class); + String* name = soa.Decode(jname); DCHECK(name->GetClass()->IsStringClass()); FieldHelper fh; @@ -314,7 +322,7 @@ static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobj DCHECK(env->ExceptionOccurred()); return NULL; } - return ts.AddLocalReference(f); + return soa.AddLocalReference(f); } } for (size_t i = 0; i < c->NumStaticFields(); ++i) { @@ -325,40 +333,40 @@ static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, 
jobj DCHECK(env->ExceptionOccurred()); return NULL; } - return ts.AddLocalReference(f); + return soa.AddLocalReference(f); } } return NULL; } static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaThis); - return ts.AddLocalReference(c->ComputeName()); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaThis); + return soa.AddLocalReference(c->ComputeName()); } static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - SynthesizedProxyClass* c = down_cast(DecodeClass(ts, javaThis)); - return ts.AddLocalReference(c->GetInterfaces()->Clone()); + ScopedObjectAccess soa(env); + SynthesizedProxyClass* c = down_cast(DecodeClass(soa, javaThis)); + return soa.AddLocalReference(c->GetInterfaces()->Clone()); } static jboolean Class_isAssignableFrom(JNIEnv* env, jobject javaLhs, jclass javaRhs) { - ScopedJniThreadState ts(env); - Class* lhs = DecodeClass(ts, javaLhs); - Class* rhs = ts.Decode(javaRhs); // Can be null. + ScopedObjectAccess soa(env); + Class* lhs = DecodeClass(soa, javaLhs); + Class* rhs = soa.Decode(javaRhs); // Can be null. if (rhs == NULL) { - ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "class == null"); + soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "class == null"); return JNI_FALSE; } return lhs->IsAssignableFrom(rhs) ? 
JNI_TRUE : JNI_FALSE; } static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - Class* c = DecodeClass(ts, javaThis); + ScopedObjectAccess soa(env); + Class* c = DecodeClass(soa, javaThis); if (c->IsPrimitive() || c->IsInterface() || c->IsArrayClass() || c->IsAbstract()) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Class %s can not be instantiated", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str()); return NULL; } @@ -369,7 +377,7 @@ static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { Method* init = c->FindDeclaredDirectMethod("", "()V"); if (init == NULL) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Class %s has no default ()V constructor", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str()); return NULL; } @@ -383,20 +391,20 @@ static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { // constructor must be public or, if the caller is in the same package, // have package scope. 
- NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 2); + NthCallerVisitor visitor(soa.Self()->GetManagedStack(), soa.Self()->GetTraceStack(), 2); visitor.WalkStack(); Class* caller_class = visitor.caller->GetDeclaringClass(); ClassHelper caller_ch(caller_class); if (!caller_class->CanAccess(c)) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;", "Class %s is not accessible from class %s", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str(), PrettyDescriptor(caller_ch.GetDescriptor()).c_str()); return NULL; } if (!caller_class->CanAccessMember(init->GetDeclaringClass(), init->GetAccessFlags())) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;", "%s is not accessible from class %s", PrettyMethod(init).c_str(), PrettyDescriptor(caller_ch.GetDescriptor()).c_str()); @@ -405,13 +413,13 @@ static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) { Object* new_obj = c->AllocObject(); if (new_obj == NULL) { - DCHECK(ts.Self()->IsExceptionPending()); + DCHECK(soa.Self()->IsExceptionPending()); return NULL; } // invoke constructor; unlike reflection calls, we don't wrap exceptions - jclass java_class = ts.AddLocalReference(c); - jmethodID mid = ts.EncodeMethod(init); + jclass java_class = soa.AddLocalReference(c); + jmethodID mid = soa.EncodeMethod(init); return env->NewObject(java_class, mid); } diff --git a/src/native/java_lang_Object.cc b/src/native/java_lang_Object.cc index d6b1bd6cf8..89019f78a9 100644 --- a/src/native/java_lang_Object.cc +++ b/src/native/java_lang_Object.cc @@ -16,31 +16,31 @@ #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { static jobject Object_internalClone(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - Object* 
o = ts.Decode(javaThis); - return ts.AddLocalReference(o->Clone()); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(javaThis); + return soa.AddLocalReference(o->Clone()); } static void Object_notify(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(javaThis); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(javaThis); o->Notify(); } static void Object_notifyAll(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(javaThis); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(javaThis); o->NotifyAll(); } static void Object_wait(JNIEnv* env, jobject javaThis, jlong ms, jint ns) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(javaThis); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(javaThis); o->Wait(ms, ns); } diff --git a/src/native/java_lang_Runtime.cc b/src/native/java_lang_Runtime.cc index 1b657b1801..6dc850e83f 100644 --- a/src/native/java_lang_Runtime.cc +++ b/src/native/java_lang_Runtime.cc @@ -22,13 +22,12 @@ #include "jni_internal.h" #include "object.h" #include "runtime.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" namespace art { -static void Runtime_gc(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env); +static void Runtime_gc(JNIEnv*, jclass) { Runtime::Current()->GetHeap()->CollectGarbage(false); } @@ -45,13 +44,13 @@ static void Runtime_nativeExit(JNIEnv*, jclass, jint status) { * message on failure. 
*/ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); ScopedUtfChars filename(env, javaFilename); if (filename.c_str() == NULL) { return NULL; } - ClassLoader* classLoader = ts.Decode(javaLoader); + ClassLoader* classLoader = soa.Decode(javaLoader); std::string detail; JavaVMExt* vm = Runtime::Current()->GetJavaVM(); bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, detail); diff --git a/src/native/java_lang_String.cc b/src/native/java_lang_String.cc index 96fcf96287..bfdc31ac68 100644 --- a/src/native/java_lang_String.cc +++ b/src/native/java_lang_String.cc @@ -16,7 +16,7 @@ #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #ifdef HAVE__MEMCMP16 // "count" is in 16-bit units. @@ -36,9 +36,9 @@ uint32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t count) { namespace art { static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { - ScopedJniThreadState ts(env); - String* lhs = ts.Decode(javaThis); - String* rhs = ts.Decode(javaRhs); + ScopedObjectAccess soa(env); + String* lhs = soa.Decode(javaThis); + String* rhs = soa.Decode(javaRhs); if (rhs == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "rhs == null"); @@ -70,11 +70,11 @@ static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { } static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint start) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); // This method does not handle supplementary characters. They're dealt with in managed code. 
DCHECK_LE(ch, 0xffff); - String* s = ts.Decode(java_this); + String* s = soa.Decode(java_this); jint count = s->GetLength(); if (start < 0) { @@ -96,10 +96,10 @@ static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint sta } static jstring String_intern(JNIEnv* env, jobject javaThis) { - ScopedJniThreadState ts(env); - String* s = ts.Decode(javaThis); + ScopedObjectAccess soa(env); + String* s = soa.Decode(javaThis); String* result = s->Intern(); - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index 76ac670fbd..f4fe6ca9cd 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -16,7 +16,7 @@ #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" /* * We make guarantees about the atomicity of accesses to primitive @@ -101,28 +101,29 @@ static void move32(void* dst, const void* src, size_t n) { namespace art { -static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) { +static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "%s of type %s is not an array", identifier, actualType.c_str()); } static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); // Null pointer checks. 
if (javaSrc == NULL) { - ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); + soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); return; } if (javaDst == NULL) { - ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); + soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); return; } // Make sure source and destination are both arrays. - Object* srcObject = ts.Decode(javaSrc); - Object* dstObject = ts.Decode(javaDst); + Object* srcObject = soa.Decode(javaSrc); + Object* dstObject = soa.Decode(javaDst); if (!srcObject->IsArrayInstance()) { ThrowArrayStoreException_NotAnArray("source", srcObject); return; @@ -138,7 +139,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, // Bounds checking. if (srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); return; @@ -150,7 +151,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, if (srcComponentType->IsPrimitive() != dstComponentType->IsPrimitive() || srcComponentType != dstComponentType) { std::string srcType(PrettyTypeOf(srcArray)); std::string dstType(PrettyTypeOf(dstArray)); - ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "Incompatible types: src=%s, dst=%s", srcType.c_str(), dstType.c_str()); return; } @@ -233,7 +234,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, if (i != length) { std::string actualSrcType(PrettyTypeOf(o)); std::string 
dstType(PrettyTypeOf(dstArray)); - ts.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "source[%d] of type %s cannot be stored in destination array of type %s", srcPos + i, actualSrcType.c_str(), dstType.c_str()); return; @@ -241,9 +242,9 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, } static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) { - ScopedJniThreadState ts(env); - Object* o = ts.Decode(javaObject); - return static_cast(reinterpret_cast(o)); + ScopedObjectAccess soa(env); + Object* o = soa.Decode(javaObject); + return static_cast(o->IdentityHashCode()); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index 626255e267..65042e4f43 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -17,8 +17,7 @@ #include "debugger.h" #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" +#include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" #include "thread.h" #include "thread_list.h" @@ -26,19 +25,18 @@ namespace art { static jobject Thread_currentThread(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env); - return ts.AddLocalReference(ts.Self()->GetPeer()); + ScopedObjectAccess soa(env); + return soa.AddLocalReference(soa.Self()->GetPeer()); } static jboolean Thread_interrupted(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env, kNative); // Doesn't touch objects, so keep in native state. - return ts.Self()->Interrupted(); + return reinterpret_cast(env)->self->Interrupted() ? 
JNI_TRUE : JNI_FALSE; } static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) { - ScopedJniThreadState ts(env); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + ScopedObjectAccess soa(env); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE; } @@ -56,53 +54,62 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha const jint kJavaTimedWaiting = 4; const jint kJavaTerminated = 5; - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); internal_thread_state = thread->GetState(); } switch (internal_thread_state) { - case kTerminated: return kJavaTerminated; - case kRunnable: return kJavaRunnable; - case kTimedWaiting: return kJavaTimedWaiting; - case kBlocked: return kJavaBlocked; - case kWaiting: return kJavaWaiting; - case kStarting: return kJavaNew; - case kNative: return kJavaRunnable; - case kVmWait: return kJavaWaiting; - case kSuspended: return kJavaRunnable; + case kTerminated: return kJavaTerminated; + case kRunnable: return kJavaRunnable; + case kTimedWaiting: return kJavaTimedWaiting; + case kBlocked: return kJavaBlocked; + case kWaiting: return kJavaWaiting; + case kStarting: return kJavaNew; + case kNative: return kJavaRunnable; + case kWaitingForGcToComplete: return kJavaWaiting; + case kWaitingPerformingGc: return kJavaWaiting; + case kWaitingForDebuggerSend: return kJavaWaiting; + case kWaitingForDebuggerToAttach: return 
kJavaWaiting; + case kWaitingInMainDebuggerLoop: return kJavaWaiting; + case kWaitingForDebuggerSuspension: return kJavaWaiting; + case kWaitingForJniOnLoad: return kJavaWaiting; + case kWaitingForSignalCatcherOutput: return kJavaWaiting; + case kWaitingInMainSignalCatcherLoop: return kJavaWaiting; + case kSuspended: return kJavaRunnable; // Don't add a 'default' here so the compiler can spot incompatible enum changes. } return -1; // Unreachable. } static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) { - ScopedJniThreadState ts(env); - Object* object = ts.Decode(java_object); + ScopedObjectAccess soa(env); + Object* object = soa.Decode(java_object); if (object == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); return JNI_FALSE; } - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); return thread->HoldsLock(object); } static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) { - ScopedJniThreadState ts(env); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + ScopedObjectAccess soa(env); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { thread->Interrupt(); } } static void Thread_nativeSetName(JNIEnv* env, jobject java_thread, jstring java_name) { - ScopedJniThreadState ts(env); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + ScopedObjectAccess soa(env); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread == NULL) { return; } @@ -119,9 +126,9 @@ static void Thread_nativeSetName(JNIEnv* env, jobject 
java_thread, jstring java_ * threads at Thread.NORM_PRIORITY (5). */ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_priority) { - ScopedJniThreadState ts(env); - ScopedThreadListLock thread_list_lock; - Thread* thread = Thread::FromManagedThread(ts, java_thread); + ScopedObjectAccess soa(env); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { thread->SetNativePriority(new_priority); } diff --git a/src/native/java_lang_Throwable.cc b/src/native/java_lang_Throwable.cc index 1c59a34e51..332a1305e0 100644 --- a/src/native/java_lang_Throwable.cc +++ b/src/native/java_lang_Throwable.cc @@ -15,14 +15,14 @@ */ #include "jni_internal.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "thread.h" namespace art { static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) { - ScopedJniThreadState ts(env); - return ts.Self()->CreateInternalStackTrace(ts); + ScopedObjectAccess soa(env); + return soa.Self()->CreateInternalStackTrace(soa); } static jobjectArray Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) { diff --git a/src/native/java_lang_VMClassLoader.cc b/src/native/java_lang_VMClassLoader.cc index 0689f74986..4b5c31c64b 100644 --- a/src/native/java_lang_VMClassLoader.cc +++ b/src/native/java_lang_VMClassLoader.cc @@ -17,15 +17,15 @@ #include "class_linker.h" #include "class_loader.h" #include "jni_internal.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" #include "zip_archive.h" namespace art { static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) { - ScopedJniThreadState ts(env); - ClassLoader* loader = ts.Decode(javaLoader); + ScopedObjectAccess soa(env); + ClassLoader* loader = soa.Decode(javaLoader); ScopedUtfChars name(env, javaName); if (name.c_str() == NULL) 
{ return NULL; @@ -34,7 +34,7 @@ static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoa std::string descriptor(DotToDescriptor(name.c_str())); Class* c = Runtime::Current()->GetClassLinker()->LookupClass(descriptor.c_str(), loader); if (c != NULL && c->IsResolved()) { - return ts.AddLocalReference(c); + return soa.AddLocalReference(c); } else { // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into // the regular loadClass code. diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index 729312ed94..fa5975019d 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -18,13 +18,14 @@ #include "jni_internal.h" #include "object.h" #include "object_utils.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { // Recursively create an array with multiple dimensions. Elements may be // Objects or primitive types. -static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArray* dimensions) { +static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArray* dimensions) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { int32_t array_length = dimensions->Get(current_dimension++); SirtRef new_array(Array::Alloc(array_class, array_length)); if (new_array.get() == NULL) { @@ -69,12 +70,12 @@ static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArr // subtract pieces off. Besides, we want to start with the outermost // piece and work our way in. 
static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); DCHECK(javaElementClass != NULL); - Class* element_class = ts.Decode(javaElementClass); + Class* element_class = soa.Decode(javaElementClass); DCHECK(element_class->IsClass()); DCHECK(javaDimArray != NULL); - Object* dimensions_obj = ts.Decode(javaDimArray); + Object* dimensions_obj = soa.Decode(javaDimArray); DCHECK(dimensions_obj->IsArrayInstance()); DCHECK_STREQ(ClassHelper(dimensions_obj->GetClass()).GetDescriptor(), "[I"); IntArray* dimensions_array = down_cast(dimensions_obj); @@ -90,7 +91,7 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla for (int i = 0; i < num_dimensions; i++) { int dimension = dimensions_array->Get(i); if (dimension < 0) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "Dimension %d: %d", i, dimension); return NULL; } @@ -113,15 +114,15 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla CHECK(Thread::Current()->IsExceptionPending()); return NULL; } - return ts.AddLocalReference(new_array); + return soa.AddLocalReference(new_array); } static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) { - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); DCHECK(javaElementClass != NULL); - Class* element_class = ts.Decode(javaElementClass); + Class* element_class = soa.Decode(javaElementClass); if (length < 0) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); + soa.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); return NULL; } std::string descriptor; @@ -131,16 +132,16 @@ static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl ClassLinker* class_linker = 
Runtime::Current()->GetClassLinker(); Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); if (array_class == NULL) { - CHECK(ts.Self()->IsExceptionPending()); + CHECK(soa.Self()->IsExceptionPending()); return NULL; } DCHECK(array_class->IsArrayClass()); Array* new_array = Array::Alloc(array_class, length); if (new_array == NULL) { - CHECK(ts.Self()->IsExceptionPending()); + CHECK(soa.Self()->IsExceptionPending()); return NULL; } - return ts.AddLocalReference(new_array); + return soa.AddLocalReference(new_array); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_reflect_Constructor.cc b/src/native/java_lang_reflect_Constructor.cc index 564d6dbf9b..a6bd450fee 100644 --- a/src/native/java_lang_reflect_Constructor.cc +++ b/src/native/java_lang_reflect_Constructor.cc @@ -19,7 +19,7 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { @@ -31,17 +31,17 @@ namespace art { * with an interface, array, or primitive class. 
*/ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) { - ScopedJniThreadState ts(env); - Method* m = ts.Decode(javaMethod)->AsMethod(); + ScopedObjectAccess soa(env); + Method* m = soa.Decode(javaMethod)->AsMethod(); Class* c = m->GetDeclaringClass(); if (c->IsAbstract()) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Can't instantiate abstract class %s", PrettyDescriptor(c).c_str()); return NULL; } if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { - DCHECK(ts.Self()->IsExceptionPending()); + DCHECK(soa.Self()->IsExceptionPending()); return NULL; } @@ -50,8 +50,8 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA return NULL; } - jobject javaReceiver = ts.AddLocalReference(receiver); - InvokeMethod(ts, javaMethod, javaReceiver, javaArgs); + jobject javaReceiver = soa.AddLocalReference(receiver); + InvokeMethod(soa, javaMethod, javaReceiver, javaArgs); // Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod. 
return javaReceiver; diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index b2ede634b7..e764b2500b 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -19,14 +19,16 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { -static bool GetFieldValue(const ScopedJniThreadState& ts, Object* o, Field* f, JValue& value, - bool allow_references) { +static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, + JValue& value, bool allow_references) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK_EQ(value.GetJ(), 0LL); - if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), + true, true)) { return false; } switch (FieldHelper(f).GetTypeAsPrimitiveType()) { @@ -65,18 +67,20 @@ static bool GetFieldValue(const ScopedJniThreadState& ts, Object* o, Field* f, J // Never okay. 
break; } - ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "Not a primitive field: %s", PrettyField(f).c_str()); return false; } -static bool CheckReceiver(const ScopedJniThreadState& ts, jobject javaObj, Field* f, Object*& o) { +static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, Field* f, + Object*& o) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (f->IsStatic()) { o = NULL; return true; } - o = ts.Decode(javaObj); + o = soa.Decode(javaObj); Class* declaringClass = f->GetDeclaringClass(); if (!VerifyObjectInClass(o, declaringClass)) { return false; @@ -85,34 +89,34 @@ static bool CheckReceiver(const ScopedJniThreadState& ts, jobject javaObj, Field } static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) { - ScopedJniThreadState ts(env); - Field* f = ts.DecodeField(env->FromReflectedField(javaField)); + ScopedObjectAccess soa(env); + Field* f = soa.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(ts, javaObj, f, o)) { + if (!CheckReceiver(soa, javaObj, f, o)) { return NULL; } // Get the field's value, boxing if necessary. JValue value; - if (!GetFieldValue(ts, o, f, value, true)) { + if (!GetFieldValue(soa, o, f, value, true)) { return NULL; } BoxPrimitive(FieldHelper(f).GetTypeAsPrimitiveType(), value); - return ts.AddLocalReference(value.GetL()); + return soa.AddLocalReference(value.GetL()); } static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char dst_descriptor) { - ScopedJniThreadState ts(env); - Field* f = ts.DecodeField(env->FromReflectedField(javaField)); + ScopedObjectAccess soa(env); + Field* f = soa.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(ts, javaObj, f, o)) { + if (!CheckReceiver(soa, javaObj, f, o)) { return JValue(); } // Read the value. 
JValue field_value; - if (!GetFieldValue(ts, o, f, field_value, false)) { + if (!GetFieldValue(soa, o, f, field_value, false)) { return JValue(); } @@ -158,8 +162,10 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { return GetPrimitiveField(env, javaField, javaObj, 'S').GetS(); } -static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool allow_references) { - if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { +static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool allow_references) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), + true, true)) { return; } switch (FieldHelper(f).GetTypeAsPrimitiveType()) { @@ -208,11 +214,11 @@ static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool all } static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue) { - ScopedJniThreadState ts(env); - Field* f = ts.DecodeField(env->FromReflectedField(javaField)); + ScopedObjectAccess soa(env); + Field* f = soa.DecodeField(env->FromReflectedField(javaField)); // Unbox the value, if necessary. - Object* boxed_value = ts.Decode(javaValue); + Object* boxed_value = soa.Decode(javaValue); JValue unboxed_value; if (!UnboxPrimitiveForField(boxed_value, FieldHelper(f).GetType(), unboxed_value, f)) { return; @@ -220,7 +226,7 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j // Check that the receiver is non-null and an instance of the field's declaring class. 
Object* o = NULL; - if (!CheckReceiver(ts, javaObj, f, o)) { + if (!CheckReceiver(soa, javaObj, f, o)) { return; } @@ -229,15 +235,15 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char src_descriptor, const JValue& new_value) { - ScopedJniThreadState ts(env); - Field* f = ts.DecodeField(env->FromReflectedField(javaField)); + ScopedObjectAccess soa(env); + Field* f = soa.DecodeField(env->FromReflectedField(javaField)); Object* o = NULL; - if (!CheckReceiver(ts, javaObj, f, o)) { + if (!CheckReceiver(soa, javaObj, f, o)) { return; } FieldHelper fh(f); if (!fh.IsPrimitiveType()) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "Not a primitive field: %s", PrettyField(f).c_str()); return; } diff --git a/src/native/java_lang_reflect_Method.cc b/src/native/java_lang_reflect_Method.cc index 269582269e..2a6ee50ed3 100644 --- a/src/native/java_lang_reflect_Method.cc +++ b/src/native/java_lang_reflect_Method.cc @@ -19,18 +19,18 @@ #include "object.h" #include "object_utils.h" #include "reflection.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { - ScopedJniThreadState ts(env); - return InvokeMethod(ts, javaMethod, javaReceiver, javaArgs); + ScopedObjectAccess soa(env); + return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs); } static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { - ScopedJniThreadState ts(env); - Method* proxy_method = ts.Decode(javaMethod)->AsMethod(); + ScopedObjectAccess soa(env); + Method* proxy_method = soa.Decode(javaMethod)->AsMethod(); CHECK(proxy_method->GetDeclaringClass()->IsProxyClass()); SynthesizedProxyClass* proxy_class = 
down_cast(proxy_method->GetDeclaringClass()); @@ -44,13 +44,13 @@ static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { } CHECK_NE(throws_index, -1); ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - return ts.AddLocalReference(declared_exceptions->Clone()); + return soa.AddLocalReference(declared_exceptions->Clone()); } static jobject Method_findOverriddenMethodNative(JNIEnv* env, jobject javaMethod) { - ScopedJniThreadState ts(env); - Method* method = ts.Decode(javaMethod)->AsMethod(); - return ts.AddLocalReference(method->FindOverriddenMethod()); + ScopedObjectAccess soa(env); + Method* method = soa.Decode(javaMethod)->AsMethod(); + return soa.AddLocalReference(method->FindOverriddenMethod()); } static JNINativeMethod gMethods[] = { diff --git a/src/native/java_lang_reflect_Proxy.cc b/src/native/java_lang_reflect_Proxy.cc index a1337a60d4..81e3f16910 100644 --- a/src/native/java_lang_reflect_Proxy.cc +++ b/src/native/java_lang_reflect_Proxy.cc @@ -18,20 +18,20 @@ #include "class_loader.h" #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring javaName, jobjectArray javaInterfaces, jobject javaLoader, jobjectArray javaMethods, jobjectArray javaThrows) { - ScopedJniThreadState ts(env); - String* name = ts.Decode(javaName); - ObjectArray* interfaces = ts.Decode*>(javaInterfaces); - ClassLoader* loader = ts.Decode(javaLoader); - ObjectArray* methods = ts.Decode*>(javaMethods); - ObjectArray >* throws = ts.Decode >*>(javaThrows); + ScopedObjectAccess soa(env); + String* name = soa.Decode(javaName); + ObjectArray* interfaces = soa.Decode*>(javaInterfaces); + ClassLoader* loader = soa.Decode(javaLoader); + ObjectArray* methods = soa.Decode*>(javaMethods); + ObjectArray >* throws = soa.Decode >*>(javaThrows); ClassLinker* class_linker = 
Runtime::Current()->GetClassLinker(); Class* result = class_linker->CreateProxyClass(name, interfaces, loader, methods, throws); - return ts.AddLocalReference(result); + return soa.AddLocalReference(result); } static JNINativeMethod gMethods[] = { diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc index e3c31b01eb..fe95746995 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc @@ -16,12 +16,14 @@ #include "debugger.h" #include "logging.h" +#include "scoped_thread_state_change.h" #include "ScopedPrimitiveArray.h" namespace art { static void DdmServer_nativeSendChunk(JNIEnv* env, jclass, jint type, jbyteArray javaData, jint offset, jint length) { + ScopedObjectAccess soa(env); ScopedByteArrayRO data(env, javaData); DCHECK_LE(offset + length, static_cast(data.size())); Dbg::DdmSendChunk(type, length, reinterpret_cast(&data[offset])); diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 87d2b22663..b14d6ffe90 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -17,9 +17,9 @@ #include "debugger.h" #include "jni_internal.h" #include "logging.h" -#include "scoped_heap_lock.h" -#include "scoped_jni_thread_state.h" -#include "scoped_thread_list_lock.h" +#include "../mutex.h" // Avoid pulling in icu4c's mutex.h +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" #include "ScopedPrimitiveArray.h" #include "stack.h" #include "thread_list.h" @@ -30,7 +30,8 @@ static void DdmVmInternal_enableRecentAllocations(JNIEnv*, jclass, jboolean enab Dbg::SetAllocTrackingEnabled(enable); } -static jbyteArray DdmVmInternal_getRecentAllocations(JNIEnv*, jclass) { +static jbyteArray DdmVmInternal_getRecentAllocations(JNIEnv* env, jclass) { + 
ScopedObjectAccess soa(env); return Dbg::GetRecentAllocations(); } @@ -38,7 +39,7 @@ static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) { return Dbg::IsAllocTrackingEnabled(); } -static Thread* FindThreadByThinLockId(uint32_t thin_lock_id) { +static jobject FindThreadByThinLockId(JNIEnv* env, uint32_t thin_lock_id) { struct ThreadFinder { explicit ThreadFinder(uint32_t thin_lock_id) : thin_lock_id(thin_lock_id), thread(NULL) { } @@ -54,8 +55,16 @@ static Thread* FindThreadByThinLockId(uint32_t thin_lock_id) { Thread* thread; }; ThreadFinder finder(thin_lock_id); - Runtime::Current()->GetThreadList()->ForEach(ThreadFinder::Callback, &finder); - return finder.thread; + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Runtime::Current()->GetThreadList()->ForEach(ThreadFinder::Callback, &finder); + } + if (finder.thread != NULL) { + ScopedObjectAccess soa(env); + return soa.AddLocalReference(finder.thread->GetPeer()); + } else { + return NULL; + } } /* @@ -63,15 +72,26 @@ static Thread* FindThreadByThinLockId(uint32_t thin_lock_id) { * NULL on failure, e.g. if the threadId couldn't be found. */ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) { - ScopedHeapLock heap_lock; - ScopedThreadListLock thread_list_lock; - Thread* thread = FindThreadByThinLockId(static_cast(thin_lock_id)); - if (thread == NULL) { + ScopedLocalRef peer(env, + FindThreadByThinLockId(env, static_cast(thin_lock_id))); + if (peer.get() == NULL) { + return NULL; + } + bool timeout; + // Suspend thread to build stack trace. + Thread* thread = Thread::SuspendForDebugger(peer.get(), true, &timeout); + if (thread != NULL) { + jobject trace; + { + ScopedObjectAccess soa(env); + trace = thread->CreateInternalStackTrace(soa); + } + // Restart suspended thread. 
+ Runtime::Current()->GetThreadList()->Resume(thread, true); + return Thread::InternalStackTraceToStackTraceElementArray(env, trace); + } else { return NULL; } - ScopedJniThreadState ts(env); - jobject stack = GetThreadStack(ts, thread); - return (stack != NULL) ? Thread::InternalStackTraceToStackTraceElementArray(env, stack) : NULL; } static void ThreadCountCallback(Thread*, void* context) { @@ -109,7 +129,10 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { std::vector& bytes = *reinterpret_cast*>(context); JDWP::Append4BE(bytes, t->GetThinLockId()); - JDWP::Append1BE(bytes, t->GetState()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + JDWP::Append1BE(bytes, t->GetState()); + } JDWP::Append4BE(bytes, t->GetTid()); JDWP::Append4BE(bytes, utime); JDWP::Append4BE(bytes, stime); @@ -119,7 +142,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) { std::vector bytes; { - ScopedThreadListLock thread_list_lock; + MutexLock mu(*GlobalSynchronization::thread_list_lock_); ThreadList* thread_list = Runtime::Current()->GetThreadList(); uint16_t thread_count = 0; @@ -139,7 +162,8 @@ static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) { return result; } -static jint DdmVmInternal_heapInfoNotify(JNIEnv*, jclass, jint when) { +static jint DdmVmInternal_heapInfoNotify(JNIEnv* env, jclass, jint when) { + ScopedObjectAccess soa(env); return Dbg::DdmHandleHpifChunk(static_cast(when)); } diff --git a/src/native/sun_misc_Unsafe.cc b/src/native/sun_misc_Unsafe.cc index dfddd86b66..282731dc8d 100644 --- a/src/native/sun_misc_Unsafe.cc +++ b/src/native/sun_misc_Unsafe.cc @@ -16,34 +16,34 @@ #include "jni_internal.h" #include "object.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" namespace art { static jlong Unsafe_objectFieldOffset0(JNIEnv* env, jclass, jobject javaField) { // TODO: move to Java code 
jfieldID fid = env->FromReflectedField(javaField); - ScopedJniThreadState ts(env); - Field* field = ts.DecodeField(fid); + ScopedObjectAccess soa(env); + Field* field = soa.DecodeField(fid); return field->GetOffset().Int32Value(); } static jint Unsafe_arrayBaseOffset0(JNIEnv* env, jclass, jclass javaArrayClass) { // TODO: move to Java code - ScopedJniThreadState ts(env); - Class* array_class = ts.Decode(javaArrayClass); + ScopedObjectAccess soa(env); + Class* array_class = soa.Decode(javaArrayClass); return Array::DataOffset(array_class->GetComponentSize()).Int32Value(); } static jint Unsafe_arrayIndexScale0(JNIEnv* env, jclass, jclass javaClass) { - ScopedJniThreadState ts(env); - Class* c = ts.Decode(javaClass); + ScopedObjectAccess soa(env); + Class* c = soa.Decode(javaClass); return c->GetComponentSize(); } static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint expectedValue, jint newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_release_cas() returns 0 on success, not failure. @@ -52,8 +52,8 @@ static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, } static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong expectedValue, jlong newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int64_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. 
@@ -62,10 +62,10 @@ static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, } static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaExpectedValue, jobject javaNewValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); - Object* expectedValue = ts.Decode(javaExpectedValue); - Object* newValue = ts.Decode(javaNewValue); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); + Object* expectedValue = soa.Decode(javaExpectedValue); + Object* newValue = soa.Decode(javaNewValue); byte* raw_addr = reinterpret_cast(obj) + offset; int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. @@ -78,105 +78,105 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb } static jint Unsafe_getInt(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); return obj->GetField32(MemberOffset(offset), false); } static jint Unsafe_getIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); return android_atomic_acquire_load(address); } static void Unsafe_putInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); obj->SetField32(MemberOffset(offset), newValue, false); } static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess 
soa(env); + Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); android_atomic_release_store(newValue, address); } static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); ANDROID_MEMBAR_STORE(); obj->SetField32(MemberOffset(offset), newValue, false); } static jlong Unsafe_getLong(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; int64_t* address = reinterpret_cast(raw_addr); return *address; } static jlong Unsafe_getLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); return obj->GetField64(MemberOffset(offset), true); } static void Unsafe_putLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, false); } static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); ANDROID_MEMBAR_STORE(); 
obj->SetField64(MemberOffset(offset), newValue, false); } static jobject Unsafe_getObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); Object* value = obj->GetFieldObject(MemberOffset(offset), true); - return ts.AddLocalReference(value); + return soa.AddLocalReference(value); } static jobject Unsafe_getObject(JNIEnv* env, jobject, jobject javaObj, jlong offset) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); Object* value = obj->GetFieldObject(MemberOffset(offset), false); - return ts.AddLocalReference(value); + return soa.AddLocalReference(value); } static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); - Object* newValue = ts.Decode(javaNewValue); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); + Object* newValue = soa.Decode(javaNewValue); obj->SetFieldObject(MemberOffset(offset), newValue, false); } static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); - Object* newValue = ts.Decode(javaNewValue); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); + Object* newValue = soa.Decode(javaNewValue); obj->SetFieldObject(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { - ScopedJniThreadState ts(env); - Object* obj = ts.Decode(javaObj); - Object* newValue = ts.Decode(javaNewValue); + ScopedObjectAccess soa(env); + Object* obj = soa.Decode(javaObj); + Object* newValue = soa.Decode(javaNewValue); ANDROID_MEMBAR_STORE(); 
obj->SetFieldObject(MemberOffset(offset), newValue, false); } diff --git a/src/oat/jni/arm/calling_convention_arm.cc b/src/oat/jni/arm/calling_convention_arm.cc index 75c0380bed..e06a583618 100644 --- a/src/oat/jni/arm/calling_convention_arm.cc +++ b/src/oat/jni/arm/calling_convention_arm.cc @@ -53,48 +53,27 @@ ManagedRegister ArmJniCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty()); } -// Managed runtime calling convention +ManagedRegister ArmJniCallingConvention::IntReturnRegister() { + return ArmManagedRegister::FromCoreRegister(R0); +} -std::vector ArmManagedRuntimeCallingConvention::entry_spills_; +// Managed runtime calling convention ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() { return ArmManagedRegister::FromCoreRegister(R0); } bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() { - return itr_slots_ < 3; + return false; // Everything moved to stack on entry. } bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() { - if (itr_slots_ < 2) { - return false; - } else if (itr_slots_ > 2) { - return true; - } else { - // handle funny case of a long/double straddling registers and the stack - return IsParamALongOrDouble(itr_args_); - } + return true; } -static const Register kManagedArgumentRegisters[] = { - R1, R2, R3 -}; ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() { - CHECK(IsCurrentParamInRegister()); - if (IsParamALongOrDouble(itr_args_)) { - if (itr_slots_ == 0) { - return ArmManagedRegister::FromRegisterPair(R1_R2); - } else if (itr_slots_ == 1) { - return ArmManagedRegister::FromRegisterPair(R2_R3); - } else { - // This is a long/double split between registers and the stack - return ArmManagedRegister::FromCoreRegister( - kManagedArgumentRegisters[itr_slots_]); - } - } else { - return - ArmManagedRegister::FromCoreRegister(kManagedArgumentRegisters[itr_slots_]); - } + LOG(FATAL) << "Should not reach here"; + return 
ManagedRegister::NoRegister(); } FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() { @@ -103,15 +82,26 @@ FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() { FrameOffset(displacement_.Int32Value() + // displacement kPointerSize + // Method* (itr_slots_ * kPointerSize)); // offset into in args - if (itr_slots_ == 2) { - // the odd spanning case, bump the offset to skip the first half of the - // input which is in a register - CHECK(IsCurrentParamInRegister()); - result = FrameOffset(result.Int32Value() + 4); - } return result; } +const std::vector& ArmManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on ARM to free them up for scratch use, we then assume + // all arguments are on the stack. + if (entry_spills_.size() == 0) { + size_t num_spills = NumArgs() + NumLongOrDoubleArgs(); + if (num_spills > 0) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1)); + if (num_spills > 1) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2)); + if (num_spills > 2) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3)); + } + } + } + } + return entry_spills_; +} // JNI calling convention ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized, @@ -165,11 +155,6 @@ size_t ArmJniCallingConvention::OutArgSize() { kStackAlignment); } -// Will reg be crushed by an outgoing argument? 
-bool ArmJniCallingConvention::IsMethodRegisterClobberedPreCall() { - return true; // The method register R0 is always clobbered by the JNIEnv -} - // JniCallingConvention ABI follows AAPCS where longs and doubles must occur // in even register numbers and stack slots void ArmJniCallingConvention::Next() { diff --git a/src/oat/jni/arm/calling_convention_arm.h b/src/oat/jni/arm/calling_convention_arm.h index 527ffa1f2b..b536b6b0de 100644 --- a/src/oat/jni/arm/calling_convention_arm.h +++ b/src/oat/jni/arm/calling_convention_arm.h @@ -36,12 +36,10 @@ class ArmManagedRuntimeCallingConvention : public ManagedRuntimeCallingConventio virtual bool IsCurrentParamOnStack(); virtual ManagedRegister CurrentParamRegister(); virtual FrameOffset CurrentParamStackOffset(); - virtual const std::vector& EntrySpills() { - DCHECK(entry_spills_.empty()); - return entry_spills_; - } + virtual const std::vector& EntrySpills(); + private: - static std::vector entry_spills_; + std::vector entry_spills_; DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention); }; @@ -52,6 +50,7 @@ class ArmJniCallingConvention : public JniCallingConvention { virtual ~ArmJniCallingConvention() {} // Calling convention virtual ManagedRegister ReturnRegister(); + virtual ManagedRegister IntReturnRegister(); virtual ManagedRegister InterproceduralScratchRegister(); // JNI calling convention virtual void Next(); // Override default behavior for AAPCS @@ -65,7 +64,6 @@ class ArmJniCallingConvention : public JniCallingConvention { virtual uint32_t FpSpillMask() const { return 0; // Floats aren't spilled in JNI down call } - virtual bool IsMethodRegisterClobberedPreCall(); virtual bool IsCurrentParamInRegister(); virtual bool IsCurrentParamOnStack(); virtual ManagedRegister CurrentParamRegister(); diff --git a/src/oat/jni/calling_convention.h b/src/oat/jni/calling_convention.h index ae6c7ede88..121d1f80ae 100644 --- a/src/oat/jni/calling_convention.h +++ b/src/oat/jni/calling_convention.h @@ -41,9 +41,9 @@ 
class CallingConvention { return result; } - // Register that holds result of this method + // Register that holds result of this method invocation. virtual ManagedRegister ReturnRegister() = 0; - // Register reserved for scratch usage during procedure calls + // Register reserved for scratch usage during procedure calls. virtual ManagedRegister InterproceduralScratchRegister() = 0; // Offset of Method within the frame @@ -224,6 +224,8 @@ class JniCallingConvention : public CallingConvention { // Location where the return value of a call can be squirreled if another // call is made following the native call FrameOffset ReturnValueSaveLocation() const; + // Register that holds result if it is integer. + virtual ManagedRegister IntReturnRegister() = 0; // Callee save registers to spill prior to native code (which may clobber) virtual const std::vector& CalleeSaveRegisters() const = 0; @@ -232,10 +234,6 @@ class JniCallingConvention : public CallingConvention { virtual uint32_t CoreSpillMask() const = 0; virtual uint32_t FpSpillMask() const = 0; - // Returns true if the method register will have been clobbered during argument - // set up - virtual bool IsMethodRegisterClobberedPreCall() = 0; - // An extra scratch register live after the call virtual ManagedRegister ReturnScratchRegister() const = 0; diff --git a/src/oat/jni/jni_compiler.cc b/src/oat/jni/jni_compiler.cc index 49160726a9..a9a1bcacfe 100644 --- a/src/oat/jni/jni_compiler.cc +++ b/src/oat/jni/jni_compiler.cc @@ -36,140 +36,13 @@ namespace art { -static void ChangeThreadState(Assembler* jni_asm, ThreadState new_state, - ManagedRegister scratch, ManagedRegister return_reg, - FrameOffset return_save_location, - size_t return_size) { - /* - * This code mirrors that of Thread::SetState where detail is given on why - * barriers occur when they do. - */ - if (new_state == kRunnable) { - /* - * Change our status to kRunnable. 
The transition requires - * that we check for pending suspension, because the runtime considers - * us to be "asleep" in all other states, and another thread could - * be performing a GC now. - */ - __ StoreImmediateToThread(Thread::StateOffset(), kRunnable, scratch); - __ MemoryBarrier(scratch); - __ SuspendPoll(scratch, return_reg, return_save_location, return_size); - } else { - /* - * Not changing to kRunnable. No additional work required. - */ - __ MemoryBarrier(scratch); - __ StoreImmediateToThread(Thread::StateOffset(), new_state, scratch); - } -} - -// Copy a single parameter from the managed to the JNI calling convention static void CopyParameter(Assembler* jni_asm, ManagedRuntimeCallingConvention* mr_conv, JniCallingConvention* jni_conv, - size_t frame_size, size_t out_arg_size) { - bool input_in_reg = mr_conv->IsCurrentParamInRegister(); - bool output_in_reg = jni_conv->IsCurrentParamInRegister(); - FrameOffset sirt_offset(0); - bool null_allowed = false; - bool ref_param = jni_conv->IsCurrentParamAReference(); - CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); - // input may be in register, on stack or both - but not none! - CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack()); - if (output_in_reg) { // output shouldn't straddle registers and stack - CHECK(!jni_conv->IsCurrentParamOnStack()); - } else { - CHECK(jni_conv->IsCurrentParamOnStack()); - } - // References need placing in SIRT and the entry address passing - if (ref_param) { - null_allowed = mr_conv->IsCurrentArgPossiblyNull(); - // Compute SIRT offset. Note null is placed in the SIRT but the jobject - // passed to the native code must be null (not a pointer into the SIRT - // as with regular references). - sirt_offset = jni_conv->CurrentParamSirtEntryOffset(); - // Check SIRT offset is within frame. 
- CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size)); - } - if (input_in_reg && output_in_reg) { - ManagedRegister in_reg = mr_conv->CurrentParamRegister(); - ManagedRegister out_reg = jni_conv->CurrentParamRegister(); - if (ref_param) { - __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed); - } else { - if (!mr_conv->IsCurrentParamOnStack()) { - // regular non-straddling move - __ Move(out_reg, in_reg, mr_conv->CurrentParamSize()); - } else { - UNIMPLEMENTED(FATAL); // we currently don't expect to see this case - } - } - } else if (!input_in_reg && !output_in_reg) { - FrameOffset out_off = jni_conv->CurrentParamStackOffset(); - if (ref_param) { - __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(), - null_allowed); - } else { - FrameOffset in_off = mr_conv->CurrentParamStackOffset(); - size_t param_size = mr_conv->CurrentParamSize(); - CHECK_EQ(param_size, jni_conv->CurrentParamSize()); - __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size); - } - } else if (!input_in_reg && output_in_reg) { - FrameOffset in_off = mr_conv->CurrentParamStackOffset(); - ManagedRegister out_reg = jni_conv->CurrentParamRegister(); - // Check that incoming stack arguments are above the current stack frame. 
- CHECK_GT(in_off.Uint32Value(), frame_size); - if (ref_param) { - __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed); - } else { - size_t param_size = mr_conv->CurrentParamSize(); - CHECK_EQ(param_size, jni_conv->CurrentParamSize()); - __ Load(out_reg, in_off, param_size); - } - } else { - CHECK(input_in_reg && !output_in_reg); - ManagedRegister in_reg = mr_conv->CurrentParamRegister(); - FrameOffset out_off = jni_conv->CurrentParamStackOffset(); - // Check outgoing argument is within frame - CHECK_LT(out_off.Uint32Value(), frame_size); - if (ref_param) { - // TODO: recycle value in in_reg rather than reload from SIRT - __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(), - null_allowed); - } else { - size_t param_size = mr_conv->CurrentParamSize(); - CHECK_EQ(param_size, jni_conv->CurrentParamSize()); - if (!mr_conv->IsCurrentParamOnStack()) { - // regular non-straddling store - __ Store(out_off, in_reg, param_size); - } else { - // store where input straddles registers and stack - CHECK_EQ(param_size, 8u); - FrameOffset in_off = mr_conv->CurrentParamStackOffset(); - __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister()); - } - } - } -} - + size_t frame_size, size_t out_arg_size); static void SetNativeParameter(Assembler* jni_asm, JniCallingConvention* jni_conv, - ManagedRegister in_reg) { - if (jni_conv->IsCurrentParamOnStack()) { - FrameOffset dest = jni_conv->CurrentParamStackOffset(); - __ StoreRawPtr(dest, in_reg); - } else { - if (!jni_conv->CurrentParamRegister().Equals(in_reg)) { - __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize()); - } - } -} - -static bool IsRegisterPair(InstructionSet instruction_set, ManagedRegister r) { - return ((instruction_set == kArm && r.AsArm().IsRegisterPair()) || - (instruction_set == kX86 && r.AsX86().IsRegisterPair())); -} + ManagedRegister in_reg); // Generate the JNI bridge for the given method, 
general contract: // - Arguments are in the managed runtime format, either on stack or in @@ -267,103 +140,55 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, jni_conv->Next(); } - // 4. Transition from being in managed to native code. Save the top_of_managed_stack_ - // so that the managed stack can be crawled while in native code. Clear the corresponding - // PC value that has no meaning for the this frame. + // 4. Write out the end of the quick frames. __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset()); __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0, mr_conv->InterproceduralScratchRegister()); - ChangeThreadState(jni_asm.get(), kNative, - mr_conv->InterproceduralScratchRegister(), - ManagedRegister::NoRegister(), FrameOffset(0), 0); - // 5. Move frame down to allow space for out going args. Do for as short a - // time as possible to aid profiling.. + // 5. Move frame down to allow space for out going args. const size_t out_arg_size = jni_conv->OutArgSize(); __ IncreaseFrameSize(out_arg_size); - // 6. Acquire lock for synchronized methods. 
- if (is_synchronized) { - // Compute arguments in registers to preserve - mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size)); - std::vector live_argument_regs; - std::vector live_argument_regs_size; - while (mr_conv->HasNext()) { - if (mr_conv->IsCurrentParamInRegister()) { - live_argument_regs.push_back(mr_conv->CurrentParamRegister()); - live_argument_regs_size.push_back(mr_conv->CurrentParamSize()); - } - mr_conv->Next(); - } - - // Copy arguments to preserve to callee save registers - CHECK_LE(live_argument_regs.size(), callee_save_regs.size()); - for (size_t in = 0, out = 0; in < live_argument_regs.size(); ++in) { - size_t size = live_argument_regs_size.at(in); - if (IsRegisterPair(instruction_set, live_argument_regs.at(in))) { - CHECK_EQ(instruction_set, kArm); - arm::ArmManagedRegister pair(live_argument_regs.at(in).AsArm()); - arm::Register lo(pair.AsRegisterPairLow()); - arm::Register hi(pair.AsRegisterPairHigh()); - __ Move(callee_save_regs.at(out++), arm::ArmManagedRegister::FromCoreRegister(lo), size / 2); - __ Move(callee_save_regs.at(out++), arm::ArmManagedRegister::FromCoreRegister(hi), size / 2); - } else { - __ Move(callee_save_regs.at(out++), live_argument_regs.at(in), size); - } - } - // Get SIRT entry for 1st argument (jclass or this) to be 1st argument to - // monitor enter - mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size)); + // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable + // can occur. The result is the saved JNI local state that is restored by the exit call. We + // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer + // arguments. + uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) + : ENTRYPOINT_OFFSET(pJniMethodStart); + jni_conv->ResetIterator(FrameOffset(out_arg_size)); + FrameOffset locked_object_sirt_offset(0); + if (is_synchronized) { + // Pass object for locking. 
+ jni_conv->Next(); // Skip JNIEnv. + locked_object_sirt_offset = jni_conv->CurrentParamSirtEntryOffset(); jni_conv->ResetIterator(FrameOffset(out_arg_size)); - jni_conv->Next(); // Skip JNIEnv* - if (is_static) { - FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset(); - if (jni_conv->IsCurrentParamOnStack()) { - FrameOffset out_off = jni_conv->CurrentParamStackOffset(); - __ CreateSirtEntry(out_off, sirt_offset, - mr_conv->InterproceduralScratchRegister(), - false); - } else { - ManagedRegister out_reg = jni_conv->CurrentParamRegister(); - __ CreateSirtEntry(out_reg, sirt_offset, - ManagedRegister::NoRegister(), false); - } + if (jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + __ CreateSirtEntry(out_off, locked_object_sirt_offset, + mr_conv->InterproceduralScratchRegister(), + false); } else { - CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, - out_arg_size); - } - - // Generate JNIEnv* in place and leave a copy in jni_fns_register - jni_conv->ResetIterator(FrameOffset(out_arg_size)); - ManagedRegister jni_fns_register = - jni_conv->InterproceduralScratchRegister(); - __ LoadRawPtrFromThread(jni_fns_register, Thread::JniEnvOffset()); - SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_fns_register); - - // Call JNIEnv->MonitorEnter(object) - __ LoadRawPtr(jni_fns_register, jni_fns_register, functions); - __ Call(jni_fns_register, monitor_enter, - jni_conv->InterproceduralScratchRegister()); - - // Check for exceptions - __ ExceptionPoll(jni_conv->InterproceduralScratchRegister()); - - // Restore live arguments - for (size_t in = 0, out = 0; out < live_argument_regs.size(); ++out) { - size_t size = live_argument_regs_size.at(out); - if (IsRegisterPair(instruction_set, live_argument_regs.at(out))) { - CHECK_EQ(instruction_set, kArm); - arm::ArmManagedRegister pair(live_argument_regs.at(out).AsArm()); - arm::Register lo(pair.AsRegisterPairLow()); - arm::Register 
hi(pair.AsRegisterPairHigh()); - __ Move(arm::ArmManagedRegister::FromCoreRegister(lo), callee_save_regs.at(in++), size / 2); - __ Move(arm::ArmManagedRegister::FromCoreRegister(hi), callee_save_regs.at(in++), size / 2); - } else { - __ Move(live_argument_regs.at(out), callee_save_regs.at(in++), size); - } + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + __ CreateSirtEntry(out_reg, locked_object_sirt_offset, + ManagedRegister::NoRegister(), false); } + jni_conv->Next(); + } + if (jni_conv->IsCurrentParamInRegister()) { + __ GetCurrentThread(jni_conv->CurrentParamRegister()); + __ Call(jni_conv->CurrentParamRegister(), Offset(jni_start), + jni_conv->InterproceduralScratchRegister()); + } else { + __ GetCurrentThread(jni_conv->CurrentParamStackOffset(), + jni_conv->InterproceduralScratchRegister()); + __ Call(ThreadOffset(jni_start), jni_conv->InterproceduralScratchRegister()); + } + if (is_synchronized) { // Check for exceptions from monitor enter. + __ ExceptionPoll(jni_conv->InterproceduralScratchRegister(), out_arg_size); } + FrameOffset saved_cookie_offset = jni_conv->SavedLocalReferenceCookieOffset(); + __ Store(saved_cookie_offset, jni_conv->IntReturnRegister(), 4); // 7. Iterate over arguments placing values from managed calling convention in // to the convention required for a native call (shuffling). For references @@ -384,17 +209,17 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, for (uint32_t i = 0; i < args_count; ++i) { mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size)); jni_conv->ResetIterator(FrameOffset(out_arg_size)); - jni_conv->Next(); // Skip JNIEnv* + jni_conv->Next(); // Skip JNIEnv*. if (is_static) { - jni_conv->Next(); // Skip Class for now + jni_conv->Next(); // Skip Class for now. } + // Skip to the argument we're interested in. 
for (uint32_t j = 0; j < args_count - i - 1; ++j) { mr_conv->Next(); jni_conv->Next(); } CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, out_arg_size); } - if (is_static) { // Create argument for Class mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size)); @@ -412,178 +237,120 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, ManagedRegister::NoRegister(), false); } } - // 8. Create 1st argument, the JNI environment ptr and save the top of the local reference table + + // 8. Create 1st argument, the JNI environment ptr. jni_conv->ResetIterator(FrameOffset(out_arg_size)); // Register that will hold local indirect reference table if (jni_conv->IsCurrentParamInRegister()) { ManagedRegister jni_env = jni_conv->CurrentParamRegister(); DCHECK(!jni_env.Equals(jni_conv->InterproceduralScratchRegister())); __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset()); - // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie - __ Copy(jni_conv->SavedLocalReferenceCookieOffset(), - jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_conv->InterproceduralScratchRegister(), 4); - // env->local_ref_cookie = env->locals.segment_state - __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_env, JNIEnvExt::SegmentStateOffset(), - jni_conv->InterproceduralScratchRegister(), 4); } else { FrameOffset jni_env = jni_conv->CurrentParamStackOffset(); __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(), jni_conv->InterproceduralScratchRegister()); - // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie - __ Copy(jni_conv->SavedLocalReferenceCookieOffset(), - jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_conv->InterproceduralScratchRegister(), 4); - // env->local_ref_cookie = env->locals.segment_state - __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_env, JNIEnvExt::SegmentStateOffset(), - jni_conv->InterproceduralScratchRegister(), 4); } - // 9. 
Plant call to native code associated with method - if (!jni_conv->IsMethodRegisterClobberedPreCall()) { - // Method register shouldn't have been crushed by setting up outgoing - // arguments - __ Call(mr_conv->MethodRegister(), Method::NativeMethodOffset(), - mr_conv->InterproceduralScratchRegister()); - } else { - __ Call(jni_conv->MethodStackOffset(), Method::NativeMethodOffset(), - mr_conv->InterproceduralScratchRegister()); - } + // 9. Plant call to native code associated with method. + __ Call(jni_conv->MethodStackOffset(), Method::NativeMethodOffset(), + mr_conv->InterproceduralScratchRegister()); - // 10. Release lock for synchronized methods. - if (is_synchronized) { - mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size)); - jni_conv->ResetIterator(FrameOffset(out_arg_size)); - jni_conv->Next(); // Skip JNIEnv* - // Save return value - FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation(); - if (jni_conv->SizeOfReturnValue() != 0) { - FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation(); - CHECK_LT(return_save_location.Uint32Value(), frame_size+out_arg_size); - __ Store(return_save_location, jni_conv->ReturnRegister(), - jni_conv->SizeOfReturnValue()); - } - // Get SIRT entry for 1st argument - if (is_static) { - FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset(); - if (jni_conv->IsCurrentParamOnStack()) { - FrameOffset out_off = jni_conv->CurrentParamStackOffset(); - __ CreateSirtEntry(out_off, sirt_offset, - mr_conv->InterproceduralScratchRegister(), - false); - } else { - ManagedRegister out_reg = jni_conv->CurrentParamRegister(); - __ CreateSirtEntry(out_reg, sirt_offset, - ManagedRegister::NoRegister(), false); - } - } else { - CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, - out_arg_size); - } - // Generate JNIEnv* in place and leave a copy in jni_env_register - jni_conv->ResetIterator(FrameOffset(out_arg_size)); - ManagedRegister jni_env_register = - 
jni_conv->InterproceduralScratchRegister(); - __ LoadRawPtrFromThread(jni_env_register, Thread::JniEnvOffset()); - SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_env_register); - // Call JNIEnv->MonitorExit(object) - __ LoadRawPtr(jni_env_register, jni_env_register, functions); - __ Call(jni_env_register, monitor_exit, - jni_conv->InterproceduralScratchRegister()); - // Reload return value - if (jni_conv->SizeOfReturnValue() != 0) { - __ Load(jni_conv->ReturnRegister(), return_save_location, - jni_conv->SizeOfReturnValue()); - } - } - - // 11. Release outgoing argument area - __ DecreaseFrameSize(out_arg_size); - mr_conv->ResetIterator(FrameOffset(frame_size)); - jni_conv->ResetIterator(FrameOffset(0)); - - // 12. Transition from being in native to managed code, possibly entering a - // safepoint - // Don't clobber result - CHECK(!jni_conv->InterproceduralScratchRegister().Equals(jni_conv->ReturnRegister())); - // Location to preserve result on slow path, ensuring its within the frame - FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation(); - CHECK(return_save_location.Uint32Value() < frame_size || - jni_conv->SizeOfReturnValue() == 0); - ChangeThreadState(jni_asm.get(), kRunnable, - jni_conv->InterproceduralScratchRegister(), - jni_conv->ReturnRegister(), return_save_location, - jni_conv->SizeOfReturnValue()); - - // 13. 
Place result in correct register possibly loading from indirect - // reference table - if (jni_conv->IsReturnAReference()) { - __ IncreaseFrameSize(out_arg_size); - jni_conv->ResetIterator(FrameOffset(out_arg_size)); - - jni_conv->Next(); // Skip Thread* argument - // Pass result as arg2 - SetNativeParameter(jni_asm.get(), jni_conv.get(), - jni_conv->ReturnRegister()); - - // Pass Thread* - jni_conv->ResetIterator(FrameOffset(out_arg_size)); - if (jni_conv->IsCurrentParamInRegister()) { - __ GetCurrentThread(jni_conv->CurrentParamRegister()); - __ Call(jni_conv->CurrentParamRegister(), - Offset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)), - jni_conv->InterproceduralScratchRegister()); - } else { - __ GetCurrentThread(jni_conv->CurrentParamStackOffset(), - jni_conv->InterproceduralScratchRegister()); - __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)), - jni_conv->InterproceduralScratchRegister()); - } - - __ DecreaseFrameSize(out_arg_size); - jni_conv->ResetIterator(FrameOffset(0)); - } else if (instruction_set == kX86) { + // 10. Fix differences in result widths. + if (instruction_set == kX86) { if (jni_conv->GetReturnType() == Primitive::kPrimByte || jni_conv->GetReturnType() == Primitive::kPrimShort) { - __ SignExtend(jni_conv->ReturnRegister(), Primitive::ComponentSize(jni_conv->GetReturnType())); + __ SignExtend(jni_conv->ReturnRegister(), + Primitive::ComponentSize(jni_conv->GetReturnType())); } else if (jni_conv->GetReturnType() == Primitive::kPrimBoolean || jni_conv->GetReturnType() == Primitive::kPrimChar) { - __ ZeroExtend(jni_conv->ReturnRegister(), Primitive::ComponentSize(jni_conv->GetReturnType())); + __ ZeroExtend(jni_conv->ReturnRegister(), + Primitive::ComponentSize(jni_conv->GetReturnType())); } } - DCHECK_EQ(mr_conv->SizeOfReturnValue(), jni_conv->SizeOfReturnValue()); - __ Move(mr_conv->ReturnRegister(), jni_conv->ReturnRegister(), mr_conv->SizeOfReturnValue()); - // 14. 
Restore segment state and remove SIRT from thread - { - ManagedRegister jni_env = jni_conv->InterproceduralScratchRegister(); - __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset()); - // env->locals.segment_state = env->local_ref_cookie - __ Copy(jni_env, JNIEnvExt::SegmentStateOffset(), - jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_conv->ReturnScratchRegister(), 4); - // env->local_ref_cookie = Frame[saved_local_ref_cookie_offset] - __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(), - jni_conv->SavedLocalReferenceCookieOffset(), - jni_conv->ReturnScratchRegister(), 4); + // 11. Save return value + bool reference_return = jni_conv->IsReturnAReference(); + FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation(); + if (jni_conv->SizeOfReturnValue() != 0 && !reference_return) { + CHECK_LT(return_save_location.Uint32Value(), frame_size+out_arg_size); + __ Store(return_save_location, jni_conv->ReturnRegister(), jni_conv->SizeOfReturnValue()); } - __ CopyRawPtrToThread(Thread::TopSirtOffset(), jni_conv->SirtLinkOffset(), - jni_conv->InterproceduralScratchRegister()); - - // 15. Check for pending exception and forward if there - __ ExceptionPoll(jni_conv->InterproceduralScratchRegister()); - // 16. Remove activation + // 12. Call into JNI method end possibly passing a returned reference, the method and the current + // thread. + { + // Modify iterator for call, important offsets were saved above. + size_t jni_end_arg_count = 0; + if (reference_return) { jni_end_arg_count++; } + if (is_synchronized) { jni_end_arg_count++; } + const char* jni_end_shorty = jni_end_arg_count == 0 ? "I" + : (jni_end_arg_count == 1 ? "II" : "III"); + jni_conv.reset(JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, + instruction_set)); + // Ensure out arguments will fit in space taken before (we expect this due to stack alignment). 
+ size_t jni_end_out_arg_size = jni_conv->OutArgSize(); + CHECK_LE(jni_end_out_arg_size, out_arg_size); + jni_conv->ResetIterator(FrameOffset(jni_end_out_arg_size)); + } + uintptr_t jni_end; + if (reference_return) { + // Pass result. + jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) + : ENTRYPOINT_OFFSET(pJniMethodEndWithReference); + SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_conv->ReturnRegister()); + jni_conv->Next(); + } else { + jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) + : ENTRYPOINT_OFFSET(pJniMethodEnd); + } + // Pass saved local reference state. + if (jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + __ Copy(out_off, saved_cookie_offset, jni_conv->InterproceduralScratchRegister(), 4); + } else { + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + __ Load(out_reg, saved_cookie_offset, 4); + } + jni_conv->Next(); if (is_synchronized) { - __ RemoveFrame(frame_size, callee_save_regs); + // Pass object for unlocking. + if (jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + __ CreateSirtEntry(out_off, locked_object_sirt_offset, + jni_conv->InterproceduralScratchRegister(), + false); + } else { + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + __ CreateSirtEntry(out_reg, locked_object_sirt_offset, + ManagedRegister::NoRegister(), false); + } + jni_conv->Next(); + } + if (jni_conv->IsCurrentParamInRegister()) { + __ GetCurrentThread(jni_conv->CurrentParamRegister()); + __ Call(jni_conv->CurrentParamRegister(), Offset(jni_end), + jni_conv->InterproceduralScratchRegister()); } else { - // no need to restore callee save registers because we didn't - // clobber them while locking the monitor. 
- __ RemoveFrame(frame_size, std::vector()); + __ GetCurrentThread(jni_conv->CurrentParamStackOffset(), + jni_conv->InterproceduralScratchRegister()); + __ Call(ThreadOffset(jni_end), jni_conv->InterproceduralScratchRegister()); } + // 13. Reload return value + if (jni_conv->SizeOfReturnValue() != 0 && !reference_return) { + __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue()); + } + + // 14. Move frame up now we're done with the out arg space. + __ DecreaseFrameSize(out_arg_size); + + // 15. Process pending exceptions from JNI call or monitor exit. + __ ExceptionPoll(jni_conv->InterproceduralScratchRegister(), 0); + + // 16. Remove activation - no need to restore callee save registers because we didn't clobber + // them. + __ RemoveFrame(frame_size, std::vector()); + // 17. Finalize code generation __ EmitSlowPaths(); size_t cs = __ CodeSize(); @@ -601,6 +368,109 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, jni_conv->FpSpillMask()); } +// Copy a single parameter from the managed to the JNI calling convention +static void CopyParameter(Assembler* jni_asm, + ManagedRuntimeCallingConvention* mr_conv, + JniCallingConvention* jni_conv, + size_t frame_size, size_t out_arg_size) { + bool input_in_reg = mr_conv->IsCurrentParamInRegister(); + bool output_in_reg = jni_conv->IsCurrentParamInRegister(); + FrameOffset sirt_offset(0); + bool null_allowed = false; + bool ref_param = jni_conv->IsCurrentParamAReference(); + CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); + // input may be in register, on stack or both - but not none! 
+ CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack()); + if (output_in_reg) { // output shouldn't straddle registers and stack + CHECK(!jni_conv->IsCurrentParamOnStack()); + } else { + CHECK(jni_conv->IsCurrentParamOnStack()); + } + // References need placing in SIRT and the entry address passing + if (ref_param) { + null_allowed = mr_conv->IsCurrentArgPossiblyNull(); + // Compute SIRT offset. Note null is placed in the SIRT but the jobject + // passed to the native code must be null (not a pointer into the SIRT + // as with regular references). + sirt_offset = jni_conv->CurrentParamSirtEntryOffset(); + // Check SIRT offset is within frame. + CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size)); + } + if (input_in_reg && output_in_reg) { + ManagedRegister in_reg = mr_conv->CurrentParamRegister(); + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + if (ref_param) { + __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed); + } else { + if (!mr_conv->IsCurrentParamOnStack()) { + // regular non-straddling move + __ Move(out_reg, in_reg, mr_conv->CurrentParamSize()); + } else { + UNIMPLEMENTED(FATAL); // we currently don't expect to see this case + } + } + } else if (!input_in_reg && !output_in_reg) { + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + if (ref_param) { + __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(), + null_allowed); + } else { + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size); + } + } else if (!input_in_reg && output_in_reg) { + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + // Check that incoming stack arguments are above the current stack frame. 
+ CHECK_GT(in_off.Uint32Value(), frame_size); + if (ref_param) { + __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed); + } else { + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + __ Load(out_reg, in_off, param_size); + } + } else { + CHECK(input_in_reg && !output_in_reg); + ManagedRegister in_reg = mr_conv->CurrentParamRegister(); + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + // Check outgoing argument is within frame + CHECK_LT(out_off.Uint32Value(), frame_size); + if (ref_param) { + // TODO: recycle value in in_reg rather than reload from SIRT + __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(), + null_allowed); + } else { + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + if (!mr_conv->IsCurrentParamOnStack()) { + // regular non-straddling store + __ Store(out_off, in_reg, param_size); + } else { + // store where input straddles registers and stack + CHECK_EQ(param_size, 8u); + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister()); + } + } + } +} + +static void SetNativeParameter(Assembler* jni_asm, + JniCallingConvention* jni_conv, + ManagedRegister in_reg) { + if (jni_conv->IsCurrentParamOnStack()) { + FrameOffset dest = jni_conv->CurrentParamStackOffset(); + __ StoreRawPtr(dest, in_reg); + } else { + if (!jni_conv->CurrentParamRegister().Equals(in_reg)) { + __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize()); + } + } +} + } // namespace art extern "C" art::CompiledMethod* ArtJniCompileMethod(art::Compiler& compiler, diff --git a/src/oat/jni/x86/calling_convention_x86.cc b/src/oat/jni/x86/calling_convention_x86.cc index 1cd849c1fa..90c050cfcd 100644 --- a/src/oat/jni/x86/calling_convention_x86.cc +++ b/src/oat/jni/x86/calling_convention_x86.cc @@ 
-61,6 +61,10 @@ ManagedRegister X86JniCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty(), true); } +ManagedRegister X86JniCallingConvention::IntReturnRegister() { + return X86ManagedRegister::FromCpuRegister(EAX); +} + // Managed runtime calling convention ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() { @@ -131,10 +135,6 @@ size_t X86JniCallingConvention::OutArgSize() { return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment); } -bool X86JniCallingConvention::IsMethodRegisterClobberedPreCall() { - return IsSynchronized(); // Monitor enter crushes the method register -} - bool X86JniCallingConvention::IsCurrentParamInRegister() { return false; // Everything is passed by stack. } @@ -149,15 +149,17 @@ ManagedRegister X86JniCallingConvention::CurrentParamRegister() { } FrameOffset X86JniCallingConvention::CurrentParamStackOffset() { - return FrameOffset(displacement_.Int32Value() - OutArgSize() + - (itr_slots_ * kPointerSize)); + return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize)); } size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() { size_t static_args = IsStatic() ? 
1 : 0; // count jclass // regular argument parameters and this size_t param_args = NumArgs() + NumLongOrDoubleArgs(); - return static_args + param_args + 2; // count JNIEnv* and return pc (pushed after Method*) + // count JNIEnv* and return pc (pushed after Method*) + size_t total_args = static_args + param_args + 2; + return total_args; + } } // namespace x86 diff --git a/src/oat/jni/x86/calling_convention_x86.h b/src/oat/jni/x86/calling_convention_x86.h index 959a37fc2a..5116a46a95 100644 --- a/src/oat/jni/x86/calling_convention_x86.h +++ b/src/oat/jni/x86/calling_convention_x86.h @@ -49,6 +49,7 @@ class X86JniCallingConvention : public JniCallingConvention { virtual ~X86JniCallingConvention() {} // Calling convention virtual ManagedRegister ReturnRegister(); + virtual ManagedRegister IntReturnRegister(); virtual ManagedRegister InterproceduralScratchRegister(); // JNI calling convention virtual size_t FrameSize(); @@ -61,7 +62,6 @@ class X86JniCallingConvention : public JniCallingConvention { virtual uint32_t FpSpillMask() const { return 0; } - virtual bool IsMethodRegisterClobberedPreCall(); virtual bool IsCurrentParamInRegister(); virtual bool IsCurrentParamOnStack(); virtual ManagedRegister CurrentParamRegister(); diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc index 37454fdc57..1aa069e899 100644 --- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc +++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc @@ -60,8 +60,17 @@ extern "C" void* art_get_obj_static_from_code(uint32_t); extern "C" void art_handle_fill_data_from_code(void*, void*); // JNI entrypoints. 
-extern Object* DecodeJObjectInThread(Thread* thread, jobject obj); extern void* FindNativeMethod(Thread* thread); +extern uint32_t JniMethodStart(Thread* self); +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self); +extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self); +extern Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self); // Lock entrypoints. extern "C" void art_lock_object_from_code(void*); @@ -182,8 +191,13 @@ void InitEntryPoints(EntryPoints* points) { points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code; // JNI - points->pDecodeJObjectInThread = DecodeJObjectInThread; points->pFindNativeMethod = FindNativeMethod; + points->pJniMethodStart = JniMethodStart; + points->pJniMethodStartSynchronized = JniMethodStartSynchronized; + points->pJniMethodEnd = JniMethodEnd; + points->pJniMethodEndSynchronized = JniMethodEndSynchronized; + points->pJniMethodEndWithReference = JniMethodEndWithReference; + points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; // Locks points->pLockObjectFromCode = art_lock_object_from_code; diff --git a/src/oat/runtime/arm/stub_arm.cc b/src/oat/runtime/arm/stub_arm.cc index 5a2074871c..d3c94a84c1 100644 --- a/src/oat/runtime/arm/stub_arm.cc +++ b/src/oat/runtime/arm/stub_arm.cc @@ -17,6 +17,7 @@ #include "jni_internal.h" #include "oat/utils/arm/assembler_arm.h" #include "oat/runtime/oat_support_entrypoints.h" +#include "oat/runtime/stub.h" #include "object.h" #include "stack_indirect_reference_table.h" diff --git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h index 15095535e0..14ba0465c7 100644 --- 
a/src/oat/runtime/callee_save_frame.h +++ b/src/oat/runtime/callee_save_frame.h @@ -23,9 +23,11 @@ namespace art { class Method; -// Place a special frame at the TOS that will save the callee saves for the given type -static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) { - // Be aware the store below may well stomp on an incoming argument +// Place a special frame at the TOS that will save the callee saves for the given type. +static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + // Be aware the store below may well stomp on an incoming argument. + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); *sp = Runtime::Current()->GetCalleeSaveMethod(type); self->SetTopOfStack(sp, 0); self->VerifyStack(); diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h index a235e4fa71..39d9eab2ac 100644 --- a/src/oat/runtime/oat_support_entrypoints.h +++ b/src/oat/runtime/oat_support_entrypoints.h @@ -72,8 +72,14 @@ struct PACKED EntryPoints { void (*pHandleFillArrayDataFromCode)(void*, void*); // JNI - Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj); void* (*pFindNativeMethod)(Thread* thread); + uint32_t (*pJniMethodStart)(Thread*); + uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self); + void (*pJniMethodEnd)(uint32_t cookie, Thread* self); + void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self); + Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self); + Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie, + jobject locked, Thread* self); // Locks void (*pLockObjectFromCode)(void*); diff --git a/src/oat/runtime/stub.h b/src/oat/runtime/stub.h new file mode 100644 index 0000000000..5d8b37d69c --- /dev/null +++ b/src/oat/runtime/stub.h @@ -0,0 +1,44 @@ +/* + * 
Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_ +#define ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_ + +#include "runtime.h" + +namespace art { + +namespace arm { + ByteArray* CreateAbstractMethodErrorStub() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + ByteArray* CreateJniDlsymLookupStub() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +} + +namespace x86 { + ByteArray* CreateAbstractMethodErrorStub() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + ByteArray* CreateJniDlsymLookupStub() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +} + +} // namespace art + +#endif // ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_ diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc index d9394d2581..4a03f98568 100644 --- a/src/oat/runtime/support_alloc.cc +++ b/src/oat/runtime/support_alloc.cc @@ -20,39 +20,45 @@ namespace art { extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method, - Thread* self, Method** sp) { + Thread* self, Method** sp) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, false); } extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, Method* method, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, true); } extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, self, false); } extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, int32_t component_count, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, self, true); } extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, - int32_t component_count, Thread* self, Method** sp) { + int32_t component_count, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false); } extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, int32_t component_count, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, 
method, component_count, self, true); } diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index 139239f7df..ea083f1aae 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -20,14 +20,16 @@ namespace art { // Assignable test for code, won't throw. Null and equality tests already performed -extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) { +extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(klass != NULL); DCHECK(ref_class != NULL); return klass->IsAssignableFrom(ref_class) ? 1 : 0; } // Check whether it is safe to cast one class to the other, throw exception and return -1 on failure -extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) { +extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(a->IsClass()) << PrettyClass(a); DCHECK(b->IsClass()) << PrettyClass(b); if (LIKELY(b->IsAssignableFrom(a))) { @@ -45,7 +47,8 @@ extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self // Tests whether 'element' can be assigned into an array of type 'array_class'. // Returns 0 on success and -1 if an exception is pending. 
extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(array_class != NULL); // element can't be NULL as we catch this is screened in runtime_support Class* element_class = element->GetClass(); diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc index ef6e0b1be4..996804396a 100644 --- a/src/oat/runtime/support_debug.cc +++ b/src/oat/runtime/support_debug.cc @@ -25,13 +25,15 @@ namespace art { * method entry and offset 0 within the method, we'll use an offset of -1 * to denote method entry. */ -extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) { +extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); Dbg::UpdateDebugger(dex_pc, self); } // Temporary debugging hook for compiler. -extern void DebugMe(Method* method, uint32_t info) { +extern void DebugMe(Method* method, uint32_t info) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { LOG(INFO) << "DebugMe"; if (method != NULL) { LOG(INFO) << PrettyMethod(method); diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc index 49e038d370..8e7c2ad8a4 100644 --- a/src/oat/runtime/support_dexcache.cc +++ b/src/oat/runtime/support_dexcache.cc @@ -20,7 +20,8 @@ namespace art { extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Method* referrer, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. 
// A class may be accessing another class' fields when it doesn't have access, as access has been // given by inheritance. @@ -29,7 +30,8 @@ extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Me } extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* referrer, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); @@ -37,7 +39,8 @@ extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* ref extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, const Method* referrer, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); @@ -45,7 +48,8 @@ extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, } extern "C" String* artResolveStringFromCode(Method* referrer, int32_t string_idx, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveStringFromCode(referrer, string_idx); } diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc index 77fe618adc..99e3a944a0 100644 --- a/src/oat/runtime/support_field.cc +++ b/src/oat/runtime/support_field.cc @@ -22,7 +22,8 @@ namespace art { extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* referrer, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t)); if (LIKELY(field != NULL)) { return field->Get32(NULL); @@ -36,7 +37,8 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* ref } extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* referrer, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t)); if (LIKELY(field != NULL)) { return field->Get64(NULL); @@ -50,7 +52,8 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* ref } extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* referrer, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(NULL); @@ -64,7 +67,8 @@ extern "C" Object* 
artGetObjStaticFromCode(uint32_t field_idx, const Method* ref } extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get32(obj); @@ -82,7 +86,8 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, } extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get64(obj); @@ -100,7 +105,8 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, } extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { return field->GetObj(obj); @@ -118,7 +124,8 @@ extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, } extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t)); if (LIKELY(field != NULL)) { field->Set32(NULL, new_value); @@ -134,7 +141,8 @@ extern "C" int 
artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, } extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer, - uint64_t new_value, Thread* self, Method** sp) { + uint64_t new_value, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t)); if (LIKELY(field != NULL)) { field->Set64(NULL, new_value); @@ -150,7 +158,8 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer } extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*)); if (LIKELY(field != NULL)) { if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { @@ -168,7 +177,8 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, } extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { field->Set32(obj, new_value); @@ -188,7 +198,8 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_ } extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); Method* referrer = sp[callee_save->GetFrameSizeInBytes() / sizeof(Method*)]; Field* field = 
FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t)); @@ -211,7 +222,8 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_ } extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value, - const Method* referrer, Thread* self, Method** sp) { + const Method* referrer, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { field->SetObj(obj, new_value); diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc index 7227f6ba13..8561bd8308 100644 --- a/src/oat/runtime/support_fillarray.cc +++ b/src/oat/runtime/support_fillarray.cc @@ -37,7 +37,8 @@ namespace art { */ extern "C" int artHandleFillArrayDataFromCode(Array* array, const Instruction::ArrayDataPayload* payload, - Thread* self, Method** sp) { + Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); if (UNLIKELY(array == NULL)) { diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc index 466968834e..9c7b3a2967 100644 --- a/src/oat/runtime/support_invoke.cc +++ b/src/oat/runtime/support_invoke.cc @@ -20,7 +20,8 @@ namespace art { static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method, - Thread* self, Method** sp, bool access_check, InvokeType type) { + Thread* self, Method** sp, bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); @@ -54,14 +55,16 @@ static uint64_t 
artInvokeCommon(uint32_t method_idx, Object* this_object, Method // See comments in runtime_support_asm.S extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface); } extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); } @@ -69,28 +72,32 @@ extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_ extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); } extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); } extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper); } extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, - Method** sp) { + 
Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); } diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index cfa1a11df8..bbff673b9d 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -16,20 +16,23 @@ #include "object.h" #include "object_utils.h" +#include "scoped_thread_state_change.h" #include "thread.h" namespace art { // Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern void* FindNativeMethod(Thread* self) { +extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertNotHeld(); // We come here as Native. DCHECK(Thread::Current() == self); + ScopedObjectAccess soa(self); - Method* method = const_cast(self->GetCurrentMethod()); + Method* method = self->GetCurrentMethod(); DCHECK(method != NULL); // Lookup symbol address for method, on failure we'll return NULL with an // exception set, otherwise we return the address of the method we found. - void* native_code = self->GetJniEnv()->vm->FindCodeForNativeMethod(method); + void* native_code = soa.Vm()->FindCodeForNativeMethod(method); if (native_code == NULL) { DCHECK(self->IsExceptionPending()); return NULL; @@ -40,23 +43,61 @@ extern void* FindNativeMethod(Thread* self) { } } -// Return value helper for jobject return types, used for JNI return values. -extern Object* DecodeJObjectInThread(Thread* self, jobject java_object) { - if (self->IsExceptionPending()) { - return NULL; +// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
+extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + JNIEnvExt* env = self->GetJniEnv(); + uint32_t saved_local_ref_cookie = env->local_ref_cookie; + env->local_ref_cookie = env->locals.GetSegmentState(); + self->TransitionFromRunnableToSuspended(kNative); + return saved_local_ref_cookie; +} + +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + self->DecodeJObject(to_lock)->MonitorEnter(self); + return JniMethodStart(self); +} + +static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + env->locals.SetSegmentState(env->local_ref_cookie); + env->local_ref_cookie = saved_local_ref_cookie; + self->PopSirt(); +} + +static void UnlockJniSynchronizedMethod(jobject locked, Thread* self) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_) { + // Save any pending exception over monitor exit call. + Throwable* saved_exception = NULL; + if (UNLIKELY(self->IsExceptionPending())) { + saved_exception = self->GetException(); + self->ClearException(); + } + // Decode locked object and unlock, before popping local references. + self->DecodeJObject(locked)->MonitorExit(self); + if (UNLIKELY(self->IsExceptionPending())) { + LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" + << saved_exception->Dump() + << "\nEncountered second exception during implicit MonitorExit:\n" + << self->GetException()->Dump(); } - Object* o = self->DecodeJObject(java_object); - if (o == NULL || !self->GetJniEnv()->check_jni) { - return o; + // Restore pending exception.
+ if (saved_exception != NULL) { + self->SetException(saved_exception); } +} +static void CheckReferenceResult(Object* o, Thread* self) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + if (o == NULL) { + return; + } if (o == kInvalidIndirectRefObject) { JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(self->GetCurrentMethod()).c_str()); } - - // Make sure that the result is an instance of the type this - // method was expected to return. + // Make sure that the result is an instance of the type this method was expected to return. Method* m = self->GetCurrentMethod(); MethodHelper mh(m); Class* return_type = mh.GetReturnType(); @@ -65,7 +106,53 @@ extern Object* DecodeJObjectInThread(Thread* self, jobject java_object) { JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); } +} + +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + PopLocalReferences(saved_local_ref_cookie, self); +} + + +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); +} + +extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + Object* o = self->DecodeJObject(result); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. 
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} +extern Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + Object* o = self->DecodeJObject(result); + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } return o; } @@ -77,7 +164,8 @@ static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { *arg_ptr = reinterpret_cast(value_as_work_around_rep); } -extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) { +extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ DCHECK(Thread::Current() == self); // TODO: this code is specific to ARM // On entry the stack pointed by sp is: diff --git a/src/oat/runtime/support_locks.cc b/src/oat/runtime/support_locks.cc index 30fc5678d6..9d44e559ac 100644 --- a/src/oat/runtime/support_locks.cc +++ b/src/oat/runtime/support_locks.cc @@ -19,14 +19,16 @@ namespace art { -extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, Method** sp) { +extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, Method** sp) + UNLOCK_FUNCTION(monitor_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK(obj != NULL); // Assumed to have been checked before entry // MonitorExit may throw exception return obj->MonitorExit(self) ? 
0 /* Success */ : -1 /* Failure */; } -extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, Method** sp) { +extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, Method** sp) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); DCHECK(obj != NULL); // Assumed to have been checked before entry obj->MonitorEnter(thread); // May block diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index 83d2265975..972779d178 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -18,7 +18,7 @@ #include "object_utils.h" #include "reflection.h" #include "runtime_support.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "well_known_classes.h" @@ -43,7 +43,8 @@ namespace art { // reference arguments (so they survive GC) and create a boxed argument array. Finally we invoke // the invocation handler which is a field within the proxy object receiver. extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, - Thread* self, byte* stack_args) { + Thread* self, byte* stack_args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Register the top of the managed stack Method** proxy_sp = reinterpret_cast(stack_args - SP_OFFSET_IN_BYTES); DCHECK_EQ(*proxy_sp, proxy_method); @@ -51,11 +52,11 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), FRAME_SIZE_IN_BYTES); // Start new JNI local reference state JNIEnvExt* env = self->GetJniEnv(); - ScopedJniThreadState ts(env); + ScopedObjectAccessUnchecked soa(env); ScopedJniEnvLocalRefState env_state(env); // Create local ref. 
copies of proxy method and the receiver - jobject rcvr_jobj = ts.AddLocalReference(receiver); - jobject proxy_method_jobj = ts.AddLocalReference(proxy_method); + jobject rcvr_jobj = soa.AddLocalReference(receiver); + jobject proxy_method_jobj = soa.AddLocalReference(proxy_method); // Placing into local references incoming arguments from the caller's register arguments, // replacing original Object* with jobject @@ -74,7 +75,7 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, while (cur_arg < args_in_regs && param_index < num_params) { if (proxy_mh.IsParamAReference(param_index)) { Object* obj = *reinterpret_cast(stack_args + (cur_arg * kPointerSize)); - jobject jobj = ts.AddLocalReference(obj); + jobject jobj = soa.AddLocalReference(obj); *reinterpret_cast(stack_args + (cur_arg * kPointerSize)) = jobj; } cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1); @@ -85,7 +86,7 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, while (param_index < num_params) { if (proxy_mh.IsParamAReference(param_index)) { Object* obj = *reinterpret_cast(stack_args + (cur_arg * kPointerSize)); - jobject jobj = ts.AddLocalReference(obj); + jobject jobj = soa.AddLocalReference(obj); *reinterpret_cast(stack_args + (cur_arg * kPointerSize)) = jobj; } cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 
2 : 1); @@ -104,13 +105,13 @@ extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, CHECK(self->IsExceptionPending()); return; } - args_jobj[2].l = ts.AddLocalReference(args); + args_jobj[2].l = soa.AddLocalReference(args); } // Convert proxy method into expected interface method Method* interface_method = proxy_method->FindOverriddenMethod(); DCHECK(interface_method != NULL); DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - args_jobj[1].l = ts.AddLocalReference(interface_method); + args_jobj[1].l = soa.AddLocalReference(interface_method); // Box arguments cur_arg = 0; // reset stack location to read to start // reset index, will index into param type array which doesn't include the receiver diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index 3f6bc8f2ae..013f88515b 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -23,7 +23,7 @@ #if defined(ART_USE_LLVM_COMPILER) #include "nth_caller_visitor.h" #endif -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" // Architecture specific assembler helper to deliver exception. extern "C" void art_deliver_exception_from_code(void*); @@ -33,7 +33,8 @@ namespace art { #if !defined(ART_USE_LLVM_COMPILER) // Lazily resolve a method. Called by stub code. 
const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp, Thread* thread, - Runtime::TrampolineType type) { + Runtime::TrampolineType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { #if defined(__arm__) // On entry the stack pointed by sp is: // | argN | | @@ -82,7 +83,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); // Start new JNI local reference state JNIEnvExt* env = thread->GetJniEnv(); - ScopedJniThreadState ts(env); + ScopedObjectAccessUnchecked soa(env); ScopedJniEnvLocalRefState env_state(env); // Compute details about the called method (avoid GCs) @@ -147,7 +148,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp // If we thought we had fewer than 3 arguments in registers, account for the receiver args_in_regs++; } - ts.AddLocalReference(obj); + soa.AddLocalReference(obj); } size_t shorty_index = 1; // skip return value // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip @@ -157,7 +158,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp shorty_index++; if (c == 'L') { Object* obj = reinterpret_cast(regs[cur_arg]); - ts.AddLocalReference(obj); + soa.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); } @@ -168,7 +169,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp shorty_index++; if (c == 'L') { Object* obj = reinterpret_cast(regs[cur_arg]); - ts.AddLocalReference(obj); + soa.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); } @@ -308,7 +309,8 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** ca #if !defined(ART_USE_LLVM_COMPILER) // Called by the AbstractMethodError. Called by stub code. 
-extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) { +extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc index 6cd595bffb..32284bbccc 100644 --- a/src/oat/runtime/support_thread.cc +++ b/src/oat/runtime/support_thread.cc @@ -20,15 +20,18 @@ namespace art { -void CheckSuspendFromCode(Thread* thread) { - // Called when thread->suspend_count_ != 0 - Runtime::Current()->GetThreadList()->FullSuspendCheck(thread); +void CheckSuspendFromCode(Thread* thread) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame. + thread->VerifyStack(); + thread->FullSuspendCheck(); } -extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) { +extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); - Runtime::Current()->GetThreadList()->FullSuspendCheck(thread); + thread->FullSuspendCheck(); } } // namespace art diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc index 31cf7d9373..4fa2387e9e 100644 --- a/src/oat/runtime/support_throw.cc +++ b/src/oat/runtime/support_throw.cc @@ -23,13 +23,15 @@ namespace art { // Deliver an exception that's pending on thread helping set up a callee save frame on the way. 
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) { +extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->DeliverException(); } // Called by generated call to throw an exception. -extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) { +extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { /* * exception may be NULL, in which case this routine should * throw NPE. NOTE: this is a convenience for generated code, @@ -47,7 +49,8 @@ extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread } // Called by generated call to throw a NPE exception. -extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) { +extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); uint32_t dex_pc; Method* throw_method = self->GetCurrentMethod(&dex_pc); @@ -56,21 +59,24 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) } // Called by generated call to throw an arithmetic divide by zero exception. -extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) { +extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); thread->DeliverException(); } // Called by generated call to throw an array index out of bounds exception. 
-extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) { +extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "length=%d; index=%d", limit, index); thread->DeliverException(); } -extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) { +extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); // Remove extra entry pushed onto second stack during method tracing. if (Runtime::Current()->IsMethodTracingActive()) { @@ -83,7 +89,8 @@ extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) { thread->DeliverException(); } -extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) { +extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); Method* method = self->GetCurrentMethod(); self->ThrowNewException("Ljava/lang/NoSuchMethodError;", @@ -91,7 +98,9 @@ extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, M self->DeliverException(); } -extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self, Method** sp) { +extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self, + Method** sp) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); Method* method = self->GetCurrentMethod(); ThrowVerificationError(self, method, kind, ref); diff --git 
a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc index a28a898cf4..e52569da3b 100644 --- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc +++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc @@ -59,8 +59,17 @@ extern "C" void* art_get_obj_static_from_code(uint32_t); extern "C" void art_handle_fill_data_from_code(void*, void*); // JNI entrypoints. -extern Object* DecodeJObjectInThread(Thread* thread, jobject obj); extern void* FindNativeMethod(Thread* thread); +extern uint32_t JniMethodStart(Thread* self); +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self); +extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self); +extern Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self); // Lock entrypoints. 
extern "C" void art_lock_object_from_code(void*); @@ -153,8 +162,13 @@ void InitEntryPoints(EntryPoints* points) { points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code; // JNI - points->pDecodeJObjectInThread = DecodeJObjectInThread; points->pFindNativeMethod = FindNativeMethod; + points->pJniMethodStart = JniMethodStart; + points->pJniMethodStartSynchronized = JniMethodStartSynchronized; + points->pJniMethodEnd = JniMethodEnd; + points->pJniMethodEndSynchronized = JniMethodEndSynchronized; + points->pJniMethodEndWithReference = JniMethodEndWithReference; + points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; // Locks points->pLockObjectFromCode = art_lock_object_from_code; diff --git a/src/oat/runtime/x86/stub_x86.cc b/src/oat/runtime/x86/stub_x86.cc index a9db314988..74e0f391ab 100644 --- a/src/oat/runtime/x86/stub_x86.cc +++ b/src/oat/runtime/x86/stub_x86.cc @@ -16,6 +16,7 @@ #include "jni_internal.h" #include "oat/runtime/oat_support_entrypoints.h" +#include "oat/runtime/stub.h" #include "oat/utils/x86/assembler_x86.h" #include "object.h" #include "stack_indirect_reference_table.h" diff --git a/src/oat/utils/arm/assembler_arm.cc b/src/oat/utils/arm/assembler_arm.cc index 55b618735a..de665dd0d8 100644 --- a/src/oat/utils/arm/assembler_arm.cc +++ b/src/oat/utils/arm/assembler_arm.cc @@ -1440,10 +1440,9 @@ void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, const std::vector& callee_save_regs, const std::vector& entry_spills) { CHECK_ALIGNED(frame_size, kStackAlignment); - DCHECK_EQ(entry_spills.size(), 0u); CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister()); - // Push callee saves and link register + // Push callee saves and link register. 
RegList push_list = 1 << LR; size_t pushed_values = 1; for (size_t i = 0; i < callee_save_regs.size(); i++) { @@ -1453,13 +1452,19 @@ void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } PushList(push_list); - // Increase frame to required size + // Increase frame to required size. CHECK_GT(frame_size, pushed_values * kPointerSize); // Must be at least space to push Method* size_t adjust = frame_size - (pushed_values * kPointerSize); IncreaseFrameSize(adjust); - // Write out Method* + // Write out Method*. StoreToOffset(kStoreWord, R0, SP, 0); + + // Write out entry spills. + for (size_t i = 0; i < entry_spills.size(); ++i) { + Register reg = entry_spills.at(i).AsArm().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); + } } void ArmAssembler::RemoveFrame(size_t frame_size, @@ -1891,9 +1896,9 @@ void ArmSuspendCountSlowPath::Emit(Assembler* sasm) { #undef __ } -void ArmAssembler::ExceptionPoll(ManagedRegister mscratch) { +void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { ArmManagedRegister scratch = mscratch.AsArm(); - ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch); + ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust); buffer_.EnqueueSlowPath(slow); LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), TR, Thread::ExceptionOffset().Int32Value()); @@ -1905,7 +1910,9 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) { ArmAssembler* sp_asm = down_cast(sasm); #define __ sp_asm-> __ Bind(&entry_); - + if (stack_adjust_ != 0) { // Fix up the frame. 
+ __ DecreaseFrameSize(stack_adjust_); + } // Pass exception object as argument // Don't care about preserving R0 as this call won't return __ mov(R0, ShifterOperand(scratch_.AsCoreRegister())); diff --git a/src/oat/utils/arm/assembler_arm.h b/src/oat/utils/arm/assembler_arm.h index edfaf30dfd..2410bac494 100644 --- a/src/oat/utils/arm/assembler_arm.h +++ b/src/oat/utils/arm/assembler_arm.h @@ -564,7 +564,7 @@ class ArmAssembler : public Assembler { // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. - virtual void ExceptionPoll(ManagedRegister scratch); + virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); private: void EmitType01(Condition cond, @@ -650,10 +650,13 @@ class ArmAssembler : public Assembler { // Slowpath entered when Thread::Current()->_exception is non-null class ArmExceptionSlowPath : public SlowPath { public: - explicit ArmExceptionSlowPath(ArmManagedRegister scratch) : scratch_(scratch) {} + explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust) + : scratch_(scratch), stack_adjust_(stack_adjust) { + } virtual void Emit(Assembler *sp_asm); private: const ArmManagedRegister scratch_; + const size_t stack_adjust_; }; // Slowpath entered when Thread::Current()->_suspend_count is non-zero diff --git a/src/oat/utils/assembler.h b/src/oat/utils/assembler.h index dabd3217bd..68108e7508 100644 --- a/src/oat/utils/assembler.h +++ b/src/oat/utils/assembler.h @@ -446,7 +446,7 @@ class Assembler { // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. 
- virtual void ExceptionPoll(ManagedRegister scratch) = 0; + virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0; virtual ~Assembler() {} diff --git a/src/oat/utils/x86/assembler_x86.cc b/src/oat/utils/x86/assembler_x86.cc index b7f0c1f248..78f2b57a91 100644 --- a/src/oat/utils/x86/assembler_x86.cc +++ b/src/oat/utils/x86/assembler_x86.cc @@ -1862,8 +1862,8 @@ void X86SuspendCountSlowPath::Emit(Assembler *sasm) { #undef __ } -void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/) { - X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(); +void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { + X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust); buffer_.EnqueueSlowPath(slow); fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0)); j(kNotEqual, slow->Entry()); @@ -1874,6 +1874,9 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) { #define __ sp_asm-> __ Bind(&entry_); // Note: the return value is dead + if (stack_adjust_ != 0) { // Fix up the frame. + __ DecreaseFrameSize(stack_adjust_); + } // Pass exception as argument in EAX __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset())); __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException))); diff --git a/src/oat/utils/x86/assembler_x86.h b/src/oat/utils/x86/assembler_x86.h index c8edf44da1..7291211911 100644 --- a/src/oat/utils/x86/assembler_x86.h +++ b/src/oat/utils/x86/assembler_x86.h @@ -598,7 +598,7 @@ class X86Assembler : public Assembler { // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. 
- virtual void ExceptionPoll(ManagedRegister scratch); + virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); private: inline void EmitUint8(uint8_t value); @@ -650,8 +650,10 @@ inline void X86Assembler::EmitOperandSizeOverride() { // Slowpath entered when Thread::Current()->_exception is non-null class X86ExceptionSlowPath : public SlowPath { public: - X86ExceptionSlowPath() {} + X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {} virtual void Emit(Assembler *sp_asm); + private: + const size_t stack_adjust_; }; // Slowpath entered when Thread::Current()->_suspend_count is non-zero diff --git a/src/oat_compilation_unit.h b/src/oat_compilation_unit.h index 41c1847fbc..97815ac43f 100644 --- a/src/oat_compilation_unit.h +++ b/src/oat_compilation_unit.h @@ -30,23 +30,19 @@ class DexCache; class OatCompilationUnit { public: - OatCompilationUnit(ClassLoader* class_loader, ClassLinker* class_linker, - const DexFile& dex_file, DexCache& dex_cache, - const DexFile::CodeItem* code_item, - uint32_t method_idx, uint32_t access_flags) - : class_loader_(class_loader), class_linker_(class_linker), - dex_file_(&dex_file), dex_cache_(&dex_cache), code_item_(code_item), - method_idx_(method_idx), access_flags_(access_flags) { + OatCompilationUnit(jobject class_loader, ClassLinker* class_linker, const DexFile& dex_file, + const DexFile::CodeItem* code_item, uint32_t method_idx, uint32_t access_flags) + : class_loader_(class_loader), class_linker_(class_linker), dex_file_(&dex_file), + code_item_(code_item), method_idx_(method_idx), access_flags_(access_flags) { } OatCompilationUnit* GetCallee(uint32_t callee_method_idx, uint32_t callee_access_flags) { - return new OatCompilationUnit(class_loader_, class_linker_, *dex_file_, - *dex_cache_, NULL, callee_method_idx, - callee_access_flags); + return new OatCompilationUnit(class_loader_, class_linker_, *dex_file_, NULL, + callee_method_idx, callee_access_flags); } - ClassLoader* GetClassLoader() 
const { + jobject GetClassLoader() const { return class_loader_; } @@ -58,10 +54,6 @@ class OatCompilationUnit { return dex_file_; } - DexCache* GetDexCache() const { - return dex_cache_; - } - uint32_t GetDexMethodIndex() const { return method_idx_; } @@ -85,15 +77,14 @@ class OatCompilationUnit { } public: - ClassLoader* class_loader_; - ClassLinker* class_linker_; + jobject class_loader_; + ClassLinker* const class_linker_; - const DexFile* dex_file_; - DexCache* dex_cache_; + const DexFile* const dex_file_; - const DexFile::CodeItem* code_item_; - uint32_t method_idx_; - uint32_t access_flags_; + const DexFile::CodeItem* const code_item_; + const uint32_t method_idx_; + const uint32_t access_flags_; }; } // namespace art diff --git a/src/oat_test.cc b/src/oat_test.cc index dae61bbe34..288854bd8a 100644 --- a/src/oat_test.cc +++ b/src/oat_test.cc @@ -25,7 +25,8 @@ class OatTest : public CommonTest { protected: void CheckMethod(Method* method, const OatFile::OatMethod& oat_method, - const DexFile* dex_file) { + const DexFile* dex_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const CompiledMethod* compiled_method = compiler_->GetCompiledMethod(Compiler::MethodReference(dex_file, method->GetDexMethodIndex())); @@ -62,15 +63,16 @@ TEST_F(OatTest, WriteRead) { const bool compile = false; // DISABLED_ due to the time to compile libcore ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - SirtRef class_loader(NULL); + jobject class_loader = NULL; if (compile) { compiler_.reset(new Compiler(kThumb2, false, 2, false, NULL, true, true)); - compiler_->CompileAll(class_loader.get(), class_linker->GetBootClassPath()); + compiler_->CompileAll(class_loader, class_linker->GetBootClassPath()); } + ScopedObjectAccess soa(Thread::Current()); ScratchFile tmp; bool success = OatWriter::Create(tmp.GetFile(), - class_loader.get(), + class_loader, class_linker->GetBootClassPath(), 42U, "lue.art", @@ -78,7 +80,7 @@ TEST_F(OatTest, WriteRead) { 
ASSERT_TRUE(success); if (compile) { // OatWriter strips the code, regenerate to compare - compiler_->CompileAll(class_loader.get(), class_linker->GetBootClassPath()); + compiler_->CompileAll(class_loader, class_linker->GetBootClassPath()); } UniquePtr oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), @@ -105,7 +107,7 @@ TEST_F(OatTest, WriteRead) { UniquePtr oat_class(oat_dex_file->GetOatClass(i)); - Class* klass = class_linker->FindClass(descriptor, class_loader.get()); + Class* klass = class_linker->FindClass(descriptor, NULL); size_t method_index = 0; for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) { diff --git a/src/oat_writer.cc b/src/oat_writer.cc index 013a561957..0546f2b9f3 100644 --- a/src/oat_writer.cc +++ b/src/oat_writer.cc @@ -23,13 +23,14 @@ #include "file.h" #include "os.h" #include "safe_map.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "stl_util.h" namespace art { bool OatWriter::Create(File* file, - ClassLoader* class_loader, + jobject class_loader, const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, @@ -45,7 +46,7 @@ bool OatWriter::Create(File* file, OatWriter::OatWriter(const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, - ClassLoader* class_loader, + jobject class_loader, const Compiler& compiler) { compiler_ = &compiler; class_loader_ = class_loader; @@ -380,8 +381,10 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, if (compiler_->IsImage()) { ClassLinker* linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = linker->FindDexCache(*dex_file); - Method* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader_, - is_direct); + // Unchecked as we hold mutator_lock_ on entry. 
+ ScopedObjectAccessUnchecked soa(Thread::Current()); + Method* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, + soa.Decode(class_loader_), is_direct); CHECK(method != NULL); method->SetFrameSizeInBytes(frame_size_in_bytes); method->SetCoreSpillMask(core_spill_mask); diff --git a/src/oat_writer.h b/src/oat_writer.h index b748dbc35f..60a79a281b 100644 --- a/src/oat_writer.h +++ b/src/oat_writer.h @@ -64,18 +64,19 @@ class OatWriter { public: // Write an oat file. Returns true on success, false on failure. static bool Create(File* file, - ClassLoader* class_loader, + jobject class_loader, const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, - const Compiler& compiler); + const Compiler& compiler) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: OatWriter(const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, - ClassLoader* class_loader, - const Compiler& compiler); + jobject class_loader, + const Compiler& compiler) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); ~OatWriter(); size_t InitOatHeader(); @@ -83,17 +84,21 @@ class OatWriter { size_t InitDexFiles(size_t offset); size_t InitOatClasses(size_t offset); size_t InitOatCode(size_t offset); - size_t InitOatCodeDexFiles(size_t offset); + size_t InitOatCodeDexFiles(size_t offset) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t InitOatCodeDexFile(size_t offset, size_t& oat_class_index, - const DexFile& dex_file); + const DexFile& dex_file) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t InitOatCodeClassDef(size_t offset, size_t oat_class_index, size_t class_def_index, const DexFile& dex_file, - const DexFile::ClassDef& class_def); + const DexFile::ClassDef& class_def) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t 
class_def_index, size_t class_def_method_index, bool is_native, bool is_static, - bool is_direct, uint32_t method_idx, const DexFile*); + bool is_direct, uint32_t method_idx, const DexFile*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool Write(File* file); bool WriteTables(File* file); @@ -146,7 +151,7 @@ class OatWriter { const Compiler* compiler_; // TODO: remove the ClassLoader when the code storage moves out of Method - ClassLoader* class_loader_; + jobject class_loader_; // note OatFile does not take ownership of the DexFiles const std::vector* dex_files_; diff --git a/src/oatdump.cc b/src/oatdump.cc index b1aa47e987..2fc728ef57 100644 --- a/src/oatdump.cc +++ b/src/oatdump.cc @@ -32,6 +32,7 @@ #include "os.h" #include "runtime.h" #include "safe_map.h" +#include "scoped_thread_state_change.h" #include "space.h" #include "stringpiece.h" #include "verifier/gc_map.h" @@ -155,7 +156,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetOatCode(Method* m) { + const void* GetOatCode(Method* m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { MethodHelper mh(m); for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; @@ -486,7 +487,7 @@ class ImageDumper { : os_(os), image_filename_(image_filename), host_prefix_(host_prefix), image_space_(image_space), image_header_(image_header) {} - void Dump() { + void Dump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os_ << "MAGIC:\n"; os_ << image_header_.GetMagic() << "\n\n"; @@ -578,7 +579,8 @@ class ImageDumper { } private: - static void PrettyObjectValue(std::string& summary, Class* type, Object* value) { + static void PrettyObjectValue(std::string& summary, Class* type, Object* value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(type != NULL); if (value == NULL) { StringAppendF(&summary, "null %s\n", PrettyDescriptor(type).c_str()); @@ -599,7 +601,8 
@@ class ImageDumper { } } - static void PrintField(std::string& summary, Field* field, Object* obj) { + static void PrintField(std::string& summary, Field* field, Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { FieldHelper fh(field); Class* type = fh.GetType(); StringAppendF(&summary, "\t%s: ", fh.GetName()); @@ -617,7 +620,8 @@ class ImageDumper { } } - static void DumpFields(std::string& summary, Object* obj, Class* klass) { + static void DumpFields(std::string& summary, Object* obj, Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* super = klass->GetSuperClass(); if (super != NULL) { DumpFields(summary, obj, super); @@ -635,7 +639,8 @@ class ImageDumper { return image_space_.Contains(object); } - const void* GetOatCodeBegin(Method* m) { + const void* GetOatCodeBegin(Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { @@ -647,7 +652,8 @@ class ImageDumper { return code; } - uint32_t GetOatCodeSize(Method* m) { + uint32_t GetOatCodeSize(Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return 0; @@ -655,7 +661,8 @@ class ImageDumper { return oat_code_begin[-1]; } - const void* GetOatCodeEnd(Method* m) { + const void* GetOatCodeEnd(Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return NULL; @@ -663,7 +670,8 @@ class ImageDumper { return oat_code_begin + GetOatCodeSize(m); } - static void Callback(Object* obj, void* arg) { + static void Callback(Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(obj != NULL); DCHECK(arg != NULL); 
ImageDumper* state = reinterpret_cast(arg); @@ -921,7 +929,8 @@ class ImageDumper { method_outlier.push_back(method); } - void DumpOutliers(std::ostream& os) { + void DumpOutliers(std::ostream& os) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { size_t sum_of_sizes = 0; size_t sum_of_sizes_squared = 0; size_t sum_of_expansion = 0; @@ -1021,7 +1030,7 @@ class ImageDumper { os << "\n" << std::flush; } - void Dump(std::ostream& os) { + void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os << "\tart_file_bytes = " << PrettySize(file_bytes) << "\n\n" << "\tart_file_bytes = header_bytes + object_bytes + alignment_bytes\n" << StringPrintf("\theader_bytes = %8zd (%2.0f%% of art file bytes)\n" @@ -1197,11 +1206,15 @@ static int oatdump(int argc, char** argv) { options.push_back(std::make_pair("host-prefix", host_prefix->c_str())); } - UniquePtr runtime(Runtime::Create(options, false)); - if (runtime.get() == NULL) { + if (!Runtime::Create(options, false)) { fprintf(stderr, "Failed to create runtime\n"); return EXIT_FAILURE; } + UniquePtr runtime(Runtime::Current()); + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more managable ScopedObjectAccess. + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + ScopedObjectAccess soa(Thread::Current()); Heap* heap = Runtime::Current()->GetHeap(); ImageSpace* image_space = heap->GetImageSpace(); diff --git a/src/object.cc b/src/object.cc index ecaef3b82d..dd984fc293 100644 --- a/src/object.cc +++ b/src/object.cc @@ -425,7 +425,8 @@ Method* Method::FindOverriddenMethod() const { return result; } -static const void* GetOatCode(const Method* m) { +static const void* GetOatCode(const Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); // Peel off any method tracing trampoline. 
@@ -513,10 +514,13 @@ uint32_t Method::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { } void Method::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const { - // Push a transition back into managed code onto the linked list in thread. - CHECK_EQ(kRunnable, self->GetState()); - self->AssertThreadSuspensionIsAllowable(); + if (kIsDebugBuild) { + self->AssertThreadSuspensionIsAllowable(); + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(kRunnable, self->GetState()); + } + // Push a transition back into managed code onto the linked list in thread. ManagedStack fragment; self->PushManagedStackFragment(&fragment); @@ -1219,6 +1223,7 @@ Array* Array::Alloc(Class* array_class, int32_t component_count, size_t componen } Array* Array::Alloc(Class* array_class, int32_t component_count) { + DCHECK(array_class->IsArrayClass()); return Alloc(array_class, component_count, array_class->GetComponentSize()); } diff --git a/src/object.h b/src/object.h index 0fc6acc78c..03ed132c26 100644 --- a/src/object.h +++ b/src/object.h @@ -209,35 +209,48 @@ class MANAGED Object { void SetClass(Class* new_klass); - bool InstanceOf(const Class* klass) const; + bool InstanceOf(const Class* klass) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - size_t SizeOf() const; + size_t SizeOf() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + Object* Clone() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Object* Clone(); + int32_t IdentityHashCode() const { + #ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we'll need to use the Object's internal concept of identity + UNIMPLEMENTED(FATAL); + #endif + return reinterpret_cast(this); + } static MemberOffset MonitorOffset() { return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); } volatile int32_t* GetRawLockWordAddress() { - byte* raw_addr = reinterpret_cast(this) + OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value(); + byte* raw_addr = 
reinterpret_cast(this) + + OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value(); int32_t* word_addr = reinterpret_cast(raw_addr); return const_cast(word_addr); } uint32_t GetThinLockId(); - void MonitorEnter(Thread* thread); + void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); - bool MonitorExit(Thread* thread); + bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_); - void Notify(); + void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void NotifyAll(); + void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Wait(int64_t timeout); + void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Wait(int64_t timeout, int32_t nanos); + void Wait(int64_t timeout, int32_t nanos) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsClass() const; @@ -285,14 +298,14 @@ class MANAGED Object { return down_cast(this); } - bool IsField() const; + bool IsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* AsField() { + Field* AsField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(IsField()); return down_cast(this); } - const Field* AsField() const { + const Field* AsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(IsField()); return down_cast(this); } @@ -403,16 +416,6 @@ class MANAGED Object { DISALLOW_IMPLICIT_CONSTRUCTORS(Object); }; -struct ObjectIdentityHash { - size_t operator()(const Object* const& obj) const { -#ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we'll need to use the Object's internal concept of identity - UNIMPLEMENTED(FATAL); -#endif - return reinterpret_cast(obj); - } -}; - // C++ mirror of java.lang.reflect.Field class MANAGED Field : public Object { public: @@ -458,32 +461,56 @@ class MANAGED Field : 
public Object { void SetOffset(MemberOffset num_bytes); // field access, null object for static fields - bool GetBoolean(const Object* object) const; - void SetBoolean(Object* object, bool z) const; - int8_t GetByte(const Object* object) const; - void SetByte(Object* object, int8_t b) const; - uint16_t GetChar(const Object* object) const; - void SetChar(Object* object, uint16_t c) const; - int16_t GetShort(const Object* object) const; - void SetShort(Object* object, int16_t s) const; - int32_t GetInt(const Object* object) const; - void SetInt(Object* object, int32_t i) const; - int64_t GetLong(const Object* object) const; - void SetLong(Object* object, int64_t j) const; - float GetFloat(const Object* object) const; - void SetFloat(Object* object, float f) const; - double GetDouble(const Object* object) const; - void SetDouble(Object* object, double d) const; - Object* GetObject(const Object* object) const; - void SetObject(Object* object, const Object* l) const; + bool GetBoolean(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetBoolean(Object* object, bool z) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int8_t GetByte(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetByte(Object* object, int8_t b) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint16_t GetChar(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetChar(Object* object, uint16_t c) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int16_t GetShort(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetShort(Object* object, int16_t s) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int32_t GetInt(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetInt(Object* object, 
int32_t i) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int64_t GetLong(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetLong(Object* object, int64_t j) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + float GetFloat(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetFloat(Object* object, float f) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + double GetDouble(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetDouble(Object* object, double d) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* GetObject(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetObject(Object* object, const Object* l) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // raw field accesses - uint32_t Get32(const Object* object) const; - void Set32(Object* object, uint32_t new_value) const; - uint64_t Get64(const Object* object) const; - void Set64(Object* object, uint64_t new_value) const; - Object* GetObj(const Object* object) const; - void SetObj(Object* object, const Object* new_value) const; + uint32_t Get32(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Set32(Object* object, uint32_t new_value) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint64_t Get64(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Set64(Object* object, uint64_t new_value) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* GetObj(const Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetObj(Object* object, const Object* new_value) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static 
Class* GetJavaLangReflectField() { DCHECK(java_lang_reflect_Field_ != NULL); @@ -658,9 +685,10 @@ class MANAGED Method : public Object { void SetDexCacheInitializedStaticStorage(ObjectArray* new_value); // Find the method that this method overrides - Method* FindOverriddenMethod() const; + Method* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const; + void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); const void* GetCode() const { return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Method, code_), false); @@ -670,7 +698,7 @@ class MANAGED Method : public Object { SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Method, code_), code, false); } - uint32_t GetCodeSize() const { + uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this); uintptr_t code = reinterpret_cast(GetCode()); if (code == 0) { @@ -681,7 +709,8 @@ class MANAGED Method : public Object { return reinterpret_cast(code)[-1]; } - bool IsWithinCode(uintptr_t pc) const { + bool IsWithinCode(uintptr_t pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uintptr_t code = reinterpret_cast(GetCode()); if (code == 0) { return pc == 0; @@ -689,7 +718,8 @@ class MANAGED Method : public Object { return (code <= pc && pc < code + GetCodeSize()); } - void AssertPcIsWithinCode(uintptr_t pc) const; + void AssertPcIsWithinCode(uintptr_t pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); uint32_t GetOatCodeOffset() const { DCHECK(!Runtime::Current()->IsStarted()); @@ -813,9 +843,10 @@ class MANAGED Method : public Object { bool IsRegistered() const; - void RegisterNative(Thread* self, const void* native_method); + void RegisterNative(Thread* self, const void* native_method) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void UnregisterNative(Thread* self); + void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static MemberOffset NativeMethodOffset() { return OFFSET_OF_OBJECT_MEMBER(Method, native_method_); @@ -915,14 +946,17 @@ class MANAGED Method : public Object { // Converts a native PC to a dex PC. TODO: this is a no-op // until we associate a PC mapping table with each method. - uint32_t ToDexPC(const uintptr_t pc) const; + uint32_t ToDexPC(const uintptr_t pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Converts a dex PC to a native PC. TODO: this is a no-op // until we associate a PC mapping table with each method. - uintptr_t ToNativePC(const uint32_t dex_pc) const; + uintptr_t ToNativePC(const uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Find the catch block for the given exception type and dex_pc - uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const; + uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); @@ -1012,9 +1046,11 @@ class MANAGED Array : public Object { public: // A convenience for code that doesn't know the component size, // and doesn't want to have to work it out itself. 
- static Array* Alloc(Class* array_class, int32_t component_count); + static Array* Alloc(Class* array_class, int32_t component_count) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static Array* Alloc(Class* array_class, int32_t component_count, size_t component_size); + static Array* Alloc(Class* array_class, int32_t component_count, size_t component_size) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t SizeOf() const; @@ -1051,7 +1087,8 @@ class MANAGED Array : public Object { } protected: - bool IsValidIndex(int32_t index) const { + bool IsValidIndex(int32_t index) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (UNLIKELY(index < 0 || index >= length_)) { return ThrowArrayIndexOutOfBoundsException(index); } @@ -1059,8 +1096,10 @@ class MANAGED Array : public Object { } protected: - bool ThrowArrayIndexOutOfBoundsException(int32_t index) const; - bool ThrowArrayStoreException(Object* object) const; + bool ThrowArrayIndexOutOfBoundsException(int32_t index) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool ThrowArrayStoreException(Object* object) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: // The number of array elements. 
@@ -1074,23 +1113,27 @@ class MANAGED Array : public Object { template class MANAGED ObjectArray : public Array { public: - static ObjectArray* Alloc(Class* object_array_class, int32_t length); + static ObjectArray* Alloc(Class* object_array_class, int32_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - T* Get(int32_t i) const; + T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void Set(int32_t i, T* object); + void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Set element without bound and element type checks, to be used in limited // circumstances, such as during boot image writing - void SetWithoutChecks(int32_t i, T* object); + void SetWithoutChecks(int32_t i, T* object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - T* GetWithoutChecks(int32_t i) const; + T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void Copy(const ObjectArray* src, int src_pos, ObjectArray* dst, int dst_pos, - size_t length); + size_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - ObjectArray* CopyOf(int32_t new_length); + ObjectArray* CopyOf(int32_t new_length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); @@ -1181,7 +1224,7 @@ class MANAGED Class : public StaticStorageBase { return static_cast(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), false)); } - void SetStatus(Status new_status); + void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns true if the class has failed to link. 
bool IsErroneous() const { @@ -1291,7 +1334,8 @@ class MANAGED Class : public StaticStorageBase { String* GetName() const; // Returns the cached name void SetName(String* name); // Sets the cached name - String* ComputeName(); // Computes the name, then sets the cached value + String* ComputeName() // Computes the name, then sets the cached value + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsProxyClass() const { // Read access flags without using getter as whether something is a proxy can be check in @@ -1370,7 +1414,7 @@ class MANAGED Class : public StaticStorageBase { bool IsStringClass() const; - bool IsThrowableClass() const; + bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Class* GetComponentType() const { return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), false); @@ -1394,7 +1438,7 @@ class MANAGED Class : public StaticStorageBase { } // Creates a raw object instance but does not invoke the default constructor. - Object* AllocObject(); + Object* AllocObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsVariableSize() const { // Classes and arrays vary in size, and so the object_size_ field cannot @@ -1412,9 +1456,10 @@ class MANAGED Class : public StaticStorageBase { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false); } - void SetClassSize(size_t new_class_size); + void SetClassSize(size_t new_class_size) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - size_t GetObjectSize() const { + size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this); DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false); @@ -1429,19 +1474,21 @@ class MANAGED Class : public StaticStorageBase { } // Returns true if this class is in the same packages as that class. 
- bool IsInSamePackage(const Class* that) const; + bool IsInSamePackage(const Class* that) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); // Returns true if this class can access that class. - bool CanAccess(Class* that) const { + bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return that->IsPublic() || this->IsInSamePackage(that); } // Can this class access a member in the provided class with the provided member access flags? // Note that access to the class isn't checked in case the declaring class is protected and the // method has been exposed by a public sub-class - bool CanAccessMember(Class* access_to, uint32_t member_flags) const { + bool CanAccessMember(Class* access_to, uint32_t member_flags) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Classes can access all of their own members if (this == access_to) { return true; @@ -1464,14 +1511,16 @@ class MANAGED Class : public StaticStorageBase { return this->IsInSamePackage(access_to); } - bool IsSubClass(const Class* klass) const; + bool IsSubClass(const Class* klass) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Can src be assigned to this class? For example, String can be assigned to Object (by an // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign // to themselves. Classes for primitive types may not assign to each other. 
- bool IsAssignableFrom(const Class* src) const { + bool IsAssignableFrom(const Class* src) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(src != NULL); if (this == src) { // Can always assign to things of the same type @@ -1526,7 +1575,8 @@ class MANAGED Class : public StaticStorageBase { kDumpClassInitialized = (1 << 2), }; - void DumpClass(std::ostream& os, int flags) const; + void DumpClass(std::ostream& os, int flags) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); DexCache* GetDexCache() const; @@ -1546,11 +1596,13 @@ class MANAGED Class : public StaticStorageBase { new_direct_methods, false); } - Method* GetDirectMethod(int32_t i) const { + Method* GetDirectMethod(int32_t i) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetDirectMethods()->Get(i); } - void SetDirectMethod(uint32_t i, Method* f) { // TODO: uint16_t + void SetDirectMethod(uint32_t i, Method* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ ObjectArray* direct_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); @@ -1581,17 +1633,20 @@ class MANAGED Class : public StaticStorageBase { return (GetVirtualMethods() != NULL) ? 
GetVirtualMethods()->GetLength() : 0; } - Method* GetVirtualMethod(uint32_t i) const { + Method* GetVirtualMethod(uint32_t i) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetVirtualMethods()->Get(i); } - Method* GetVirtualMethodDuringLinking(uint32_t i) const { + Method* GetVirtualMethodDuringLinking(uint32_t i) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous()); return GetVirtualMethods()->Get(i); } - void SetVirtualMethod(uint32_t i, Method* f) { // TODO: uint16_t + void SetVirtualMethod(uint32_t i, Method* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectArray* virtual_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); @@ -1619,7 +1674,8 @@ class MANAGED Class : public StaticStorageBase { // Given a method implemented by this class but potentially from a // super class, return the specific implementation // method for this class. - Method* FindVirtualMethodForVirtual(Method* method) { + Method* FindVirtualMethodForVirtual(Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(!method->GetDeclaringClass()->IsInterface()); // The argument method may from a super class. // Use the index to a potentially overridden one for this instance's class. @@ -1629,13 +1685,17 @@ class MANAGED Class : public StaticStorageBase { // Given a method implemented by this class, but potentially from a // super class or interface, return the specific implementation // method for this class. 
- Method* FindVirtualMethodForInterface(Method* method); + Method* FindVirtualMethodForInterface(Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const; + Method* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const; + Method* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindVirtualMethodForVirtualOrInterface(Method* method) { + Method* FindVirtualMethodForVirtualOrInterface(Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (method->IsDirect()) { return method; } @@ -1645,21 +1705,29 @@ class MANAGED Class : public StaticStorageBase { return FindVirtualMethodForVirtual(method); } - Method* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const; + Method* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const; + Method* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const; + Method* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const; + Method* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const; + Method* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const; + Method* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const; + Method* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const; + Method* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); int32_t GetIfTableCount() const { ObjectArray* iftable = GetIfTable(); @@ -1695,12 +1763,14 @@ class MANAGED Class : public StaticStorageBase { return (GetIFields() != NULL) ? GetIFields()->GetLength() : 0; } - Field* GetInstanceField(uint32_t i) const { // TODO: uint16_t + Field* GetInstanceField(uint32_t i) const // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ DCHECK_NE(NumInstanceFields(), 0U); return GetIFields()->Get(i); } - void SetInstanceField(uint32_t i, Field* f) { // TODO: uint16_t + void SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ ObjectArray* ifields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); ifields->Set(i, f); @@ -1770,11 +1840,13 @@ class MANAGED Class : public StaticStorageBase { return (GetSFields() != NULL) ? 
GetSFields()->GetLength() : 0; } - Field* GetStaticField(uint32_t i) const { // TODO: uint16_t + Field* GetStaticField(uint32_t i) const // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetSFields()->Get(i); } - void SetStaticField(uint32_t i, Field* f) { // TODO: uint16_t + void SetStaticField(uint32_t i, Field* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectArray* sfields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); sfields->Set(i, f); @@ -1787,29 +1859,38 @@ class MANAGED Class : public StaticStorageBase { void SetReferenceStaticOffsets(uint32_t new_reference_offsets); // Find a static or instance field using the JLS resolution order - Field* FindField(const StringPiece& name, const StringPiece& type); + Field* FindField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds the given instance field in this class or a superclass. - Field* FindInstanceField(const StringPiece& name, const StringPiece& type); + Field* FindInstanceField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds the given instance field in this class or a superclass, only searches classes that // have the same dex cache. 
- Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx); + Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type); + Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx); + Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds the given static field in this class or a superclass. - Field* FindStaticField(const StringPiece& name, const StringPiece& type); + Field* FindStaticField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Finds the given static field in this class or superclass, only searches classes that // have the same dex cache. 
- Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx); + Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type); + Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx); + Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); pid_t GetClinitThreadId() const { DCHECK(IsIdxLoaded() || IsErroneous()); @@ -1834,14 +1915,18 @@ class MANAGED Class : public StaticStorageBase { } private: - void SetVerifyErrorClass(Class* klass) { + void SetVerifyErrorClass(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(klass != NULL) << PrettyClass(this); SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false); } - bool Implements(const Class* klass) const; - bool IsArrayAssignableFromArray(const Class* klass) const; - bool IsAssignableFromArray(const Class* klass) const; + bool Implements(const Class* klass) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsArrayAssignableFromArray(const Class* klass) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsAssignableFromArray(const Class* klass) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // defining class loader, or NULL for the "bootstrap" system loader ClassLoader* class_loader_; @@ -1995,8 +2080,7 @@ inline bool Object::IsArrayInstance() const { inline bool Object::IsField() const { Class* java_lang_Class = klass_->klass_; - Class* java_lang_reflect_Field = - java_lang_Class->GetInstanceField(0)->GetClass(); + Class* java_lang_reflect_Field = 
java_lang_Class->GetInstanceField(0)->GetClass(); return GetClass() == java_lang_reflect_Field; } @@ -2178,7 +2262,8 @@ class MANAGED PrimitiveArray : public Array { public: typedef T ElementType; - static PrimitiveArray* Alloc(size_t length); + static PrimitiveArray* Alloc(size_t length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); const T* GetData() const { intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); @@ -2190,14 +2275,14 @@ class MANAGED PrimitiveArray : public Array { return reinterpret_cast(data); } - T Get(int32_t i) const { + T Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (!IsValidIndex(i)) { return T(0); } return GetData()[i]; } - void Set(int32_t i, T value) { + void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // TODO: ArrayStoreException if (IsValidIndex(i)) { GetData()[i] = value; @@ -2251,9 +2336,9 @@ class MANAGED String : public Object { int32_t GetLength() const; - int32_t GetHashCode(); + int32_t GetHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void ComputeHashCode() { + void ComputeHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength())); } @@ -2261,36 +2346,44 @@ class MANAGED String : public Object { return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength()); } - uint16_t CharAt(int32_t index) const; + uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - String* Intern(); + String* Intern() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static String* AllocFromUtf16(int32_t utf16_length, const uint16_t* utf16_data_in, - int32_t hash_code = 0); + int32_t hash_code = 0) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static String* AllocFromModifiedUtf8(const char* utf); + static String* 
AllocFromModifiedUtf8(const char* utf) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static String* AllocFromModifiedUtf8(int32_t utf16_length, - const char* utf8_data_in); + const char* utf8_data_in) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static String* Alloc(Class* java_lang_String, int32_t utf16_length); + static String* Alloc(Class* java_lang_String, int32_t utf16_length) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static String* Alloc(Class* java_lang_String, CharArray* array); + static String* Alloc(Class* java_lang_String, CharArray* array) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool Equals(const char* modified_utf8) const; + bool Equals(const char* modified_utf8) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // TODO: do we need this overload? give it a more intention-revealing name. - bool Equals(const StringPiece& modified_utf8) const; + bool Equals(const StringPiece& modified_utf8) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - bool Equals(const String* that) const; + bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Compare UTF-16 code point values not in a locale-sensitive manner int Compare(int32_t utf16_length, const char* utf8_data_in); // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const uint16_t* that_chars, int32_t that_offset, - int32_t that_length) const; + int32_t that_length) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Create a modified UTF-8 encoded std::string from a java/lang/String object. std::string ToModifiedUtf8() const; @@ -2343,6 +2436,7 @@ class MANAGED String : public Object { DISALLOW_IMPLICIT_CONSTRUCTORS(String); }; +// TODO: remove? only used in a unit test of itself. 
struct StringHashCode { int32_t operator()(String* string) const { return string->GetHashCode(); @@ -2425,13 +2519,13 @@ class MANAGED Throwable : public Object { String* GetDetailMessage() const { return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false); } - std::string Dump() const; + std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // This is a runtime version of initCause, you shouldn't use it if initCause may have been // overridden. Also it asserts rather than throwing exceptions. Currently this is only used // in cases like the verifier where the checks cannot fail and initCause isn't overridden. void SetCause(Throwable* cause); - bool IsCheckedException() const; + bool IsCheckedException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static Class* GetJavaLangThrowable() { DCHECK(java_lang_Throwable_ != NULL); @@ -2485,7 +2579,8 @@ class MANAGED StackTraceElement : public Object { static StackTraceElement* Alloc(String* declaring_class, String* method_name, String* file_name, - int32_t line_number); + int32_t line_number) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void SetClass(Class* java_lang_StackTraceElement); @@ -2511,20 +2606,20 @@ class MANAGED StackTraceElement : public Object { class MANAGED InterfaceEntry : public ObjectArray { public: - Class* GetInterface() const { + Class* GetInterface() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* interface = Get(kInterface)->AsClass(); DCHECK(interface != NULL); return interface; } - void SetInterface(Class* interface) { + void SetInterface(Class* interface) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(interface != NULL); DCHECK(interface->IsInterface()); DCHECK(Get(kInterface) == NULL); Set(kInterface, interface); } - size_t GetMethodArrayCount() const { + size_t GetMethodArrayCount() const 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectArray* method_array = down_cast*>(Get(kMethodArray)); if (method_array == NULL) { return 0; @@ -2532,13 +2627,15 @@ class MANAGED InterfaceEntry : public ObjectArray { return method_array->GetLength(); } - ObjectArray* GetMethodArray() const { + ObjectArray* GetMethodArray() const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ObjectArray* method_array = down_cast*>(Get(kMethodArray)); DCHECK(method_array != NULL); return method_array; } - void SetMethodArray(ObjectArray* new_ma) { + void SetMethodArray(ObjectArray* new_ma) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(new_ma != NULL); DCHECK(Get(kMethodArray) == NULL); Set(kMethodArray, new_ma); diff --git a/src/object_test.cc b/src/object_test.cc index c0049a35b5..bbb96c6e7a 100644 --- a/src/object_test.cc +++ b/src/object_test.cc @@ -34,7 +34,8 @@ class ObjectTest : public CommonTest { void AssertString(int32_t length, const char* utf8_in, const char* utf16_expected_le, - int32_t expected_hash) { + int32_t expected_hash) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { UniquePtr utf16_expected(new uint16_t[length]); for (int32_t i = 0; i < length; i++) { uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | @@ -74,6 +75,7 @@ TEST_F(ObjectTest, IsInSamePackage) { } TEST_F(ObjectTest, Clone) { + ScopedObjectAccess soa(Thread::Current()); SirtRef > a1(class_linker_->AllocObjectArray(256)); size_t s1 = a1->SizeOf(); Object* clone = a1->Clone(); @@ -82,6 +84,7 @@ TEST_F(ObjectTest, Clone) { } TEST_F(ObjectTest, AllocObjectArray) { + ScopedObjectAccess soa(Thread::Current()); SirtRef > oa(class_linker_->AllocObjectArray(2)); EXPECT_EQ(2, oa->GetLength()); EXPECT_TRUE(oa->Get(0) == NULL); @@ -114,6 +117,7 @@ TEST_F(ObjectTest, AllocObjectArray) { } TEST_F(ObjectTest, AllocArray) { + ScopedObjectAccess soa(Thread::Current()); Class* c = class_linker_->FindSystemClass("[I"); SirtRef 
a(Array::Alloc(c, 1)); ASSERT_TRUE(c == a->GetClass()); @@ -129,6 +133,7 @@ TEST_F(ObjectTest, AllocArray) { template void TestPrimitiveArray(ClassLinker* cl) { + ScopedObjectAccess soa(Thread::Current()); typedef typename ArrayT::ElementType T; ArrayT* a = ArrayT::Alloc(2); @@ -183,6 +188,7 @@ TEST_F(ObjectTest, PrimitiveArray_Short_Alloc) { TEST_F(ObjectTest, CheckAndAllocArrayFromCode) { // pretend we are trying to call 'new char[3]' from String.toCharArray + ScopedObjectAccess soa(Thread::Current()); Class* java_util_Arrays = class_linker_->FindSystemClass("Ljava/util/Arrays;"); Method* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V"); const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I"); @@ -200,11 +206,13 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) { TEST_F(ObjectTest, StaticFieldFromCode) { // pretend we are trying to access 'Static.s0' from StaticsFromCode. - SirtRef class_loader(LoadDex("StaticsFromCode")); - const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader.get())[0]; + ScopedObjectAccess soa(Thread::Current()); + jobject class_loader = LoadDex("StaticsFromCode"); + const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0]; CHECK(dex_file != NULL); - Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader.get()); + Class* klass = + class_linker_->FindClass("LStaticsFromCode;", soa.Decode(class_loader)); Method* clinit = klass->FindDirectMethod("", "()V"); const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;"); ASSERT_TRUE(klass_string_id != NULL); @@ -242,6 +250,7 @@ TEST_F(ObjectTest, StaticFieldFromCode) { } TEST_F(ObjectTest, String) { + ScopedObjectAccess soa(Thread::Current()); // Test the empty string. 
AssertString(0, "", "", 0); @@ -265,6 +274,7 @@ TEST_F(ObjectTest, String) { } TEST_F(ObjectTest, StringEqualsUtf8) { + ScopedObjectAccess soa(Thread::Current()); SirtRef string(String::AllocFromModifiedUtf8("android")); EXPECT_TRUE(string->Equals("android")); EXPECT_FALSE(string->Equals("Android")); @@ -279,6 +289,7 @@ TEST_F(ObjectTest, StringEqualsUtf8) { } TEST_F(ObjectTest, StringEquals) { + ScopedObjectAccess soa(Thread::Current()); SirtRef string(String::AllocFromModifiedUtf8("android")); SirtRef string_2(String::AllocFromModifiedUtf8("android")); EXPECT_TRUE(string->Equals(string_2.get())); @@ -294,6 +305,7 @@ TEST_F(ObjectTest, StringEquals) { } TEST_F(ObjectTest, StringLength) { + ScopedObjectAccess soa(Thread::Current()); SirtRef string(String::AllocFromModifiedUtf8("android")); EXPECT_EQ(string->GetLength(), 7); EXPECT_EQ(string->GetUtfLength(), 7); @@ -306,10 +318,13 @@ TEST_F(ObjectTest, StringLength) { } TEST_F(ObjectTest, DescriptorCompare) { + ScopedObjectAccess soa(Thread::Current()); ClassLinker* linker = class_linker_; - SirtRef class_loader_1(LoadDex("ProtoCompare")); - SirtRef class_loader_2(LoadDex("ProtoCompare2")); + jobject jclass_loader_1 = LoadDex("ProtoCompare"); + jobject jclass_loader_2 = LoadDex("ProtoCompare2"); + SirtRef class_loader_1(soa.Decode(jclass_loader_1)); + SirtRef class_loader_2(soa.Decode(jclass_loader_2)); Class* klass1 = linker->FindClass("LProtoCompare;", class_loader_1.get()); ASSERT_TRUE(klass1 != NULL); @@ -365,6 +380,7 @@ TEST_F(ObjectTest, DescriptorCompare) { TEST_F(ObjectTest, StringHashCode) { + ScopedObjectAccess soa(Thread::Current()); SirtRef empty(String::AllocFromModifiedUtf8("")); SirtRef A(String::AllocFromModifiedUtf8("A")); SirtRef ABC(String::AllocFromModifiedUtf8("ABC")); @@ -375,7 +391,10 @@ TEST_F(ObjectTest, StringHashCode) { } TEST_F(ObjectTest, InstanceOf) { - SirtRef class_loader(LoadDex("XandY")); + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + 
SirtRef class_loader(soa.Decode(jclass_loader)); + Class* X = class_linker_->FindClass("LX;", class_loader.get()); Class* Y = class_linker_->FindClass("LY;", class_loader.get()); ASSERT_TRUE(X != NULL); @@ -406,7 +425,9 @@ TEST_F(ObjectTest, InstanceOf) { } TEST_F(ObjectTest, IsAssignableFrom) { - SirtRef class_loader(LoadDex("XandY")); + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + SirtRef class_loader(soa.Decode(jclass_loader)); Class* X = class_linker_->FindClass("LX;", class_loader.get()); Class* Y = class_linker_->FindClass("LY;", class_loader.get()); @@ -441,7 +462,9 @@ TEST_F(ObjectTest, IsAssignableFrom) { } TEST_F(ObjectTest, IsAssignableFromArray) { - SirtRef class_loader(LoadDex("XandY")); + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + SirtRef class_loader(soa.Decode(jclass_loader)); Class* X = class_linker_->FindClass("LX;", class_loader.get()); Class* Y = class_linker_->FindClass("LY;", class_loader.get()); ASSERT_TRUE(X != NULL); @@ -492,6 +515,7 @@ TEST_F(ObjectTest, IsAssignableFromArray) { } TEST_F(ObjectTest, FindInstanceField) { + ScopedObjectAccess soa(Thread::Current()); SirtRef s(String::AllocFromModifiedUtf8("ABC")); ASSERT_TRUE(s.get() != NULL); Class* c = s->GetClass(); @@ -524,6 +548,7 @@ TEST_F(ObjectTest, FindInstanceField) { } TEST_F(ObjectTest, FindStaticField) { + ScopedObjectAccess soa(Thread::Current()); SirtRef s(String::AllocFromModifiedUtf8("ABC")); ASSERT_TRUE(s.get() != NULL); Class* c = s->GetClass(); diff --git a/src/object_utils.h b/src/object_utils.h index 8b2aab9b2d..d523ecc1c5 100644 --- a/src/object_utils.h +++ b/src/object_utils.h @@ -32,29 +32,30 @@ namespace art { class ObjectLock { public: - explicit ObjectLock(Object* object) : self_(Thread::Current()), obj_(object) { + explicit ObjectLock(Object* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + : self_(Thread::Current()), obj_(object) { CHECK(object != 
NULL); obj_->MonitorEnter(self_); } - ~ObjectLock() { + ~ObjectLock() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { obj_->MonitorExit(self_); } - void Wait() { + void Wait() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return Monitor::Wait(self_, obj_, 0, 0, false); } - void Notify() { + void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { obj_->Notify(); } - void NotifyAll() { + void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { obj_->NotifyAll(); } private: - Thread* self_; + Thread* const self_; Object* obj_; DISALLOW_COPY_AND_ASSIGN(ObjectLock); }; @@ -62,6 +63,7 @@ class ObjectLock { class ClassHelper { public: ClassHelper(const Class* c = NULL, ClassLinker* l = NULL) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : class_def_(NULL), class_linker_(l), dex_cache_(NULL), @@ -73,7 +75,8 @@ class ClassHelper { } } - void ChangeClass(const Class* new_c) { + void ChangeClass(const Class* new_c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(new_c != NULL) << "klass_=" << klass_; // Log what we were changing from if any CHECK(new_c->IsClass()) << "new_c=" << new_c; if (dex_cache_ != NULL) { @@ -90,7 +93,7 @@ class ClassHelper { // The returned const char* is only guaranteed to be valid for the lifetime of the ClassHelper. // If you need it longer, copy it into a std::string. 
- const char* GetDescriptor() { + const char* GetDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK(klass_ != NULL); if (UNLIKELY(klass_->IsArrayClass())) { return GetArrayDescriptor(); @@ -106,7 +109,7 @@ class ClassHelper { } } - const char* GetArrayDescriptor() { + const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string result("["); const Class* saved_klass = klass_; CHECK(saved_klass != NULL); @@ -117,7 +120,8 @@ class ClassHelper { return descriptor_.c_str(); } - const DexFile::ClassDef* GetClassDef() { + const DexFile::ClassDef* GetClassDef() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile::ClassDef* result = class_def_; if (result == NULL) { result = GetDexFile().FindClassDef(GetDescriptor()); @@ -126,7 +130,7 @@ class ClassHelper { return result; } - uint32_t NumDirectInterfaces() { + uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(klass_ != NULL); if (klass_->IsPrimitive()) { return 0; @@ -144,14 +148,16 @@ class ClassHelper { } } - uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) { + uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); DCHECK(!klass_->IsArrayClass()); return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_; } - Class* GetDirectInterface(uint32_t idx) { + Class* GetDirectInterface(uint32_t idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); if (klass_->IsArrayClass()) { @@ -174,7 +180,7 @@ class ClassHelper { } } - const char* GetSourceFile() { + const char* GetSourceFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { std::string descriptor(GetDescriptor()); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ 
-182,7 +188,7 @@ class ClassHelper { return dex_file.GetSourceFile(*dex_class_def); } - std::string GetLocation() { + std::string GetLocation() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DexCache* dex_cache = GetDexCache(); if (dex_cache != NULL && !klass_->IsProxyClass()) { return dex_cache->GetLocation()->ToModifiedUtf8(); @@ -192,7 +198,7 @@ class ClassHelper { } } - const DexFile& GetDexFile() { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -202,7 +208,7 @@ class ClassHelper { return *result; } - DexCache* GetDexCache() { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { DCHECK(klass_ != NULL); @@ -213,7 +219,8 @@ class ClassHelper { } private: - const DexFile::TypeList* GetInterfaceTypeList() { + const DexFile::TypeList* GetInterfaceTypeList() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile::TypeList* result = interface_type_list_; if (result == NULL) { const DexFile::ClassDef* class_def = GetClassDef(); @@ -263,7 +270,7 @@ class FieldHelper { } field_ = new_f; } - const char* GetName() { + const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -284,7 +291,7 @@ class FieldHelper { return Runtime::Current()->GetInternTable()->InternStrong(GetName()); } } - Class* GetType() { + Class* GetType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -299,7 +306,7 @@ class FieldHelper { return GetClassLinker()->FindSystemClass(GetTypeDescriptor()); } } - 
const char* GetTypeDescriptor() { + const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -312,27 +319,29 @@ class FieldHelper { return field_index == 0 ? "[Ljava/lang/Class;" : "[[Ljava/lang/Class;"; } } - Primitive::Type GetTypeAsPrimitiveType() { + Primitive::Type GetTypeAsPrimitiveType() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return Primitive::GetType(GetTypeDescriptor()[0]); } - bool IsPrimitiveType() { + bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return type != Primitive::kPrimNot; } - size_t FieldSize() { + size_t FieldSize() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return Primitive::FieldSize(type); } // The returned const char* is only guaranteed to be valid for the lifetime of the FieldHelper. // If you need it longer, copy it into a std::string. - const char* GetDeclaringClassDescriptor() { + const char* GetDeclaringClassDescriptor() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { uint16_t type_idx = field_->GetDeclaringClass()->GetDexTypeIndex(); if (type_idx != DexFile::kDexNoIndex16) { const DexFile& dex_file = GetDexFile(); return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } else { - // Most likely a proxy class + // Most likely a proxy class. 
ClassHelper kh(field_->GetDeclaringClass()); declaring_class_descriptor_ = kh.GetDescriptor(); return declaring_class_descriptor_.c_str(); @@ -340,7 +349,7 @@ class FieldHelper { } private: - DexCache* GetDexCache() { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { result = field_->GetDeclaringClass()->GetDexCache(); @@ -356,7 +365,7 @@ class FieldHelper { } return result; } - const DexFile& GetDexFile() { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -378,22 +387,24 @@ class FieldHelper { class MethodHelper { public: MethodHelper() - : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), - shorty_len_(0) {} + : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), + shorty_len_(0) {} explicit MethodHelper(const Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } MethodHelper(const Method* m, ClassLinker* l) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } - void ChangeMethod(Method* new_m) { + void ChangeMethod(Method* new_m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(new_m != NULL); if (dex_cache_ != NULL) { Class* klass = new_m->GetDeclaringClass(); @@ -412,7 +423,7 @@ class MethodHelper { shorty_ = NULL; } - const char* GetName() { + const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ 
-433,14 +444,15 @@ class MethodHelper { } } - String* GetNameAsString() { + String* GetNameAsString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, GetDexCache()); } - const char* GetShorty() { + const char* GetShorty() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const char* result = shorty_; if (result == NULL) { const DexFile& dex_file = GetDexFile(); @@ -451,14 +463,14 @@ class MethodHelper { return result; } - uint32_t GetShortyLength() { + uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (shorty_ == NULL) { GetShorty(); } return shorty_len_; } - const std::string GetSignature() { + const std::string GetSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ -468,17 +480,20 @@ class MethodHelper { } } - const DexFile::ProtoId& GetPrototype() { + const DexFile::ProtoId& GetPrototype() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetMethodPrototype(dex_file.GetMethodId(method_->GetDexMethodIndex())); } - const DexFile::TypeList* GetParameterTypeList() { + const DexFile::TypeList* GetParameterTypeList() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile::ProtoId& proto = GetPrototype(); return GetDexFile().GetProtoParameters(proto); } - ObjectArray* GetParameterTypes() { + ObjectArray* GetParameterTypes() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile::TypeList* params = GetParameterTypeList(); 
Class* array_class = GetClassLinker()->FindSystemClass("[Ljava/lang/Class;"); uint32_t num_params = params == NULL ? 0 : params->Size(); @@ -494,7 +509,7 @@ class MethodHelper { return result; } - Class* GetReturnType() { + Class* GetReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -502,7 +517,8 @@ class MethodHelper { return GetClassFromTypeIdx(return_type_idx); } - const char* GetReturnTypeDescriptor() { + const char* GetReturnTypeDescriptor() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -510,7 +526,8 @@ class MethodHelper { return dex_file.GetTypeDescriptor(dex_file.GetTypeId(return_type_idx)); } - int32_t GetLineNumFromDexPC(uint32_t dex_pc) { + int32_t GetLineNumFromDexPC(uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (dex_pc == DexFile::kDexNoIndex) { return method_->IsNative() ? 
-2 : -1; } else { @@ -519,7 +536,8 @@ class MethodHelper { } } - const char* GetDeclaringClassDescriptor() { + const char* GetDeclaringClassDescriptor() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* klass = method_->GetDeclaringClass(); DCHECK(!klass->IsProxyClass()); uint16_t type_idx = klass->GetDexTypeIndex(); @@ -527,7 +545,8 @@ class MethodHelper { return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } - const char* GetDeclaringClassSourceFile() { + const char* GetDeclaringClassSourceFile() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ -535,7 +554,8 @@ class MethodHelper { return dex_file.GetSourceFile(*dex_class_def); } - uint32_t GetClassDefIndex() { + uint32_t GetClassDefIndex() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); uint32_t index; @@ -543,26 +563,29 @@ class MethodHelper { return index; } - ClassLoader* GetClassLoader() { + ClassLoader* GetClassLoader() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return method_->GetDeclaringClass()->GetClassLoader(); } - bool IsStatic() { + bool IsStatic() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return method_->IsStatic(); } - bool IsClassInitializer() { + bool IsClassInitializer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return IsStatic() && StringPiece(GetName()) == ""; } - size_t NumArgs() { + size_t NumArgs() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // "1 +" because the first in Args is the receiver. // "- 1" because we don't count the return type. return (IsStatic() ? 
0 : 1) + GetShortyLength() - 1; } // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods - bool IsParamALongOrDouble(size_t param) { + bool IsParamALongOrDouble(size_t param) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -574,7 +597,7 @@ class MethodHelper { } // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods - bool IsParamAReference(size_t param) { + bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -584,7 +607,8 @@ class MethodHelper { return GetShorty()[param] == 'L'; // An array also has a shorty character of 'L' (not '[') } - bool HasSameNameAndSignature(MethodHelper* other) { + bool HasSameNameAndSignature(MethodHelper* other) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (GetDexCache() == other->GetDexCache()) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& mid = dex_file.GetMethodId(method_->GetDexMethodIndex()); @@ -597,15 +621,18 @@ class MethodHelper { return name == other_name && GetSignature() == other->GetSignature(); } - const DexFile::CodeItem* GetCodeItem() { + const DexFile::CodeItem* GetCodeItem() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetDexFile().GetCodeItem(method_->GetCodeItemOffset()); } - bool IsResolvedTypeIdx(uint16_t type_idx) const { + bool IsResolvedTypeIdx(uint16_t type_idx) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return method_->GetDexCacheResolvedTypes()->Get(type_idx) != NULL; } - Class* GetClassFromTypeIdx(uint16_t type_idx) { + Class* GetClassFromTypeIdx(uint16_t type_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* 
type = method_->GetDexCacheResolvedTypes()->Get(type_idx); if (type == NULL) { type = GetClassLinker()->ResolveType(type_idx, method_); @@ -614,16 +641,18 @@ class MethodHelper { return type; } - const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) { + const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } - Class* GetDexCacheResolvedType(uint16_t type_idx) { + Class* GetDexCacheResolvedType(uint16_t type_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetDexCache()->GetResolvedType(type_idx); } - const DexFile& GetDexFile() { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -633,7 +662,7 @@ class MethodHelper { return *result; } - DexCache* GetDexCache() { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { Class* klass = method_->GetDeclaringClass(); @@ -646,7 +675,8 @@ class MethodHelper { private: // Set the method_ field, for proxy methods looking up the interface method via the resolved // methods table. 
- void SetMethod(const Method* method) { + void SetMethod(const Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (method != NULL) { Class* klass = method->GetDeclaringClass(); if (klass->IsProxyClass()) { diff --git a/src/reference_table.cc b/src/reference_table.cc index ee1760b42b..a2b54d6a14 100644 --- a/src/reference_table.cc +++ b/src/reference_table.cc @@ -60,7 +60,10 @@ static size_t GetElementCount(const Object* obj) { } struct ObjectComparator { - bool operator()(const Object* obj1, const Object* obj2) { + bool operator()(const Object* obj1, const Object* obj2) + // TODO: enable analysis when analysis can work with the STL. + NO_THREAD_SAFETY_ANALYSIS { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); // Ensure null references and cleared jweaks appear at the end. if (obj1 == NULL) { return true; @@ -75,8 +78,7 @@ struct ObjectComparator { // Sort by class... if (obj1->GetClass() != obj2->GetClass()) { - return reinterpret_cast(obj1->GetClass()) < - reinterpret_cast(obj2->GetClass()); + return obj1->GetClass()->IdentityHashCode() < obj2->IdentityHashCode(); } else { // ...then by size... size_t count1 = obj1->SizeOf(); @@ -84,9 +86,8 @@ struct ObjectComparator { if (count1 != count2) { return count1 < count2; } else { - // ...and finally by address. - return reinterpret_cast(obj1) < - reinterpret_cast(obj2); + // ...and finally by identity hash code. + return obj1->IdentityHashCode() < obj2->IdentityHashCode(); } } } @@ -97,7 +98,9 @@ struct ObjectComparator { // Pass in the number of elements in the array (or 0 if this is not an // array object), and the number of additional objects that are identical // or equivalent to the original. 
-static void DumpSummaryLine(std::ostream& os, const Object* obj, size_t element_count, int identical, int equiv) { +static void DumpSummaryLine(std::ostream& os, const Object* obj, size_t element_count, + int identical, int equiv) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (obj == NULL) { os << " NULL reference (count=" << equiv << ")\n"; return; diff --git a/src/reference_table.h b/src/reference_table.h index 28af887d18..de9d45d31e 100644 --- a/src/reference_table.h +++ b/src/reference_table.h @@ -43,13 +43,14 @@ class ReferenceTable { size_t Size() const; - void Dump(std::ostream& os) const; + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg); private: typedef std::vector Table; - static void Dump(std::ostream& os, const Table& entries); + static void Dump(std::ostream& os, const Table& entries) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); friend class IndirectReferenceTable; // For Dump. 
std::string name_; diff --git a/src/reference_table_test.cc b/src/reference_table_test.cc index c7c1cc6248..4bb5c97ec4 100644 --- a/src/reference_table_test.cc +++ b/src/reference_table_test.cc @@ -24,6 +24,7 @@ class ReferenceTableTest : public CommonTest { }; TEST_F(ReferenceTableTest, Basics) { + ScopedObjectAccess soa(Thread::Current()); Object* o1 = String::AllocFromModifiedUtf8("hello"); Object* o2 = ShortArray::Alloc(0); diff --git a/src/reflection.cc b/src/reflection.cc index 7726998d51..542f1a2e44 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -20,35 +20,15 @@ #include "jni_internal.h" #include "object.h" #include "object_utils.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" +#include "well_known_classes.h" namespace art { -Method* gBoolean_valueOf; -Method* gByte_valueOf; -Method* gCharacter_valueOf; -Method* gDouble_valueOf; -Method* gFloat_valueOf; -Method* gInteger_valueOf; -Method* gLong_valueOf; -Method* gShort_valueOf; - -void InitBoxingMethods() { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - gBoolean_valueOf = class_linker->FindSystemClass("Ljava/lang/Boolean;")->FindDeclaredDirectMethod("valueOf", "(Z)Ljava/lang/Boolean;"); - gByte_valueOf = class_linker->FindSystemClass("Ljava/lang/Byte;")->FindDeclaredDirectMethod("valueOf", "(B)Ljava/lang/Byte;"); - gCharacter_valueOf = class_linker->FindSystemClass("Ljava/lang/Character;")->FindDeclaredDirectMethod("valueOf", "(C)Ljava/lang/Character;"); - gDouble_valueOf = class_linker->FindSystemClass("Ljava/lang/Double;")->FindDeclaredDirectMethod("valueOf", "(D)Ljava/lang/Double;"); - gFloat_valueOf = class_linker->FindSystemClass("Ljava/lang/Float;")->FindDeclaredDirectMethod("valueOf", "(F)Ljava/lang/Float;"); - gInteger_valueOf = class_linker->FindSystemClass("Ljava/lang/Integer;")->FindDeclaredDirectMethod("valueOf", "(I)Ljava/lang/Integer;"); - gLong_valueOf = 
class_linker->FindSystemClass("Ljava/lang/Long;")->FindDeclaredDirectMethod("valueOf", "(J)Ljava/lang/Long;"); - gShort_valueOf = class_linker->FindSystemClass("Ljava/lang/Short;")->FindDeclaredDirectMethod("valueOf", "(S)Ljava/lang/Short;"); -} - -jobject InvokeMethod(const ScopedJniThreadState& ts, jobject javaMethod, jobject javaReceiver, +jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { - jmethodID mid = ts.Env()->FromReflectedMethod(javaMethod); - Method* m = ts.DecodeMethod(mid); + jmethodID mid = soa.Env()->FromReflectedMethod(javaMethod); + Method* m = soa.DecodeMethod(mid); Class* declaring_class = m->GetDeclaringClass(); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaring_class, true, true)) { @@ -58,24 +38,24 @@ jobject InvokeMethod(const ScopedJniThreadState& ts, jobject javaMethod, jobject Object* receiver = NULL; if (!m->IsStatic()) { // Check that the receiver is non-null and an instance of the field's declaring class. - receiver = ts.Decode(javaReceiver); + receiver = soa.Decode(javaReceiver); if (!VerifyObjectInClass(receiver, declaring_class)) { return NULL; } // Find the actual implementation of the virtual method. m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m); - mid = ts.EncodeMethod(m); + mid = soa.EncodeMethod(m); } // Get our arrays of arguments and their types, and check they're the same size. - ObjectArray* objects = ts.Decode*>(javaArgs); + ObjectArray* objects = soa.Decode*>(javaArgs); MethodHelper mh(m); const DexFile::TypeList* classes = mh.GetParameterTypeList(); uint32_t classes_size = classes == NULL ? 0 : classes->Size(); uint32_t arg_count = (objects != NULL) ? 
objects->GetLength() : 0; if (arg_count != classes_size) { - ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", + soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", "wrong number of arguments; expected %d, got %d", classes_size, arg_count); return NULL; @@ -91,27 +71,27 @@ jobject InvokeMethod(const ScopedJniThreadState& ts, jobject javaMethod, jobject return NULL; } if (!dst_class->IsPrimitive()) { - args[i].l = ts.AddLocalReference(arg); + args[i].l = soa.AddLocalReference(arg); } } // Invoke the method. - JValue value(InvokeWithJValues(ts, javaReceiver, mid, args.get())); + JValue value(InvokeWithJValues(soa, javaReceiver, mid, args.get())); // Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early. - if (ts.Self()->IsExceptionPending()) { - jthrowable th = ts.Env()->ExceptionOccurred(); - ts.Env()->ExceptionClear(); - jclass exception_class = ts.Env()->FindClass("java/lang/reflect/InvocationTargetException"); - jmethodID mid = ts.Env()->GetMethodID(exception_class, "", "(Ljava/lang/Throwable;)V"); - jobject exception_instance = ts.Env()->NewObject(exception_class, mid, th); - ts.Env()->Throw(reinterpret_cast(exception_instance)); + if (soa.Self()->IsExceptionPending()) { + jthrowable th = soa.Env()->ExceptionOccurred(); + soa.Env()->ExceptionClear(); + jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException"); + jmethodID mid = soa.Env()->GetMethodID(exception_class, "", "(Ljava/lang/Throwable;)V"); + jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th); + soa.Env()->Throw(reinterpret_cast(exception_instance)); return NULL; } // Box if necessary and return. 
BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(), value); - return ts.AddLocalReference(value.GetL()); + return soa.AddLocalReference(value.GetL()); } bool VerifyObjectInClass(Object* o, Class* c) { @@ -220,31 +200,31 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { return; } - Method* m = NULL; + jmethodID m = NULL; switch (src_class) { case Primitive::kPrimBoolean: - m = gBoolean_valueOf; + m = WellKnownClasses::java_lang_Boolean_valueOf; break; case Primitive::kPrimByte: - m = gByte_valueOf; + m = WellKnownClasses::java_lang_Byte_valueOf; break; case Primitive::kPrimChar: - m = gCharacter_valueOf; + m = WellKnownClasses::java_lang_Character_valueOf; break; case Primitive::kPrimDouble: - m = gDouble_valueOf; + m = WellKnownClasses::java_lang_Double_valueOf; break; case Primitive::kPrimFloat: - m = gFloat_valueOf; + m = WellKnownClasses::java_lang_Float_valueOf; break; case Primitive::kPrimInt: - m = gInteger_valueOf; + m = WellKnownClasses::java_lang_Integer_valueOf; break; case Primitive::kPrimLong: - m = gLong_valueOf; + m = WellKnownClasses::java_lang_Long_valueOf; break; case Primitive::kPrimShort: - m = gShort_valueOf; + m = WellKnownClasses::java_lang_Short_valueOf; break; case Primitive::kPrimVoid: // There's no such thing as a void field, and void methods invoked via reflection return null. 
@@ -254,13 +234,17 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { LOG(FATAL) << static_cast(src_class); } - Thread* self = Thread::Current(); - ScopedThreadStateChange tsc(self, kRunnable); + if (kIsDebugBuild) { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(Thread::Current()->GetState(), kRunnable); + } + ScopedObjectAccessUnchecked soa(Thread::Current()); JValue args[1] = { value }; - m->Invoke(self, NULL, args, &value); + soa.DecodeMethod(m)->Invoke(soa.Self(), NULL, args, &value); } -static std::string UnboxingFailureKind(Method* m, int index, Field* f) { +static std::string UnboxingFailureKind(Method* m, int index, Field* f) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (m != NULL && index != -1) { ++index; // Humans count from 1. return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index); @@ -271,7 +255,9 @@ static std::string UnboxingFailureKind(Method* m, int index, Field* f) { return "result"; } -static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, int index, Field* f) { +static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, + int index, Field* f) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (!dst_class->IsPrimitive()) { if (o != NULL && !o->InstanceOf(dst_class)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", diff --git a/src/reflection.h b/src/reflection.h index 03847f8a8e..2979e5bdee 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -27,19 +27,27 @@ class Field; union JValue; class Method; class Object; -class ScopedJniThreadState; - -void InitBoxingMethods(); -void BoxPrimitive(Primitive::Type src_class, JValue& value); -bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, size_t index); -bool UnboxPrimitiveForField(Object* o, Class* dst_class, JValue& unboxed_value, Field* f); -bool 
UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value); - -bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue& dst); - -jobject InvokeMethod(const ScopedJniThreadState& ts, jobject method, jobject receiver, jobject args); - -bool VerifyObjectInClass(Object* o, Class* c); +class ScopedObjectAccess; + +void BoxPrimitive(Primitive::Type src_class, JValue& value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, + size_t index) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +bool UnboxPrimitiveForField(Object* o, Class* dst_class, JValue& unboxed_value, Field* f) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + +bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, + JValue& dst) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + +jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + +bool VerifyObjectInClass(Object* o, Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); } // namespace art diff --git a/src/runtime.cc b/src/runtime.cc index 6447ede6a0..5230b77465 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -35,9 +35,8 @@ #include "jni_internal.h" #include "monitor.h" #include "oat_file.h" -#include "scoped_heap_lock.h" -#include "scoped_jni_thread_state.h" #include "ScopedLocalRef.h" +#include "scoped_thread_state_change.h" #include "signal_catcher.h" #include "signal_set.h" #include "space.h" @@ -118,10 +117,7 @@ Runtime::~Runtime() { } // Make sure to let the GC complete if it is running. 
- { - ScopedHeapLock heap_lock; - heap_->WaitForConcurrentGcToComplete(); - } + heap_->WaitForConcurrentGcToComplete(); // Make sure our internal threads are dead before we start tearing down things they're using. Dbg::StopJdwp(); @@ -162,6 +158,10 @@ struct AbortState { if (self == NULL) { os << "(Aborting thread was not attached to runtime!)\n"; } else { + // TODO: we're aborting and the ScopedObjectAccess may attempt to acquire the mutator_lock_ + // which may block indefinitely if there's a misbehaving thread holding it exclusively. + // The code below should be made robust to this. + ScopedObjectAccess soa(self); self->Dump(os); if (self->IsExceptionPending()) { os << "Pending " << PrettyTypeOf(self->GetException()) << " on thread:\n" @@ -171,15 +171,10 @@ struct AbortState { } }; -static Mutex& GetAbortLock() { - static Mutex abort_lock("abort lock"); - return abort_lock; -} - void Runtime::Abort() { // Ensure that we don't have multiple threads trying to abort at once, // which would result in significantly worse diagnostics. - MutexLock mu(GetAbortLock()); + MutexLock mu(*GlobalSynchronization::abort_lock_); // Get any pending output out of the way. 
fflush(NULL); @@ -315,15 +310,6 @@ size_t ParseIntegerOrDie(const std::string& s) { return result; } -void LoadJniLibrary(JavaVMExt* vm, const char* name) { - std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, name)); - std::string reason; - if (!vm->LoadNativeLibrary(mapped_name, NULL, reason)) { - LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " - << reason; - } -} - Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, bool ignore_unrecognized) { UniquePtr parsed(new ParsedOptions()); const char* boot_class_path_string = getenv("BOOTCLASSPATH"); @@ -543,17 +529,19 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b return parsed.release(); } -Runtime* Runtime::Create(const Options& options, bool ignore_unrecognized) { +bool Runtime::Create(const Options& options, bool ignore_unrecognized) { // TODO: acquire a static mutex on Runtime to avoid racing. if (Runtime::instance_ != NULL) { - return NULL; + return false; } + GlobalSynchronization::Init(); instance_ = new Runtime; if (!instance_->Init(options, ignore_unrecognized)) { delete instance_; instance_ = NULL; + return false; } - return instance_; + return true; } static void CreateSystemClassLoader() { @@ -561,28 +549,28 @@ static void CreateSystemClassLoader() { return; } - ScopedJniThreadState ts(Thread::Current()); + ScopedObjectAccess soa(Thread::Current()); - Class* class_loader_class = ts.Decode(WellKnownClasses::java_lang_ClassLoader); + Class* class_loader_class = soa.Decode(WellKnownClasses::java_lang_ClassLoader); CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(class_loader_class, true, true)); Method* getSystemClassLoader = class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;"); CHECK(getSystemClassLoader != NULL); ClassLoader* class_loader = - down_cast(InvokeWithJValues(ts, NULL, getSystemClassLoader, NULL).GetL()); + down_cast(InvokeWithJValues(soa, NULL, 
getSystemClassLoader, NULL).GetL()); CHECK(class_loader != NULL); - ts.Self()->SetClassLoaderOverride(class_loader); + soa.Self()->SetClassLoaderOverride(class_loader); - Class* thread_class = ts.Decode(WellKnownClasses::java_lang_Thread); + Class* thread_class = soa.Decode(WellKnownClasses::java_lang_Thread); CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(thread_class, true, true)); Field* contextClassLoader = thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;"); CHECK(contextClassLoader != NULL); - contextClassLoader->SetObject(ts.Self()->GetPeer(), class_loader); + contextClassLoader->SetObject(soa.Self()->GetPeer(), class_loader); } void Runtime::Start() { @@ -593,16 +581,16 @@ void Runtime::Start() { // Relocate the OatFiles (ELF images). class_linker_->RelocateExecutable(); - // Restore main thread state to kNative as expected by native code. - Thread* self = Thread::Current(); - self->SetState(kNative); - // Pre-allocate an OutOfMemoryError for the double-OOME case. + Thread* self = Thread::Current(); self->ThrowNewException("Ljava/lang/OutOfMemoryError;", "OutOfMemoryError thrown while trying to throw OutOfMemoryError; no stack available"); pre_allocated_OutOfMemoryError_ = self->GetException(); self->ClearException(); + // Restore main thread state to kNative as expected by native code. + self->TransitionFromRunnableToSuspended(kNative); + started_ = true; // InitNativeMethods needs to be after started_ so that the classes @@ -651,7 +639,10 @@ void Runtime::StartDaemonThreads() { Thread* self = Thread::Current(); // Must be in the kNative state for calling native methods. 
- CHECK_EQ(self->GetState(), kNative); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(self->GetState(), kNative); + } JNIEnv* env = self->GetJniEnv(); env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_start); @@ -700,7 +691,8 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) { heap_ = new Heap(options->heap_initial_size_, options->heap_growth_limit_, options->heap_maximum_size_, - options->image_); + options->image_, + options->is_concurrent_gc_enabled_); BlockSignals(); InitPlatformSignalHandlers(); @@ -714,7 +706,7 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) { Thread::Attach("main", false, NULL); // Set us to runnable so tools using a runtime can allocate and GC by default - Thread::Current()->SetState(kRunnable); + Thread::Current()->TransitionFromSuspendedToRunnable(); // Now we're attached, we can take the heap lock and validate the heap. GetHeap()->EnableObjectValidation(); @@ -747,7 +739,10 @@ void Runtime::InitNativeMethods() { JNIEnv* env = self->GetJniEnv(); // Must be in the kNative state for calling native methods (JNI_OnLoad code). - CHECK_EQ(self->GetState(), kNative); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(self->GetState(), kNative); + } // First set up JniConstants, which is used by both the runtime's built-in native // methods and libcore. @@ -760,7 +755,15 @@ void Runtime::InitNativeMethods() { // Then set up libcore, which is just a regular JNI library with a regular JNI_OnLoad. // Most JNI libraries can just use System.loadLibrary, but libcore can't because it's // the library that implements System.loadLibrary! 
- LoadJniLibrary(instance_->GetJavaVM(), "javacore"); + { + std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore")); + std::string reason; + self->TransitionFromSuspendedToRunnable(); + if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, NULL, reason)) { + LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason; + } + self->TransitionFromRunnableToSuspended(kNative); + } VLOG(startup) << "Runtime::InitNativeMethods exiting"; } @@ -826,12 +829,12 @@ void Runtime::DumpForSigQuit(std::ostream& os) { } void Runtime::DumpLockHolders(std::ostream& os) { - pid_t heap_lock_owner = GetHeap()->GetLockOwner(); + uint64_t mutator_lock_owner = GlobalSynchronization::mutator_lock_->GetExclusiveOwnerTid(); pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner(); pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner(); pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner(); - if ((heap_lock_owner | thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) { - os << "Heap lock owner tid: " << heap_lock_owner << "\n" + if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) { + os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n" << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n" << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n" << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n"; @@ -913,7 +916,7 @@ void Runtime::DetachCurrentThread() { if (self->HasManagedStack()) { LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code"; } - thread_list_->Unregister(); + thread_list_->Unregister(self); } void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { @@ -1031,7 +1034,7 @@ void Runtime::DisableMethodTracing() { tracer_ = NULL; } -const std::vector& Runtime::GetCompileTimeClassPath(const ClassLoader* class_loader) { +const std::vector& Runtime::GetCompileTimeClassPath(jobject 
class_loader) { if (class_loader == NULL) { return GetClassLinker()->GetBootClassPath(); } @@ -1041,7 +1044,7 @@ const std::vector& Runtime::GetCompileTimeClassPath(const ClassL return it->second; } -void Runtime::SetCompileTimeClassPath(const ClassLoader* class_loader, std::vector& class_path) { +void Runtime::SetCompileTimeClassPath(jobject class_loader, std::vector& class_path) { CHECK(!IsStarted()); use_compile_time_class_path_ = true; compile_time_class_paths_.Put(class_loader, class_path); diff --git a/src/runtime.h b/src/runtime.h index 544dcf4173..3b9919ce40 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -28,6 +28,7 @@ #include "globals.h" #include "heap.h" #include "instruction_set.h" +#include "jobject_comparator.h" #include "macros.h" #include "runtime_stats.h" #include "safe_map.h" @@ -95,7 +96,8 @@ class Runtime { }; // Creates and initializes a new runtime. - static Runtime* Create(const Options& options, bool ignore_unrecognized); + static bool Create(const Options& options, bool ignore_unrecognized) + SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); bool IsCompiler() const { return is_compiler_; @@ -115,7 +117,7 @@ class Runtime { } // Starts a runtime, which may cause threads to be started and code to run. - void Start(); + void Start() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); bool IsShuttingDown() const { return shutting_down_; @@ -138,7 +140,7 @@ class Runtime { // This isn't marked ((noreturn)) because then gcc will merge multiple calls // in a single function together. This reduces code size slightly, but means // that the native stack trace we get may point at the wrong call site. - static void Abort(); + static void Abort() LOCKS_EXCLUDED(GlobalSynchronization::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -152,9 +154,10 @@ class Runtime { void CallExitHook(jint status); // Detaches the current native thread from the runtime. 
- void DetachCurrentThread(); + void DetachCurrentThread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); - void DumpForSigQuit(std::ostream& os); + void DumpForSigQuit(std::ostream& os) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void DumpLockHolders(std::ostream& os); ~Runtime(); @@ -207,7 +210,8 @@ class Runtime { return "2.0.0"; } - void VisitRoots(Heap::RootVisitor* visitor, void* arg) const; + void VisitRoots(Heap::RootVisitor* visitor, void* arg) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool HasJniDlsymLookupStub() const { return jni_stub_array_ != NULL; @@ -263,7 +267,7 @@ class Runtime { resolution_method_ = method; } - Method* CreateResolutionMethod(); + Method* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. enum CalleeSaveType { @@ -284,10 +288,14 @@ class Runtime { void SetCalleeSaveMethod(Method* method, CalleeSaveType type); - Method* CreateCalleeSaveMethod(InstructionSet instruction_set, CalleeSaveType type); + Method* CreateCalleeSaveMethod(InstructionSet instruction_set, CalleeSaveType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set); - Method* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set); + Method* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + Method* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); int32_t GetStat(int kind); @@ -322,8 +330,8 @@ class Runtime { return use_compile_time_class_path_; } - const std::vector& GetCompileTimeClassPath(const ClassLoader* class_loader); - void SetCompileTimeClassPath(const ClassLoader* class_loader, std::vector& class_path); + const std::vector& 
GetCompileTimeClassPath(jobject class_loader); + void SetCompileTimeClassPath(jobject class_loader, std::vector& class_path); private: static void InitPlatformSignalHandlers(); @@ -332,8 +340,9 @@ class Runtime { void BlockSignals(); - bool Init(const Options& options, bool ignore_unrecognized); - void InitNativeMethods(); + bool Init(const Options& options, bool ignore_unrecognized) + SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); + void InitNativeMethods() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); @@ -417,7 +426,7 @@ class Runtime { size_t method_trace_file_size_; Trace* tracer_; - typedef SafeMap > CompileTimeClassPaths; + typedef SafeMap, JobjectComparator> CompileTimeClassPaths; CompileTimeClassPaths compile_time_class_paths_; bool use_compile_time_class_path_; diff --git a/src/runtime_linux.cc b/src/runtime_linux.cc index a84dfc9ae8..8365079cb6 100644 --- a/src/runtime_linux.cc +++ b/src/runtime_linux.cc @@ -226,8 +226,7 @@ struct UContext { }; static void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) { - static Mutex unexpected_signal_lock("unexpected signal lock"); - MutexLock mu(unexpected_signal_lock); + MutexLock mu(*GlobalSynchronization::unexpected_signal_lock_); bool has_address = (signal_number == SIGILL || signal_number == SIGBUS || signal_number == SIGFPE || signal_number == SIGSEGV); diff --git a/src/runtime_support.cc b/src/runtime_support.cc index c0677653a2..0caccf6623 100644 --- a/src/runtime_support.cc +++ b/src/runtime_support.cc @@ -293,7 +293,8 @@ std::string MethodNameFromIndex(const Method* method, uint32_t ref, } static std::string ClassNameFromIndex(const Method* method, uint32_t ref, - verifier::VerifyErrorRefType ref_type, bool access) { + verifier::VerifyErrorRefType ref_type, bool access) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassLinker* class_linker = 
Runtime::Current()->GetClassLinker(); const DexFile& dex_file = class_linker->FindDexFile(method->GetDeclaringClass()->GetDexCache()); diff --git a/src/runtime_support.h b/src/runtime_support.h index d96cab9301..bba91615e7 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -44,37 +44,50 @@ class Method; class Object; // Helpers to give consistent descriptive exception messages -void ThrowNewIllegalAccessErrorClass(Thread* self, Class* referrer, Class* accessed); +void ThrowNewIllegalAccessErrorClass(Thread* self, Class* referrer, Class* accessed) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ThrowNewIllegalAccessErrorClassForMethodDispatch(Thread* self, Class* referrer, Class* accessed, const Method* caller, const Method* called, - InvokeType type); + InvokeType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ThrowNewIncompatibleClassChangeErrorClassForInterfaceDispatch(Thread* self, const Method* referrer, const Method* interface_method, - Object* this_object); -void ThrowNewIllegalAccessErrorField(Thread* self, Class* referrer, Field* accessed); -void ThrowNewIllegalAccessErrorFinalField(Thread* self, const Method* referrer, Field* accessed); - -void ThrowNewIllegalAccessErrorMethod(Thread* self, Class* referrer, Method* accessed); -void ThrowNullPointerExceptionForFieldAccess(Thread* self, Field* field, bool is_read); + Object* this_object) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +void ThrowNewIllegalAccessErrorField(Thread* self, Class* referrer, Field* accessed) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +void ThrowNewIllegalAccessErrorFinalField(Thread* self, const Method* referrer, Field* accessed) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + +void ThrowNewIllegalAccessErrorMethod(Thread* self, Class* referrer, Method* accessed) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +void 
ThrowNullPointerExceptionForFieldAccess(Thread* self, Field* field, bool is_read) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ThrowNullPointerExceptionForMethodAccess(Thread* self, Method* caller, uint32_t method_idx, - InvokeType type); -void ThrowNullPointerExceptionFromDexPC(Thread* self, Method* caller, uint32_t dex_pc); -void ThrowVerificationError(Thread* self, const Method* method, int32_t kind, int32_t ref); + InvokeType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +void ThrowNullPointerExceptionFromDexPC(Thread* self, Method* caller, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); +void ThrowVerificationError(Thread* self, const Method* method, int32_t kind, int32_t ref) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); std::string FieldNameFromIndex(const Method* method, uint32_t ref, - verifier::VerifyErrorRefType ref_type, bool access); + verifier::VerifyErrorRefType ref_type, bool access) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); std::string MethodNameFromIndex(const Method* method, uint32_t ref, - verifier::VerifyErrorRefType ref_type, bool access); + verifier::VerifyErrorRefType ref_type, bool access) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. // When verification/compiler hasn't been able to verify access, optionally perform an access // check. 
static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thread* self, - bool access_check) { + bool access_check) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); Runtime* runtime = Runtime::Current(); if (UNLIKELY(klass == NULL)) { @@ -108,7 +121,8 @@ static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thr // When verification/compiler hasn't been able to verify access, optionally perform an access // check. static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, - Thread* self, bool access_check) { + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (UNLIKELY(component_count < 0)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); @@ -134,15 +148,18 @@ static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32 } extern Array* CheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, - Thread* self, bool access_check); + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); extern Field* FindFieldFromCode(uint32_t field_idx, const Method* referrer, Thread* self, bool is_static, bool is_primitive, bool is_set, - size_t expected_size); + size_t expected_size) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Fast path field resolution that can't throw exceptions static inline Field* FindFieldFast(uint32_t field_idx, const Method* referrer, bool is_primitive, - size_t expected_size, bool is_set) { + size_t expected_size, bool is_set) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { return NULL; @@ -170,7 +187,8 @@ static inline Field* 
FindFieldFast(uint32_t field_idx, const Method* referrer, b // Fast path method resolution that can't throw exceptions static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, const Method* referrer, - bool access_check, InvokeType type) { + bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { bool is_direct = type == kStatic || type == kDirect; if (UNLIKELY(this_object == NULL && !is_direct)) { return NULL; @@ -204,12 +222,15 @@ static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, c } extern Method* FindMethodFromCode(uint32_t method_idx, Object* this_object, const Method* referrer, - Thread* self, bool access_check, InvokeType type); + Thread* self, bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); extern Class* ResolveVerifyAndClinit(uint32_t type_idx, const Method* referrer, Thread* self, - bool can_run_clinit, bool verify_access); + bool can_run_clinit, bool verify_access) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); -static inline String* ResolveStringFromCode(const Method* referrer, uint32_t string_idx) { +static inline String* ResolveStringFromCode(const Method* referrer, uint32_t string_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); } diff --git a/src/scoped_heap_lock.h b/src/scoped_heap_lock.h deleted file mode 100644 index 0dee5897be..0000000000 --- a/src/scoped_heap_lock.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_SRC_SCOPED_HEAP_LOCK_H_ -#define ART_SRC_SCOPED_HEAP_LOCK_H_ - -#include "heap.h" -#include "macros.h" -#include "runtime.h" - -namespace art { - -class ScopedHeapLock { - public: - ScopedHeapLock() { - Runtime::Current()->GetHeap()->Lock(); - } - - ~ScopedHeapLock() { - Runtime::Current()->GetHeap()->Unlock(); - } - - private: - DISALLOW_COPY_AND_ASSIGN(ScopedHeapLock); -}; - -} // namespace art - -#endif // ART_SRC_SCOPED_HEAP_LOCK_H_ diff --git a/src/scoped_jni_thread_state.h b/src/scoped_jni_thread_state.h deleted file mode 100644 index 1c9ab2ce86..0000000000 --- a/src/scoped_jni_thread_state.h +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "casts.h" -#include "jni_internal.h" -#include "thread.h" - -namespace art { - -// Entry/exit processing for transitions from Native to Runnable (ie within JNI functions). 
-// -// This class performs the necessary thread state switching to and from Runnable and lets us -// amortize the cost of working out the current thread. Additionally it lets us check (and repair) -// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects -// into jobjects via methods of this class. Performing this here enforces the Runnable thread state -// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code -// is also manipulating the Object. -// -// The destructor transitions back to the previous thread state, typically Native. In this case -// GC and thread suspension may occur. -class ScopedJniThreadState { - public: - explicit ScopedJniThreadState(JNIEnv* env, ThreadState new_state = kRunnable) - : env_(reinterpret_cast(env)), vm_(env_->vm), self_(ThreadForEnv(env)), - old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) { - self_->VerifyStack(); - } - - explicit ScopedJniThreadState(Thread* self, ThreadState new_state = kRunnable) - : env_(reinterpret_cast(self->GetJniEnv())), vm_(env_->vm), self_(self), - old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) { - if (!Vm()->work_around_app_jni_bugs && self != Thread::Current()) { - UnexpectedThreads(self, Thread::Current()); - } - self_->VerifyStack(); - } - - // Used when we want a scoped JNI thread state but have no thread/JNIEnv. - explicit ScopedJniThreadState(JavaVM* vm) - : env_(NULL), vm_(reinterpret_cast(vm)), self_(NULL), - old_thread_state_(kTerminated), thread_state_(kTerminated) {} - - ~ScopedJniThreadState() { - if (self_ != NULL) { - self_->SetState(old_thread_state_); - } - } - - JNIEnvExt* Env() const { - return env_; - } - - Thread* Self() const { - return self_; - } - - JavaVMExt* Vm() const { - return vm_; - } - - /* - * Add a local reference for an object to the indirect reference table associated with the - * current stack frame. 
When the native function returns, the reference will be discarded. - * Part of the ScopedJniThreadState as native code shouldn't be working on raw Object* without - * having transitioned its state. - * - * We need to allow the same reference to be added multiple times. - * - * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and - * it's best if we don't grab a mutex. - * - * Returns the local reference (currently just the same pointer that was - * passed in), or NULL on failure. - */ - template - T AddLocalReference(Object* obj) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - if (obj == NULL) { - return NULL; - } - - DCHECK_NE((reinterpret_cast(obj) & 0xffff0000), 0xebad0000); - - IndirectReferenceTable& locals = Env()->locals; - - uint32_t cookie = Env()->local_ref_cookie; - IndirectRef ref = locals.Add(cookie, obj); - - #if 0 // TODO: fix this to understand PushLocalFrame, so we can turn it on. - if (Env()->check_jni) { - size_t entry_count = locals.Capacity(); - if (entry_count > 16) { - LOG(WARNING) << "Warning: more than 16 JNI local references: " - << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n" - << Dumpable(locals); - // TODO: LOG(FATAL) in a later release? - } - } - #endif - - if (Vm()->work_around_app_jni_bugs) { - // Hand out direct pointers to support broken old apps. - return reinterpret_cast(obj); - } - - return reinterpret_cast(ref); - } - - template - T Decode(jobject obj) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - return down_cast(Self()->DecodeJObject(obj)); - } - - Field* DecodeField(jfieldID fid) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - #ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we should make these unique weak globals if Field instances can ever move. 
- UNIMPLEMENTED(WARNING); - #endif - return reinterpret_cast(fid); - } - - jfieldID EncodeField(Field* field) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - #ifdef MOVING_GARBAGE_COLLECTOR - UNIMPLEMENTED(WARNING); - #endif - return reinterpret_cast(field); - } - - Method* DecodeMethod(jmethodID mid) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - #ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we should make these unique weak globals if Method instances can ever move. - UNIMPLEMENTED(WARNING); - #endif - return reinterpret_cast(mid); - } - - jmethodID EncodeMethod(Method* method) const { - DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. - #ifdef MOVING_GARBAGE_COLLECTOR - UNIMPLEMENTED(WARNING); - #endif - return reinterpret_cast(method); - } - - private: - static Thread* ThreadForEnv(JNIEnv* env) { - JNIEnvExt* full_env(reinterpret_cast(env)); - bool work_around_app_jni_bugs = full_env->vm->work_around_app_jni_bugs; - Thread* env_self = full_env->self; - Thread* self = work_around_app_jni_bugs ? Thread::Current() : env_self; - if (!work_around_app_jni_bugs && self != env_self) { - UnexpectedThreads(env_self, self); - } - return self; - } - - static void UnexpectedThreads(Thread* found_self, Thread* expected_self) { - // TODO: pass through function name so we can use it here instead of NULL... - JniAbortF(NULL, "JNIEnv for %s used on %s", - found_self != NULL ? ToStr(*found_self).c_str() : "NULL", - expected_self != NULL ? ToStr(*expected_self).c_str() : "NULL"); - - } - - // The full JNIEnv. - JNIEnvExt* const env_; - // The full JavaVM. - JavaVMExt* const vm_; - // Cached current thread derived from the JNIEnv. - Thread* const self_; - // Previous thread state, most likely kNative. - const ThreadState old_thread_state_; - // Local cache of thread state to enable quick sanity checks. 
- const ThreadState thread_state_; - DISALLOW_COPY_AND_ASSIGN(ScopedJniThreadState); -}; - -} // namespace art diff --git a/src/scoped_thread_list_lock.cc b/src/scoped_thread_list_lock.cc deleted file mode 100644 index 269c97ed3b..0000000000 --- a/src/scoped_thread_list_lock.cc +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "scoped_thread_list_lock.h" - -#include "runtime.h" -#include "thread_list.h" - -namespace art { - -ScopedThreadListLock::ScopedThreadListLock() { - // Avoid deadlock between two threads trying to SuspendAll - // simultaneously by going to kVmWait if the lock cannot be - // immediately acquired. - ThreadList* thread_list = Runtime::Current()->GetThreadList(); - if (!thread_list->thread_list_lock_.TryLock()) { - Thread* self = Thread::Current(); - if (self == NULL) { - // Self may be null during shutdown, but in that case there's no point going to kVmWait. - thread_list->thread_list_lock_.Lock(); - } else { - ThreadState old_thread_state = self->SetState(kVmWait); - thread_list->thread_list_lock_.Lock(); - // If we have the lock, by definition there's no GC in progress (though we - // might be taking the lock in order to start one). We avoid the suspend - // check here so we don't risk going to sleep on the thread suspend count lock - // while holding the thread list lock. 
- self->SetStateWithoutSuspendCheck(old_thread_state); - } - } -} - -ScopedThreadListLock::~ScopedThreadListLock() { - Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock(); -} - -} // namespace art diff --git a/src/scoped_thread_list_lock.h b/src/scoped_thread_list_lock.h deleted file mode 100644 index 8650c57f23..0000000000 --- a/src/scoped_thread_list_lock.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_SRC_SCOPED_THREAD_LIST_LOCK_H_ -#define ART_SRC_SCOPED_THREAD_LIST_LOCK_H_ - -#include "macros.h" - -namespace art { - -class ScopedThreadListLock { - public: - ScopedThreadListLock(); - ~ScopedThreadListLock(); - - private: - DISALLOW_COPY_AND_ASSIGN(ScopedThreadListLock); -}; - -} // namespace art - -#endif // ART_SRC_SCOPED_THREAD_LIST_LOCK_H_ diff --git a/src/scoped_thread_list_lock_releaser.cc b/src/scoped_thread_list_lock_releaser.cc deleted file mode 100644 index d15eae5351..0000000000 --- a/src/scoped_thread_list_lock_releaser.cc +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "scoped_thread_list_lock_releaser.h" - -#include "runtime.h" -#include "thread_list.h" - -namespace art { - -ScopedThreadListLockReleaser::ScopedThreadListLockReleaser() : unlocked_(false) { - if (Thread::Current() == NULL) { - CHECK(Runtime::Current()->IsShuttingDown()); - return; - } - - if (Thread::Current()->held_mutexes_[kThreadListLock] > 0) { - Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock(); - unlocked_ = true; - } -} - -ScopedThreadListLockReleaser::~ScopedThreadListLockReleaser() { - if (unlocked_) { - Runtime::Current()->GetThreadList()->thread_list_lock_.Lock(); - } -} - -} // namespace art diff --git a/src/scoped_thread_list_lock_releaser.h b/src/scoped_thread_list_lock_releaser.h deleted file mode 100644 index af656d5dda..0000000000 --- a/src/scoped_thread_list_lock_releaser.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_SRC_SCOPED_THREAD_LIST_LOCK_RELEASER_H_ -#define ART_SRC_SCOPED_THREAD_LIST_LOCK_RELEASER_H_ - -#include "macros.h" - -namespace art { - -class ScopedThreadListLockReleaser { - public: - ScopedThreadListLockReleaser(); - ~ScopedThreadListLockReleaser(); - - private: - // Whether or not we unlocked the thread list lock. - bool unlocked_; - - DISALLOW_COPY_AND_ASSIGN(ScopedThreadListLockReleaser); -}; - -} // namespace art - -#endif // ART_SRC_SCOPED_THREAD_LIST_LOCK_RELEASER_H_ diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h new file mode 100644 index 0000000000..745e2d6803 --- /dev/null +++ b/src/scoped_thread_state_change.h @@ -0,0 +1,315 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_ +#define ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_ + +#include "casts.h" +#include "thread.h" + +namespace art { + +// Scoped change into and out of a particular state. Handles Runnable transitions that require +// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and +// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects, +// the unchecked variant doesn't aid annotalysis. 
+class ScopedThreadStateChange { + public: + ScopedThreadStateChange(Thread* self, ThreadState new_thread_state) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) { + if (self_ == NULL) { + // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL. + old_thread_state_ = kTerminated; + CHECK(!Runtime::Current()->IsStarted() || Runtime::Current()->IsShuttingDown()); + } else { + bool runnable_transition; + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + old_thread_state_ = self->GetState(); + runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable; + if (!runnable_transition) { + self_->SetState(new_thread_state); + } + } + if (runnable_transition && old_thread_state_ != new_thread_state) { + if (new_thread_state == kRunnable) { + self_->TransitionFromSuspendedToRunnable(); + } else { + DCHECK_EQ(old_thread_state_, kRunnable); + self_->TransitionFromRunnableToSuspended(new_thread_state); + } + } + } + } + + ~ScopedThreadStateChange() LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) { + if (self_ == NULL) { + if (!expected_has_no_thread_) { + CHECK(Runtime::Current()->IsShuttingDown()); + } + } else { + if (old_thread_state_ != thread_state_) { + if (old_thread_state_ == kRunnable) { + self_->TransitionFromSuspendedToRunnable(); + } else if (thread_state_ == kRunnable) { + self_->TransitionFromRunnableToSuspended(old_thread_state_); + } else { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + self_->SetState(old_thread_state_); + } + } + } + } + + Thread* Self() const { + return self_; + } + + protected: + // Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*. 
+ ScopedThreadStateChange() + : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated), + expected_has_no_thread_(true) {} + + Thread* const self_; + const ThreadState thread_state_; + + private: + ThreadState old_thread_state_; + const bool expected_has_no_thread_; + + DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange); +}; + +// Entry/exit processing for transitions from Native to Runnable (ie within JNI functions). +// +// This class performs the necessary thread state switching to and from Runnable and lets us +// amortize the cost of working out the current thread. Additionally it lets us check (and repair) +// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects +// into jobjects via methods of this class. Performing this here enforces the Runnable thread state +// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code +// is also manipulating the Object. +// +// The destructor transitions back to the previous thread state, typically Native. In this state +// GC and thread suspension may occur. +// +// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a shared of +// the mutator_lock_ will be acquired on construction. +class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { + public: + explicit ScopedObjectAccessUnchecked(JNIEnv* env) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + : ScopedThreadStateChange(ThreadForEnv(env), kRunnable), + env_(reinterpret_cast(env)), vm_(env_->vm) { + self_->VerifyStack(); + } + + explicit ScopedObjectAccessUnchecked(Thread* self) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + : ScopedThreadStateChange(self, kRunnable), + env_(reinterpret_cast(self->GetJniEnv())), + vm_(env_ != NULL ? 
env_->vm : NULL) { + if (Vm() != NULL && !Vm()->work_around_app_jni_bugs && self != Thread::Current()) { + UnexpectedThreads(self, Thread::Current()); + } + self_->VerifyStack(); + } + + // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't + // change into Runnable or acquire a share on the mutator_lock_. + explicit ScopedObjectAccessUnchecked(JavaVM* vm) + : ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast(vm)) {} + + JNIEnvExt* Env() const { + return env_; + } + + JavaVMExt* Vm() const { + return vm_; + } + + /* + * Add a local reference for an object to the indirect reference table associated with the + * current stack frame. When the native function returns, the reference will be discarded. + * Part of the ScopedJniThreadState as native code shouldn't be working on raw Object* without + * having transitioned its state. + * + * We need to allow the same reference to be added multiple times. + * + * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and + * it's best if we don't grab a mutex. + * + * Returns the local reference (currently just the same pointer that was + * passed in), or NULL on failure. + */ + template + T AddLocalReference(Object* obj) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + if (obj == NULL) { + return NULL; + } + + DCHECK_NE((reinterpret_cast(obj) & 0xffff0000), 0xebad0000); + + IndirectReferenceTable& locals = Env()->locals; + + uint32_t cookie = Env()->local_ref_cookie; + IndirectRef ref = locals.Add(cookie, obj); + +#if 0 // TODO: fix this to understand PushLocalFrame, so we can turn it on. 
+ if (Env()->check_jni) { + size_t entry_count = locals.Capacity(); + if (entry_count > 16) { + LOG(WARNING) << "Warning: more than 16 JNI local references: " + << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n" + << Dumpable(locals); + // TODO: LOG(FATAL) in a later release? + } + } +#endif + + if (Vm()->work_around_app_jni_bugs) { + // Hand out direct pointers to support broken old apps. + return reinterpret_cast(obj); + } + + return reinterpret_cast(ref); + } + + template + T Decode(jobject obj) const + LOCKS_EXCLUDED(JavaVMExt::globals_lock, + JavaVMExt::weak_globals_lock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. + return down_cast(Self()->DecodeJObject(obj)); + } + + Field* DecodeField(jfieldID fid) const + LOCKS_EXCLUDED(JavaVMExt::globals_lock, + JavaVMExt::weak_globals_lock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. +#ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we should make these unique weak globals if Field instances can ever move. + UNIMPLEMENTED(WARNING); +#endif + return reinterpret_cast(fid); + } + + jfieldID EncodeField(Field* field) const + LOCKS_EXCLUDED(JavaVMExt::globals_lock, + JavaVMExt::weak_globals_lock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. 
+#ifdef MOVING_GARBAGE_COLLECTOR + UNIMPLEMENTED(WARNING); +#endif + return reinterpret_cast(field); + } + + Method* DecodeMethod(jmethodID mid) const + LOCKS_EXCLUDED(JavaVMExt::globals_lock, + JavaVMExt::weak_globals_lock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. +#ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we should make these unique weak globals if Method instances can ever move. + UNIMPLEMENTED(WARNING); +#endif + return reinterpret_cast(mid); + } + + jmethodID EncodeMethod(Method* method) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. +#ifdef MOVING_GARBAGE_COLLECTOR + UNIMPLEMENTED(WARNING); +#endif + return reinterpret_cast(method); + } + + private: + static Thread* ThreadForEnv(JNIEnv* env) { + JNIEnvExt* full_env(reinterpret_cast(env)); + bool work_around_app_jni_bugs = full_env->vm->work_around_app_jni_bugs; + Thread* env_self = full_env->self; + Thread* self = work_around_app_jni_bugs ? Thread::Current() : env_self; + if (!work_around_app_jni_bugs && self != env_self) { + UnexpectedThreads(env_self, self); + } + return self; + } + + static void UnexpectedThreads(Thread* found_self, Thread* expected_self) { + // TODO: pass through function name so we can use it here instead of NULL... + JniAbortF(NULL, "JNIEnv for %s used on %s", + found_self != NULL ? ToStr(*found_self).c_str() : "NULL", + expected_self != NULL ? ToStr(*expected_self).c_str() : "NULL"); + + } + + // The full JNIEnv. + JNIEnvExt* const env_; + // The full JavaVM. + JavaVMExt* const vm_; + + DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked); +}; + +// Annotalysis helping variant of the above. 
+class ScopedObjectAccess : public ScopedObjectAccessUnchecked { + public: + explicit ScopedObjectAccess(JNIEnv* env) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + : ScopedObjectAccessUnchecked(env) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + } + + explicit ScopedObjectAccess(Thread* self) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + : ScopedObjectAccessUnchecked(self) { + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + } + + ~ScopedObjectAccess() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + // Base class will release share of lock. Invoked after this destructor. + } + + private: + // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that + // routines operating with just a VM are sound, they are not, but when you have just a VM + // you cannot call the unsound routines. 
+ explicit ScopedObjectAccess(JavaVM* vm) + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + : ScopedObjectAccessUnchecked(vm) {} + + friend class ScopedCheck; + DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess); +}; + +} // namespace art + +#endif // ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_ diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc index 919923eb84..156aec6a63 100644 --- a/src/signal_catcher.cc +++ b/src/signal_catcher.cc @@ -30,7 +30,7 @@ #include "heap.h" #include "os.h" #include "runtime.h" -#include "scoped_heap_lock.h" +#include "scoped_thread_state_change.h" #include "signal_set.h" #include "thread.h" #include "thread_list.h" @@ -99,7 +99,7 @@ void SignalCatcher::Output(const std::string& s) { return; } - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); + ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput); int fd = open(stack_trace_file_.c_str(), O_APPEND | O_CREAT | O_WRONLY, 0666); if (fd == -1) { PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'"; @@ -118,17 +118,27 @@ void SignalCatcher::HandleSigQuit() { Runtime* runtime = Runtime::Current(); ThreadList* thread_list = runtime->GetThreadList(); - // We take the heap lock before suspending all threads so we don't end up in a situation where - // one of the suspended threads suspended via the implicit FullSuspendCheck on the slow path of - // Heap::Lock, which is the only case where a thread can be suspended while holding the heap lock. - // (We need the heap lock when we dump the thread list. We could probably fix this by duplicating - // more state from java.lang.Thread in struct Thread.) - ScopedHeapLock heap_lock; thread_list->SuspendAll(); + // We should exclusively hold the mutator lock, set state to Runnable without a pending + // suspension to avoid giving away or trying re-acquire the mutator lock. 
+ GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Thread* self = Thread::Current(); + ThreadState old_state; + int suspend_count; + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + suspend_count = self->GetSuspendCount(); + if (suspend_count != 0) { + CHECK_EQ(suspend_count, 1); + self->ModifySuspendCount(-1, false); + } + old_state = self->SetState(kRunnable); + } + std::ostringstream os; os << "\n" - << "----- pid " << getpid() << " at " << GetIsoDate() << " -----\n"; + << "----- pid " << getpid() << " at " << GetIsoDate() << " -----\n"; DumpCmdLine(os); @@ -144,7 +154,13 @@ void SignalCatcher::HandleSigQuit() { } os << "----- end " << getpid() << " -----\n"; - + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + self->SetState(old_state); + if (suspend_count != 0) { + self->ModifySuspendCount(+1, false); + } + } thread_list->ResumeAll(); Output(os.str()); @@ -156,7 +172,7 @@ void SignalCatcher::HandleSigUsr1() { } int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) { - ScopedThreadStateChange tsc(self, kVmWait); + ScopedThreadStateChange tsc(self, kWaitingInMainSignalCatcherLoop); // Signals for sigwait() must be blocked but not ignored. 
We // block signals like SIGQUIT for all threads, so the condition @@ -183,7 +199,6 @@ void* SignalCatcher::Run(void* arg) { runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup()); Thread* self = Thread::Current(); - self->SetState(kRunnable); { MutexLock mu(signal_catcher->lock_); diff --git a/src/signal_catcher.h b/src/signal_catcher.h index 35e035f404..11a2c09382 100644 --- a/src/signal_catcher.h +++ b/src/signal_catcher.h @@ -35,7 +35,10 @@ class SignalCatcher { explicit SignalCatcher(const std::string& stack_trace_file); ~SignalCatcher(); - void HandleSigQuit(); + void HandleSigQuit() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + private: static void* Run(void* arg); diff --git a/src/space.cc b/src/space.cc index 02230e146d..a828d91d8d 100644 --- a/src/space.cc +++ b/src/space.cc @@ -45,7 +45,8 @@ size_t AllocSpace::bitmap_index_ = 0; AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end, size_t growth_limit) - : Space(name, mem_map, begin, end, GCRP_ALWAYS_COLLECT), mspace_(mspace), growth_limit_(growth_limit) { + : Space(name, mem_map, begin, end, GCRP_ALWAYS_COLLECT), lock_("allocation space lock"), + mspace_(mspace), growth_limit_(growth_limit) { CHECK(mspace != NULL); size_t bitmap_index = bitmap_index_++; @@ -155,29 +156,37 @@ void AllocSpace::SwapBitmaps() { mark_bitmap_.reset(temp_live_bitmap); } -Object* AllocSpace::AllocWithoutGrowth(size_t num_bytes) { +Object* AllocSpace::AllocWithoutGrowthLocked(size_t num_bytes) { Object* result = reinterpret_cast(mspace_calloc(mspace_, 1, num_bytes)); #if DEBUG_SPACES if (result != NULL) { CHECK(Contains(result)) << "Allocation (" << reinterpret_cast(result) - << ") not in bounds of heap " << *this; + << ") not in bounds of allocation space " << *this; } #endif return result; } +Object* AllocSpace::AllocWithoutGrowth(size_t 
num_bytes) { + MutexLock mu(lock_); + return AllocWithoutGrowthLocked(num_bytes); +} + Object* AllocSpace::AllocWithGrowth(size_t num_bytes) { + MutexLock mu(lock_); // Grow as much as possible within the mspace. size_t max_allowed = Capacity(); mspace_set_footprint_limit(mspace_, max_allowed); // Try the allocation. - void* ptr = AllocWithoutGrowth(num_bytes); + void* ptr = AllocWithoutGrowthLocked(num_bytes); // Shrink back down as small as possible. size_t footprint = mspace_footprint(mspace_); mspace_set_footprint_limit(mspace_, footprint); // Return the new allocation or NULL. Object* result = reinterpret_cast(ptr); +#if DEBUG_SPACES CHECK(result == NULL || Contains(result)); +#endif return result; } @@ -228,6 +237,7 @@ AllocSpace* AllocSpace::CreateZygoteSpace() { } void AllocSpace::Free(Object* ptr) { + MutexLock mu(lock_); #if DEBUG_SPACES CHECK(ptr != NULL); CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this; @@ -236,6 +246,7 @@ void AllocSpace::Free(Object* ptr) { } void AllocSpace::FreeList(size_t num_ptrs, Object** ptrs) { + MutexLock mu(lock_); #if DEBUG_SPACES CHECK(ptrs != NULL); size_t num_broken_ptrs = 0; @@ -275,6 +286,7 @@ extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) { } void* AllocSpace::MoreCore(intptr_t increment) { + lock_.AssertHeld(); byte* original_end = end_; if (increment != 0) { VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment); @@ -330,6 +342,7 @@ void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) } void AllocSpace::Trim() { + MutexLock mu(lock_); // Trim to release memory at the end of the space. mspace_trim(mspace_, 0); // Visit space looking for page-sized holes to advise the kernel we don't need. 
@@ -338,14 +351,17 @@ void AllocSpace::Trim() { void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg), void* arg) { + MutexLock mu(lock_); mspace_inspect_all(mspace_, callback, arg); } size_t AllocSpace::GetFootprintLimit() { + MutexLock mu(lock_); return mspace_footprint_limit(mspace_); } void AllocSpace::SetFootprintLimit(size_t new_size) { + MutexLock mu(lock_); VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size); // Compare against the actual footprint, rather than the Size(), because the heap may not have // grown all the way to the allowed size yet. diff --git a/src/space.h b/src/space.h index 1aeb33e5b9..6ab3302c95 100644 --- a/src/space.h +++ b/src/space.h @@ -52,7 +52,8 @@ class Space { byte* requested_begin); // create a Space from an image file. cannot be used for future allocation or collected. - static ImageSpace* CreateImageSpace(const std::string& image); + static ImageSpace* CreateImageSpace(const std::string& image) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); virtual ~Space() {} @@ -122,7 +123,8 @@ class Space { } protected: - Space(const std::string& name, MemMap* mem_map, byte* begin, byte* end, GcRetentionPolicy gc_retention_policy) + Space(const std::string& name, MemMap* mem_map, byte* begin, byte* end, + GcRetentionPolicy gc_retention_policy) : name_(name), mem_map_(mem_map), begin_(begin), @@ -229,6 +231,8 @@ class AllocSpace : public Space { AllocSpace* CreateZygoteSpace(); private: + Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_); + friend class Space; UniquePtr live_bitmap_; @@ -245,6 +249,9 @@ class AllocSpace : public Space { // The boundary tag overhead. static const size_t kChunkOverhead = kWordSize; + // Used to ensure mutual exclusion when the allocation spaces data structures are being modified. 
+ Mutex lock_; + // Underlying malloc space void* const mspace_; @@ -272,7 +279,8 @@ class ImageSpace : public Space { } // Mark the objects defined in this space in the given live bitmap - void RecordImageAllocations(SpaceBitmap* live_bitmap) const; + void RecordImageAllocations(SpaceBitmap* live_bitmap) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); virtual bool IsAllocSpace() const { return false; diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc index 7da8146a14..438237d0e8 100644 --- a/src/space_bitmap.cc +++ b/src/space_bitmap.cc @@ -225,7 +225,8 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callb // Walk instance fields of the given Class. Separate function to allow recursion on the super // class. static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, - Class* klass, void* arg) { + Class* klass, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Visit fields of parent classes first. Class* super = klass->GetSuperClass(); if (super != NULL) { @@ -249,7 +250,8 @@ static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* call // For an unvisited object, visit it then all its children found via fields. 
static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, - void* arg) { + void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (visited->Test(obj)) { return; } diff --git a/src/space_bitmap.h b/src/space_bitmap.h index adf1996afe..bbf60f346a 100644 --- a/src/space_bitmap.h +++ b/src/space_bitmap.h @@ -111,7 +111,8 @@ class SpaceBitmap { } template - void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const { + void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { DCHECK_LT(visit_begin, visit_end); const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment; @@ -177,7 +178,8 @@ class SpaceBitmap { void Walk(Callback* callback, void* arg); - void InOrderWalk(Callback* callback, void* arg); + void InOrderWalk(Callback* callback, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ScanWalk(uintptr_t base, uintptr_t max, ScanCallback* thunk, void* arg); diff --git a/src/stack.cc b/src/stack.cc index dd319bc0af..9795a77cbe 100644 --- a/src/stack.cc +++ b/src/stack.cc @@ -24,37 +24,6 @@ namespace art { -class StackGetter { - public: - StackGetter(const ScopedJniThreadState& ts, Thread* thread) - : ts_(ts), thread_(thread), trace_(NULL) { - } - - static void Callback(void* arg) { - reinterpret_cast(arg)->Callback(); - } - - jobject GetTrace() { - return trace_; - } - - private: - void Callback() { - trace_ = thread_->CreateInternalStackTrace(ts_); - } - - const ScopedJniThreadState& ts_; - Thread* const thread_; - jobject trace_; -}; - -jobject GetThreadStack(const ScopedJniThreadState& ts, Thread* thread) { - ThreadList* thread_list = Runtime::Current()->GetThreadList(); - StackGetter stack_getter(ts, thread); - thread_list->RunWhileSuspended(thread, StackGetter::Callback, &stack_getter); - return 
stack_getter.GetTrace(); -} - void ManagedStack::PushManagedStackFragment(ManagedStack* fragment) { // Copy this top fragment into given fragment. memcpy(fragment, this, sizeof(ManagedStack)); @@ -201,7 +170,7 @@ size_t StackVisitor::ComputeNumFrames() const { return visitor.frames; } -void StackVisitor::SanityCheckFrame() { +void StackVisitor::SanityCheckFrame() const { #ifndef NDEBUG Method* method = GetMethod(); CHECK(method->GetClass() == Method::GetMethodClass() || diff --git a/src/stack.h b/src/stack.h index fb0bc486b7..578c631141 100644 --- a/src/stack.h +++ b/src/stack.h @@ -32,11 +32,9 @@ class Method; class Object; class ShadowFrame; class StackIndirectReferenceTable; -class ScopedJniThreadState; +class ScopedObjectAccess; class Thread; -jobject GetThreadStack(const ScopedJniThreadState&, Thread*); - class ShadowFrame { public: // Number of references contained within this shadow frame @@ -217,6 +215,7 @@ class StackVisitor { protected: StackVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : stack_start_(stack), trace_stack_(trace_stack), cur_shadow_frame_(NULL), cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0), context_(context) {} @@ -225,9 +224,10 @@ class StackVisitor { virtual ~StackVisitor() {} // Return 'true' if we should continue to visit more frames, 'false' to stop. 
- virtual bool VisitFrame() = 0; + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) = 0; - void WalkStack(bool include_transitions = false); + void WalkStack(bool include_transitions = false) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Method* GetMethod() const { if (cur_shadow_frame_ != NULL) { @@ -255,28 +255,30 @@ class StackVisitor { return *reinterpret_cast(save_addr); } - uint32_t GetDexPc() const; + uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns the height of the stack in the managed stack frames, including transitions. - size_t GetFrameHeight() { + size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetNumFrames() - cur_depth_; } // Returns a frame ID for JDWP use, starting from 1. - size_t GetFrameId() { + size_t GetFrameId() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return GetFrameHeight() + 1; } - size_t GetNumFrames() { + size_t GetNumFrames() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (num_frames_ == 0) { num_frames_ = ComputeNumFrames(); } return num_frames_; } - uint32_t GetVReg(Method* m, int vreg) const; + uint32_t GetVReg(Method* m, int vreg) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void SetVReg(Method* m, int vreg, uint32_t new_value); + void SetVReg(Method* m, int vreg, uint32_t new_value) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); uintptr_t GetGPR(uint32_t reg) const; @@ -368,13 +370,13 @@ class StackVisitor { } private: - size_t ComputeNumFrames() const; + size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); TraceStackFrame GetTraceStackFrame(uint32_t depth) const { return trace_stack_->at(trace_stack_->size() - depth - 1); } - void SanityCheckFrame(); + void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); const ManagedStack* const 
stack_start_; const std::vector* const trace_stack_; diff --git a/src/thread.cc b/src/thread.cc index e5ade4d785..3bae0af0e5 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -40,7 +40,7 @@ #include "reflection.h" #include "runtime.h" #include "runtime_support.h" -#include "scoped_jni_thread_state.h" +#include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "space.h" #include "stack.h" @@ -53,6 +53,7 @@ namespace art { pthread_key_t Thread::pthread_key_self_; +ConditionVariable* Thread::resume_cond_; static const char* kThreadNameDuringStartup = ""; @@ -101,15 +102,10 @@ void* Thread::CreateCallback(void* arg) { Thread* self = reinterpret_cast(arg); self->Init(); - // Wait until it's safe to start running code. (There may have been a suspend-all - // in progress while we were starting up.) - Runtime* runtime = Runtime::Current(); - runtime->GetThreadList()->WaitForGo(); - { - ScopedJniThreadState ts(self); + ScopedObjectAccess soa(self); { - SirtRef thread_name(self->GetThreadName(ts)); + SirtRef thread_name(self->GetThreadName(soa)); self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); } @@ -119,29 +115,37 @@ void* Thread::CreateCallback(void* arg) { CHECK(self->peer_ != NULL); Object* receiver = self->peer_; jmethodID mid = WellKnownClasses::java_lang_Thread_run; - Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); + Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid)); m->Invoke(self, receiver, NULL, NULL); } - // Detach. - runtime->GetThreadList()->Unregister(); + // Detach and delete self. 
+ Runtime::Current()->GetThreadList()->Unregister(self); return NULL; } -static void SetVmData(const ScopedJniThreadState& ts, Object* managed_thread, - Thread* native_thread) { - Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); +static void SetVmData(const ScopedObjectAccess& soa, Object* managed_thread, + Thread* native_thread) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); f->SetInt(managed_thread, reinterpret_cast(native_thread)); } -Thread* Thread::FromManagedThread(const ScopedJniThreadState& ts, Object* thread_peer) { - Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData); - return reinterpret_cast(static_cast(f->GetInt(thread_peer))); +Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object* thread_peer) { + Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); + Thread* result = reinterpret_cast(static_cast(f->GetInt(thread_peer))); + // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ + // to stop it from going away. 
+ MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + if (result != NULL && !result->IsSuspended()) { + GlobalSynchronization::thread_list_lock_->AssertHeld(); + } + return result; } -Thread* Thread::FromManagedThread(const ScopedJniThreadState& ts, jobject java_thread) { - return FromManagedThread(ts, ts.Decode(java_thread)); +Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) { + return FromManagedThread(soa, soa.Decode(java_thread)); } static size_t FixStackSize(size_t stack_size) { @@ -210,42 +214,38 @@ static void TearDownAlternateSignalStack() { void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool daemon) { Thread* native_thread = new Thread(daemon); { - ScopedJniThreadState ts(env); - Object* peer = ts.Decode(java_peer); + ScopedObjectAccess soa(env); + Object* peer = soa.Decode(java_peer); CHECK(peer != NULL); native_thread->peer_ = peer; stack_size = FixStackSize(stack_size); - // Thread.start is synchronized, so we know that vmData is 0, - // and know that we're not racing to assign it. - SetVmData(ts, peer, native_thread); - - int pthread_create_result = 0; - { - ScopedThreadStateChange tsc(Thread::Current(), kVmWait); - pthread_t new_pthread; - pthread_attr_t attr; - CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); - CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); - CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); - pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, native_thread); - CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); - } - - if (pthread_create_result != 0) { - // pthread_create(3) failed, so clean up. 
- SetVmData(ts, peer, 0); - delete native_thread; - - std::string msg(StringPrintf("pthread_create (%s stack) failed: %s", - PrettySize(stack_size).c_str(), strerror(pthread_create_result))); - Thread::Current()->ThrowOutOfMemoryError(msg.c_str()); - return; - } + // Thread.start is synchronized, so we know that vmData is 0, and know that we're not racing to + // assign it. + SetVmData(soa, peer, native_thread); + } + + pthread_t new_pthread; + pthread_attr_t attr; + CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); + CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); + CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); + int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, native_thread); + CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); + + if (UNLIKELY(pthread_create_result != 0)) { + // pthread_create(3) failed, so clean up. + ScopedObjectAccess soa(env); + Object* peer = soa.Decode(java_peer); + SetVmData(soa, peer, 0); + delete native_thread; + + std::string msg(StringPrintf("pthread_create (%s stack) failed: %s", + PrettySize(stack_size).c_str(), strerror(pthread_create_result))); + Thread::Current()->ThrowOutOfMemoryError(msg.c_str()); + return; } - // Let the child know when it's safe to start running. 
- Runtime::Current()->GetThreadList()->SignalGo(native_thread); } void Thread::Init() { @@ -262,7 +262,9 @@ void Thread::Init() { Runtime* runtime = Runtime::Current(); CHECK(runtime != NULL); - + if (runtime->IsShuttingDown()) { + UNIMPLEMENTED(WARNING) << "Thread attaching whilst runtime is shutting down"; + } thin_lock_id_ = runtime->GetThreadList()->AllocThreadId(); pthread_self_ = pthread_self(); @@ -273,14 +275,18 @@ void Thread::Init() { jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM()); - runtime->GetThreadList()->Register(); + runtime->GetThreadList()->Register(this); } Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group) { Thread* self = new Thread(as_daemon); self->Init(); - self->SetState(kNative); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(self->GetState(), kRunnable); + self->SetState(kNative); + } // If we're the main thread, ClassLinker won't be created until after we're attached, // so that thread needs a two-stage attach. Regular threads don't need this hack. 
@@ -313,30 +319,33 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) jboolean thread_is_daemon = as_daemon; ScopedLocalRef peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); - peer_ = DecodeJObject(peer.get()); - if (peer_ == NULL) { - CHECK(IsExceptionPending()); - return; + { + ScopedObjectAccess soa(env); + peer_ = DecodeJObject(peer.get()); + if (peer_ == NULL) { + CHECK(IsExceptionPending()); + return; + } } env->CallNonvirtualVoidMethod(peer.get(), WellKnownClasses::java_lang_Thread, WellKnownClasses::java_lang_Thread_init, thread_group, thread_name.get(), thread_priority, thread_is_daemon); - CHECK(!IsExceptionPending()) << " " << PrettyTypeOf(GetException()); + AssertNoPendingException(); - ScopedJniThreadState ts(this); - SetVmData(ts, peer_, Thread::Current()); - SirtRef peer_thread_name(GetThreadName(ts)); + ScopedObjectAccess soa(this); + SetVmData(soa, peer_, Thread::Current()); + SirtRef peer_thread_name(GetThreadName(soa)); if (peer_thread_name.get() == NULL) { // The Thread constructor should have set the Thread.name to a // non-null value. However, because we can run without code // available (in the compiler, in tests), we manually assign the // fields the constructor should have set. 
- ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->SetBoolean(peer_, thread_is_daemon); - ts.DecodeField(WellKnownClasses::java_lang_Thread_group)->SetObject(peer_, ts.Decode(thread_group)); - ts.DecodeField(WellKnownClasses::java_lang_Thread_name)->SetObject(peer_, ts.Decode(thread_name.get())); - ts.DecodeField(WellKnownClasses::java_lang_Thread_priority)->SetInt(peer_, thread_priority); - peer_thread_name.reset(GetThreadName(ts)); + soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->SetBoolean(peer_, thread_is_daemon); + soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->SetObject(peer_, soa.Decode(thread_group)); + soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->SetObject(peer_, soa.Decode(thread_name.get())); + soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->SetInt(peer_, thread_priority); + peer_thread_name.reset(GetThreadName(soa)); } // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. if (peer_thread_name.get() != NULL) { @@ -403,27 +412,27 @@ void Thread::InitStackHwm() { CHECK_GT(&stack_variable, reinterpret_cast(stack_end_)); } -void Thread::Dump(std::ostream& os, bool full) const { - if (full) { - DumpState(os); - DumpStack(os); - } else { - os << "Thread["; - if (GetThinLockId() != 0) { - // If we're in kStarting, we won't have a thin lock id or tid yet. - os << GetThinLockId() - << ",tid=" << GetTid() << ','; - } - os << GetState() - << ",Thread*=" << this - << ",peer=" << peer_ - << ",\"" << *name_ << "\"" - << "]"; +void Thread::ShortDump(std::ostream& os) const { + os << "Thread["; + if (GetThinLockId() != 0) { + // If we're in kStarting, we won't have a thin lock id or tid yet. 
+ os << GetThinLockId() + << ",tid=" << GetTid() << ','; } + os << GetStateUnsafe() + << ",Thread*=" << this + << ",peer=" << peer_ + << ",\"" << *name_ << "\"" + << "]"; } -String* Thread::GetThreadName(const ScopedJniThreadState& ts) const { - Field* f = ts.DecodeField(WellKnownClasses::java_lang_Thread_name); +void Thread::Dump(std::ostream& os) const { + DumpState(os); + DumpStack(os); +} + +String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const { + Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); return (peer_ != NULL) ? reinterpret_cast(f->GetObject(peer_)) : NULL; } @@ -431,19 +440,177 @@ void Thread::GetThreadName(std::string& name) const { name.assign(*name_); } +// Attempt to rectify locks so that we dump thread list with required locks before exiting. +static void UnsafeLogFatalForSuspendCount(Thread* self) NO_THREAD_SAFETY_ANALYSIS { + GlobalSynchronization::thread_suspend_count_lock_->Unlock(); + GlobalSynchronization::mutator_lock_->SharedTryLock(); + if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) { + LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; + } + GlobalSynchronization::thread_list_lock_->TryLock(); + if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) { + LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; + } + std::ostringstream ss; + Runtime::Current()->GetThreadList()->DumpLocked(ss); + LOG(FATAL) << self << " suspend count already zero.\n" << ss.str(); +} + +void Thread::ModifySuspendCount(int delta, bool for_debugger) { + DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_) + << delta << " " << debug_suspend_count_ << " " << this; + DCHECK_GE(suspend_count_, debug_suspend_count_) << this; + GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + + if (delta == -1 && suspend_count_ <= 0) { + // This is expected if you attach a thread during a GC. 
+ if (UNLIKELY(!IsStillStarting())) { + UnsafeLogFatalForSuspendCount(this); + } + return; + } + suspend_count_ += delta; + if (for_debugger) { + debug_suspend_count_ += delta; + } +} + +void Thread::FullSuspendCheck() { + VLOG(threads) << this << " self-suspending"; + // Make thread appear suspended to other threads, release mutator_lock_. + TransitionFromRunnableToSuspended(kSuspended); + // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_. + TransitionFromSuspendedToRunnable(); + VLOG(threads) << this << " self-reviving"; +} + +void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { + AssertThreadSuspensionIsAllowable(); + CHECK_NE(new_state, kRunnable); + CHECK_EQ(this, Thread::Current()); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_EQ(GetState(), kRunnable); + SetState(new_state); + } + // Release share on mutator_lock_. + GlobalSynchronization::mutator_lock_->SharedUnlock(); +} + +ThreadState Thread::TransitionFromSuspendedToRunnable() { + bool done = false; + ThreadState old_state; + do { + { + // Wait while our suspend count is non-zero. + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + old_state = GetState(); + CHECK_NE(old_state, kRunnable); + GlobalSynchronization::mutator_lock_->AssertNotHeld(); // Otherwise we starve GC.. + while (GetSuspendCount() != 0) { + // Re-check when Thread::resume_cond_ is notified. + Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_); + } + } + // Re-acquire shared mutator_lock_ access. + GlobalSynchronization::mutator_lock_->SharedLock(); + // Holding the mutator_lock_, synchronize with any thread trying to raise the suspend count + // and change state to Runnable if no suspend is pending. 
+ MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + if (GetSuspendCount() == 0) { + SetState(kRunnable); + done = true; + } else { + // Release shared mutator_lock_ access and try again. + GlobalSynchronization::mutator_lock_->SharedUnlock(); + } + } while (!done); + return old_state; +} + +Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout) { + static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. + useconds_t total_delay_us = 0; + useconds_t delay_us = 0; + bool did_suspend_request = false; + *timeout = false; + while (true) { + Thread* thread; + { + ScopedObjectAccess soa(Thread::Current()); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + thread = Thread::FromManagedThread(soa, peer); + if (thread == NULL) { + LOG(WARNING) << "No such thread for suspend: " << peer; + return NULL; + } + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + if (request_suspension) { + thread->ModifySuspendCount(+1, true /* for_debugger */); + request_suspension = false; + did_suspend_request = true; + } + // IsSuspended on the current thread will fail as the current thread is changed into + // Runnable above. As the suspend count is now raised if this is the current thread + // it will self suspend on transition to Runnable, making it hard to work with. Its simpler + // to just explicitly handle the current thread in the callers to this code. + CHECK_NE(thread, soa.Self()) << "Attempt to suspend for debugger the current thread"; + // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend + // count, or else we've waited and it has self suspended) or is the current thread, we're + // done. 
+ if (thread->IsSuspended()) { + return thread; + } + if (total_delay_us >= kTimeoutUs) { + LOG(ERROR) << "Thread suspension timed out: " << peer; + if (did_suspend_request) { + thread->ModifySuspendCount(-1, true /* for_debugger */); + } + *timeout = true; + return NULL; + } + } + // Release locks and come out of runnable state. + } + for (int i = kMaxMutexLevel; i >= 0; --i) { + BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast(i)); + if (held_mutex != NULL) { + LOG(FATAL) << "Holding " << held_mutex->GetName() + << " while sleeping for thread suspension"; + } + } + { + useconds_t new_delay_us = delay_us * 2; + CHECK_GE(new_delay_us, delay_us); + if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s. + delay_us = new_delay_us; + } + } + if (delay_us == 0) { + sched_yield(); + // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep). + delay_us = 500; + } else { + usleep(delay_us); + total_delay_us += delay_us; + } + } +} + void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { std::string group_name; int priority; bool is_daemon = false; if (thread != NULL && thread->peer_ != NULL) { - ScopedJniThreadState ts(Thread::Current()); - priority = ts.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->peer_); - is_daemon = ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->peer_); + ScopedObjectAccess soa(Thread::Current()); + priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->peer_); + is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->peer_); - Object* thread_group = thread->GetThreadGroup(ts); + Object* thread_group = thread->GetThreadGroup(soa); if (thread_group != NULL) { - Field* group_name_field = ts.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); + Field* group_name_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); 
String* group_name_string = reinterpret_cast(group_name_field->GetObject(thread_group)); group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : ""; } @@ -461,6 +628,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { if (is_daemon) { os << " daemon"; } + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); os << " prio=" << priority << " tid=" << thread->GetThinLockId() << " " << thread->GetState() << "\n"; @@ -471,6 +639,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { } if (thread != NULL) { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); os << " | group=\"" << group_name << "\"" << " sCount=" << thread->suspend_count_ << " dsCount=" << thread->debug_suspend_count_ @@ -520,6 +689,7 @@ void Thread::DumpState(std::ostream& os) const { struct StackDumpVisitor : public StackVisitor { StackDumpVisitor(std::ostream& os, const Thread* thread, Context* context, bool can_allocate) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(thread->GetManagedStack(), thread->GetTraceStack(), context), os(os), thread(thread), can_allocate(can_allocate), last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) { @@ -531,7 +701,7 @@ struct StackDumpVisitor : public StackVisitor { } } - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -589,7 +759,12 @@ struct StackDumpVisitor : public StackVisitor { void Thread::DumpStack(std::ostream& os) const { // If we're currently in native code, dump that stack before dumping the managed stack. 
- if (GetState() == kNative || GetState() == kVmWait) { + ThreadState state; + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + state = GetState(); + } + if (state == kNative) { DumpKernelStack(os, GetTid(), " kernel: ", false); DumpNativeStack(os, GetTid(), " native: ", false); } @@ -598,148 +773,6 @@ void Thread::DumpStack(std::ostream& os) const { dumper.WalkStack(); } -void Thread::SetStateWithoutSuspendCheck(ThreadState new_state) { - DCHECK_EQ(this, Thread::Current()); - volatile void* raw = reinterpret_cast(&state_); - volatile int32_t* addr = reinterpret_cast(raw); - android_atomic_release_store(new_state, addr); -} - -ThreadState Thread::SetState(ThreadState new_state) { - if (new_state != kVmWait && new_state != kTerminated) { - // TODO: kVmWait is set by the parent thread to a child thread to indicate it can go. Similarly - // kTerminated may be set by a parent thread to its child if pthread creation fails. This - // overloaded use of the state variable means we cannot fully assert that only threads - // themselves modify their state. - DCHECK_EQ(this, Thread::Current()); - } - ThreadState old_state = state_; - if (old_state == kRunnable) { - // Non-runnable states are points where we expect thread suspension can occur. - AssertThreadSuspensionIsAllowable(); - } - - if (old_state == new_state) { - return old_state; - } - - volatile void* raw = reinterpret_cast(&state_); - volatile int32_t* addr = reinterpret_cast(raw); - - if (new_state == kRunnable) { - /* - * Change our status to kRunnable. The transition requires - * that we check for pending suspension, because the runtime considers - * us to be "asleep" in all other states, and another thread could - * be performing a GC now. - * - * The order of operations is very significant here. 
One way to - * do this wrong is: - * - * GCing thread Our thread (in kNative) - * ------------ ---------------------- - * check suspend count (== 0) - * SuspendAllThreads() - * grab suspend-count lock - * increment all suspend counts - * release suspend-count lock - * check thread state (== kNative) - * all are suspended, begin GC - * set state to kRunnable - * (continue executing) - * - * We can correct this by grabbing the suspend-count lock and - * performing both of our operations (check suspend count, set - * state) while holding it, now we need to grab a mutex on every - * transition to kRunnable. - * - * What we do instead is change the order of operations so that - * the transition to kRunnable happens first. If we then detect - * that the suspend count is nonzero, we switch to kSuspended. - * - * Appropriate compiler and memory barriers are required to ensure - * that the operations are observed in the expected order. - * - * This does create a small window of opportunity where a GC in - * progress could observe what appears to be a running thread (if - * it happens to look between when we set to kRunnable and when we - * switch to kSuspended). At worst this only affects assertions - * and thread logging. (We could work around it with some sort - * of intermediate "pre-running" state that is generally treated - * as equivalent to running, but that doesn't seem worthwhile.) - * - * We can also solve this by combining the "status" and "suspend - * count" fields into a single 32-bit value. This trades the - * store/load barrier on transition to kRunnable for an atomic RMW - * op on all transitions and all suspend count updates (also, all - * accesses to status or the thread count require bit-fiddling). - * It also eliminates the brief transition through kRunnable when - * the thread is supposed to be suspended. This is possibly faster - * on SMP and slightly more correct, but less convenient. 
- */ - AssertThreadSuspensionIsAllowable(); - android_atomic_acquire_store(new_state, addr); - ANNOTATE_IGNORE_READS_BEGIN(); - int suspend_count = suspend_count_; - ANNOTATE_IGNORE_READS_END(); - if (suspend_count != 0) { - Runtime::Current()->GetThreadList()->FullSuspendCheck(this); - } - } else { - /* - * Not changing to kRunnable. No additional work required. - * - * We use a releasing store to ensure that, if we were runnable, - * any updates we previously made to objects on the managed heap - * will be observed before the state change. - */ - android_atomic_release_store(new_state, addr); - } - - return old_state; -} - -bool Thread::IsSuspended() { - ANNOTATE_IGNORE_READS_BEGIN(); - int suspend_count = suspend_count_; - ANNOTATE_IGNORE_READS_END(); - return suspend_count != 0 && GetState() != kRunnable; -} - -static void ReportThreadSuspendTimeout(Thread* waiting_thread) { - Runtime* runtime = Runtime::Current(); - std::ostringstream ss; - ss << "Thread suspend timeout waiting for thread " << *waiting_thread << "\n"; - runtime->DumpLockHolders(ss); - ss << "\n"; - runtime->GetThreadList()->DumpLocked(ss); - LOG(FATAL) << ss.str(); -} - -void Thread::WaitUntilSuspended() { - static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. 
- - useconds_t total_delay = 0; - useconds_t delay = 0; - while (GetState() == kRunnable) { - if (total_delay >= kTimeoutUs) { - ReportThreadSuspendTimeout(this); - } - useconds_t new_delay = delay * 2; - CHECK_GE(new_delay, delay); - delay = new_delay; - if (delay == 0) { - sched_yield(); - // Default to 1 milliseconds (note that this gets multiplied by 2 before - // the first sleep) - delay = 500; - } else { - usleep(delay); - total_delay += delay; - } - } -} - void Thread::ThreadExitCallback(void* arg) { Thread* self = reinterpret_cast(arg); if (self->thread_exit_check_count_ == 0) { @@ -752,6 +785,11 @@ void Thread::ThreadExitCallback(void* arg) { } void Thread::Startup() { + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); // Keep GCC happy. + resume_cond_ = new ConditionVariable("Thread resumption condition variable"); + } + // Allocate a TLS slot. CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key"); @@ -764,13 +802,11 @@ void Thread::Startup() { void Thread::FinishStartup() { Runtime* runtime = Runtime::Current(); CHECK(runtime->IsStarted()); - Thread* self = Thread::Current(); // Finish attaching the main thread. 
- ScopedThreadStateChange tsc(self, kRunnable); + ScopedObjectAccess soa(Thread::Current()); Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); - InitBoxingMethods(); Runtime::Current()->GetClassLinker()->RunRootClinits(); } @@ -808,6 +844,7 @@ Thread::Thread(bool daemon) trace_stack_(new std::vector), name_(new std::string(kThreadNameDuringStartup)), daemon_(daemon), + pthread_self_(0), no_thread_suspension_(0), last_no_thread_suspension_cause_(NULL), thread_exit_check_count_(0) { @@ -825,36 +862,49 @@ bool Thread::IsStillStarting() const { return (*name_ == kThreadNameDuringStartup); } -static void MonitorExitVisitor(const Object* object, void*) { +void Thread::AssertNoPendingException() const { + if (UNLIKELY(IsExceptionPending())) { + ScopedObjectAccess soa(Thread::Current()); + Throwable* exception = GetException(); + LOG(FATAL) << "No pending exception expected: " << exception->Dump(); + } +} + +static void MonitorExitVisitor(const Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS { + Thread* self = reinterpret_cast(arg); Object* entered_monitor = const_cast(object); - LOG(WARNING) << "Calling MonitorExit on object " << object << " (" << PrettyTypeOf(object) << ")" - << " left locked by native thread " << *Thread::Current() << " which is detaching"; - entered_monitor->MonitorExit(Thread::Current()); + if (self->HoldsLock(entered_monitor)) { + LOG(WARNING) << "Calling MonitorExit on object " + << object << " (" << PrettyTypeOf(object) << ")" + << " left locked by native thread " + << *Thread::Current() << " which is detaching"; + entered_monitor->MonitorExit(self); + } } void Thread::Destroy() { // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. if (jni_env_ != NULL) { - jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL); + jni_env_->monitors.VisitRoots(MonitorExitVisitor, Thread::Current()); } if (peer_ != NULL) { Thread* self = this; // We may need to call user-supplied managed code. 
- ScopedJniThreadState ts(this); + ScopedObjectAccess soa(this); - HandleUncaughtExceptions(ts); - RemoveFromThreadGroup(ts); + HandleUncaughtExceptions(soa); + RemoveFromThreadGroup(soa); // this.vmData = 0; - SetVmData(ts, peer_, NULL); + SetVmData(soa, peer_, NULL); Dbg::PostThreadDeath(self); // Thread.join() is implemented as an Object.wait() on the Thread.lock // object. Signal anyone who is waiting. - Object* lock = ts.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(peer_); + Object* lock = soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(peer_); // (This conditional is only needed for tests, where Thread.lock won't have been set.) if (lock != NULL) { lock->MonitorEnter(self); @@ -868,7 +918,11 @@ Thread::~Thread() { delete jni_env_; jni_env_ = NULL; - SetState(kTerminated); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(GetState(), kRunnable); + SetState(kTerminated); + } delete wait_cond_; delete wait_mutex_; @@ -884,7 +938,7 @@ Thread::~Thread() { TearDownAlternateSignalStack(); } -void Thread::HandleUncaughtExceptions(const ScopedJniThreadState& ts) { +void Thread::HandleUncaughtExceptions(const ScopedObjectAccess& soa) { if (!IsExceptionPending()) { return; } @@ -894,15 +948,15 @@ void Thread::HandleUncaughtExceptions(const ScopedJniThreadState& ts) { // If the thread has its own handler, use that. Object* handler = - ts.DecodeField(WellKnownClasses::java_lang_Thread_uncaughtHandler)->GetObject(peer_); + soa.DecodeField(WellKnownClasses::java_lang_Thread_uncaughtHandler)->GetObject(peer_); if (handler == NULL) { // Otherwise use the thread group's default handler. - handler = GetThreadGroup(ts); + handler = GetThreadGroup(soa); } // Call the handler. 
jmethodID mid = WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException; - Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); + Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid)); JValue args[2]; args[0].SetL(peer_); args[1].SetL(exception); @@ -912,17 +966,17 @@ void Thread::HandleUncaughtExceptions(const ScopedJniThreadState& ts) { ClearException(); } -Object* Thread::GetThreadGroup(const ScopedJniThreadState& ts) const { - return ts.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer_); +Object* Thread::GetThreadGroup(const ScopedObjectAccessUnchecked& soa) const { + return soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer_); } -void Thread::RemoveFromThreadGroup(const ScopedJniThreadState& ts) { +void Thread::RemoveFromThreadGroup(const ScopedObjectAccess& soa) { // this.group.removeThread(this); // group can be null if we're in the compiler or a test. - Object* group = GetThreadGroup(ts); + Object* group = GetThreadGroup(soa); if (group != NULL) { jmethodID mid = WellKnownClasses::java_lang_ThreadGroup_removeThread; - Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(ts.DecodeMethod(mid)); + Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid)); JValue args[1]; args[0].SetL(peer_); m->Invoke(this, group, args, NULL); @@ -1023,10 +1077,11 @@ class CountStackDepthVisitor : public StackVisitor { public: CountStackDepthVisitor(const ManagedStack* stack, const std::vector* trace_stack) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), skip_depth_(0), skipping_(true) {} - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. 
// Note we also skip the frame if it doesn't have a method (namely the callee // save frame) @@ -1067,7 +1122,8 @@ class BuildInternalStackTraceVisitor : public StackVisitor { : StackVisitor(stack, trace_stack, NULL), skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {} - bool Init(int depth, const ScopedJniThreadState& ts) { + bool Init(int depth, const ScopedObjectAccess& soa) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { // Allocate method trace with an extra slot that will hold the PC trace SirtRef > method_trace(Runtime::Current()->GetClassLinker()->AllocObjectArray(depth + 1)); @@ -1083,7 +1139,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { method_trace->Set(depth, dex_pc_trace); // Set the Object*s and assert that no thread suspension is now possible. const char* last_no_suspend_cause = - ts.Self()->StartAssertNoThreadSuspension("Building internal stack trace"); + soa.Self()->StartAssertNoThreadSuspension("Building internal stack trace"); CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause; method_trace_ = method_trace.get(); dex_pc_trace_ = dex_pc_trace; @@ -1096,7 +1152,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { } } - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (method_trace_ == NULL || dex_pc_trace_ == NULL) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 
} @@ -1141,7 +1197,7 @@ StackIndirectReferenceTable* Thread::PopSirt() { return sirt; } -jobject Thread::CreateInternalStackTrace(const ScopedJniThreadState& ts) const { +jobject Thread::CreateInternalStackTrace(const ScopedObjectAccess& soa) const { // Compute depth of stack CountStackDepthVisitor count_visitor(GetManagedStack(), GetTraceStack()); count_visitor.WalkStack(); @@ -1151,19 +1207,19 @@ jobject Thread::CreateInternalStackTrace(const ScopedJniThreadState& ts) const { // Build internal stack trace BuildInternalStackTraceVisitor build_trace_visitor(GetManagedStack(), GetTraceStack(), skip_depth); - if (!build_trace_visitor.Init(depth, ts)) { + if (!build_trace_visitor.Init(depth, soa)) { return NULL; // Allocation failed } build_trace_visitor.WalkStack(); - return ts.AddLocalReference(build_trace_visitor.GetInternalStackTrace()); + return soa.AddLocalReference(build_trace_visitor.GetInternalStackTrace()); } jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, jobjectArray output_array, int* stack_depth) { // Transition into runnable state to work on Object*/Array* - ScopedJniThreadState ts(env); + ScopedObjectAccess soa(env); // Decode the internal stack trace into the depth, method trace and PC trace - ObjectArray* method_trace = ts.Decode*>(internal); + ObjectArray* method_trace = soa.Decode*>(internal); int32_t depth = method_trace->GetLength() - 1; IntArray* pc_trace = down_cast(method_trace->Get(depth)); @@ -1174,7 +1230,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job if (output_array != NULL) { // Reuse the array we were given. result = output_array; - java_traces = ts.Decode*>(output_array); + java_traces = soa.Decode*>(output_array); // ...adjusting the number of frames we'll write to not exceed the array length. 
depth = std::min(depth, java_traces->GetLength()); } else { @@ -1183,7 +1239,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job if (java_traces == NULL) { return NULL; } - result = ts.AddLocalReference(java_traces); + result = soa.AddLocalReference(java_traces); } if (stack_depth != NULL) { @@ -1223,8 +1279,8 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job } #ifdef MOVING_GARBAGE_COLLECTOR // Re-read after potential GC - java_traces = Decode*>(ts.Env(), result); - method_trace = down_cast*>(Decode(ts.Env(), internal)); + java_traces = Decode*>(soa.Env(), result); + method_trace = down_cast*>(Decode(soa.Env(), internal)); pc_trace = down_cast(method_trace->Get(depth)); #endif java_traces->Set(i, obj); @@ -1246,7 +1302,7 @@ void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const ch } void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) { - CHECK(!IsExceptionPending()); // Callers should either clear or call ThrowNewWrappedException. + AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException. 
ThrowNewWrappedException(exception_class_descriptor, msg); } @@ -1276,10 +1332,10 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, co ScopedLocalRef exception( env, reinterpret_cast(env->AllocObject(exception_class.get()))); if (exception.get() != NULL) { - ScopedJniThreadState ts(env); - Throwable* t = reinterpret_cast(ts.Self()->DecodeJObject(exception.get())); + ScopedObjectAccessUnchecked soa(env); + Throwable* t = reinterpret_cast(soa.Self()->DecodeJObject(exception.get())); t->SetDetailMessage(String::AllocFromModifiedUtf8(msg)); - ts.Self()->SetException(t); + soa.Self()->SetException(t); } else { LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: " << PrettyTypeOf(GetException()); @@ -1358,8 +1414,13 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { ENTRY_POINT_INFO(pGetObjInstance), ENTRY_POINT_INFO(pGetObjStatic), ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), - ENTRY_POINT_INFO(pDecodeJObjectInThread), ENTRY_POINT_INFO(pFindNativeMethod), + ENTRY_POINT_INFO(pJniMethodStart), + ENTRY_POINT_INFO(pJniMethodStartSynchronized), + ENTRY_POINT_INFO(pJniMethodEnd), + ENTRY_POINT_INFO(pJniMethodEndSynchronized), + ENTRY_POINT_INFO(pJniMethodEndWithReference), + ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), ENTRY_POINT_INFO(pLockObjectFromCode), ENTRY_POINT_INFO(pUnlockObjectFromCode), ENTRY_POINT_INFO(pCmpgDouble), @@ -1452,6 +1513,7 @@ static const bool kDebugExceptionDelivery = false; class CatchBlockStackVisitor : public StackVisitor { public: CatchBlockStackVisitor(Thread* self, Throwable* exception) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(self->GetManagedStack(), self->GetTraceStack(), self->GetLongJumpContext()), self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL), throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL), @@ -1465,7 +1527,8 @@ class CatchBlockStackVisitor : public 
StackVisitor { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. } - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* method = GetMethod(); if (method == NULL) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. @@ -1507,7 +1570,7 @@ class CatchBlockStackVisitor : public StackVisitor { return true; // Continue stack walk. } - void DoLongJump() { + void DoLongJump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* catch_method = *handler_quick_frame_; Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_, catch_method, handler_dex_pc_, exception_); @@ -1587,9 +1650,10 @@ Method* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { struct CurrentMethodVisitor : public StackVisitor { CurrentMethodVisitor(const ManagedStack* stack, const std::vector* trace_stack) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {} - virtual bool VisitFrame() { + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. 
@@ -1627,9 +1691,10 @@ class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, Heap::RootVisitor* root_visitor, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, context), root_visitor_(root_visitor), arg_(arg) {} - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); @@ -1739,51 +1804,42 @@ void Thread::VerifyStack() { } #endif +// Set the stack end to that to be used during a stack overflow +void Thread::SetStackEndForStackOverflow() { + // During stack overflow we allow use of the full stack + if (stack_end_ == stack_begin_) { + DumpStack(std::cerr); + LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently " + << kStackOverflowReservedBytes << ")"; + } + + stack_end_ = stack_begin_; +} + std::ostream& operator<<(std::ostream& os, const Thread& thread) { - thread.Dump(os, false); + thread.ShortDump(os); return os; } -void Thread::CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking) { - if (this == NULL) { - CHECK(Runtime::Current()->IsShuttingDown()); - return; - } - if (is_locking) { - if (held_mutexes_[rank] == 0) { - bool bad_mutexes_held = false; - for (int i = kMaxMutexRank; i > rank; --i) { - if (held_mutexes_[i] != 0) { - LOG(ERROR) << "holding " << static_cast(i) << " while " << (is_locking ? "locking" : "unlocking") << " " << rank; +#ifndef NDEBUG +void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const { + CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_; + if (check_locks) { + bool bad_mutexes_held = false; + for (int i = kMaxMutexLevel; i >= 0; --i) { + // We expect no locks except the mutator_lock_. 
+ if (i != kMutatorLock) { + BaseMutex* held_mutex = GetHeldMutex(static_cast(i)); + if (held_mutex != NULL) { + LOG(ERROR) << "holding \"" << held_mutex->GetName() + << "\" at point where thread suspension is expected"; bad_mutexes_held = true; } } - CHECK(!bad_mutexes_held) << rank; } - ++held_mutexes_[rank]; - } else { - CHECK_GT(held_mutexes_[rank], 0U) << rank; - --held_mutexes_[rank]; + CHECK(!bad_mutexes_held); } } - -void Thread::CheckSafeToWait(MutexRank rank) { - if (this == NULL) { - CHECK(Runtime::Current()->IsShuttingDown()); - return; - } - bool bad_mutexes_held = false; - for (int i = kMaxMutexRank; i >= 0; --i) { - if (i != rank && held_mutexes_[i] != 0) { - LOG(ERROR) << "holding " << static_cast(i) << " while doing condition variable wait on " << rank; - bad_mutexes_held = true; - } - } - if (held_mutexes_[rank] == 0) { - LOG(ERROR) << "*not* holding " << rank << " while doing condition variable wait on it"; - bad_mutexes_held = true; - } - CHECK(!bad_mutexes_held); -} +#endif } // namespace art diff --git a/src/thread.h b/src/thread.h index 155c980603..48278d8dbc 100644 --- a/src/thread.h +++ b/src/thread.h @@ -51,6 +51,8 @@ class Method; class Monitor; class Object; class Runtime; +class ScopedObjectAccess; +class ScopedObjectAccessUnchecked; class ShadowFrame; class StackIndirectReferenceTable; class StackTraceElement; @@ -72,15 +74,23 @@ enum ThreadPriority { }; enum ThreadState { - kTerminated = 0, // Thread.TERMINATED JDWP TS_ZOMBIE - kRunnable = 1, // Thread.RUNNABLE JDWP TS_RUNNING - kTimedWaiting = 2, // Thread.TIMED_WAITING JDWP TS_WAIT - in Object.wait() with a timeout - kBlocked = 3, // Thread.BLOCKED JDWP TS_MONITOR - blocked on a monitor - kWaiting = 4, // Thread.WAITING JDWP TS_WAIT - in Object.wait() - kStarting = 5, // Thread.NEW - native thread started, not yet ready to run managed code - kNative = 6, // - running in a JNI native method - kVmWait = 7, // - waiting on an internal runtime resource - kSuspended = 8, // - suspended 
by GC or debugger + kTerminated = 0, // Thread.TERMINATED JDWP TS_ZOMBIE + kRunnable = 1, // Thread.RUNNABLE JDWP TS_RUNNING + kTimedWaiting = 2, // Thread.TIMED_WAITING JDWP TS_WAIT - in Object.wait() with a timeout + kBlocked = 3, // Thread.BLOCKED JDWP TS_MONITOR - blocked on a monitor + kWaiting = 4, // Thread.WAITING JDWP TS_WAIT - in Object.wait() + kWaitingForGcToComplete = 5, // Thread.WAITING JDWP TS_WAIT - blocked waiting for GC + kWaitingPerformingGc = 6, // Thread.WAITING JDWP TS_WAIT - performing GC + kWaitingForDebuggerSend = 7, // Thread.WAITING JDWP TS_WAIT - blocked waiting for events to be sent + kWaitingForDebuggerToAttach = 8, // Thread.WAITING JDWP TS_WAIT - blocked waiting for debugger to attach + kWaitingInMainDebuggerLoop = 9, // Thread.WAITING JDWP TS_WAIT - blocking/reading/processing debugger events + kWaitingForDebuggerSuspension = 10, // Thread.WAITING JDWP TS_WAIT - waiting for debugger suspend all + kWaitingForJniOnLoad = 11, // Thread.WAITING JDWP TS_WAIT - waiting for execution of dlopen and JNI on load code + kWaitingForSignalCatcherOutput = 12, // Thread.WAITING JDWP TS_WAIT - waiting for signal catcher IO to complete + kWaitingInMainSignalCatcherLoop = 13, // Thread.WAITING JDWP TS_WAIT - blocking/reading/processing signals + kStarting = 14, // Thread.NEW JDWP TS_WAIT - native thread started, not yet ready to run managed code + kNative = 15, // Thread.RUNNABLE JDWP TS_RUNNING - running in a JNI native method + kSuspended = 16, // Thread.RUNNABLE JDWP TS_RUNNING - suspended by GC or debugger }; class PACKED Thread { @@ -110,34 +120,90 @@ class PACKED Thread { return reinterpret_cast(thread); } - static Thread* FromManagedThread(const ScopedJniThreadState& ts, Object* thread_peer); - static Thread* FromManagedThread(const ScopedJniThreadState& ts, jobject thread); + static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Translates 172 to pAllocArrayFromCode and so on. static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers); - // When full == true, dumps the detailed thread state and the thread stack (used for SIGQUIT). - // When full == false, dumps a one-line summary of thread state (used for operator<<). - void Dump(std::ostream& os, bool full = true) const; + // Dumps a one-line summary of thread state (used for operator<<). + void ShortDump(std::ostream& os) const; + + // Dumps the detailed thread state and the thread stack (used for SIGQUIT). + void Dump(std::ostream& os) const + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which // case we use 'tid' to identify the thread, and we'll include as much information as we can. 
- static void DumpState(std::ostream& os, const Thread* thread, pid_t tid); + static void DumpState(std::ostream& os, const Thread* thread, pid_t tid) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); - ThreadState GetState() const { + ThreadState GetState() const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); return state_; } - ThreadState SetState(ThreadState new_state); - void SetStateWithoutSuspendCheck(ThreadState new_state); + ThreadState SetState(ThreadState new_state) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + ThreadState old_state = state_; + if (new_state == kRunnable) { + // Sanity, should never become runnable with a pending suspension and should always hold + // share of mutator_lock_. + CHECK_EQ(GetSuspendCount(), 0); + GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + } + state_ = new_state; + return old_state; + } - bool IsDaemon() const { - return daemon_; + int GetSuspendCount() const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + return suspend_count_; } - bool IsSuspended(); + int GetDebugSuspendCount() const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + return debug_suspend_count_; + } + + bool IsSuspended() const + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + int suspend_count = GetSuspendCount(); + return suspend_count != 0 && GetState() != kRunnable; + } + + void ModifySuspendCount(int delta, bool for_debugger) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_); + + // Called when thread detected that the thread_suspend_count_ was non-zero. 
Gives up share of + // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero. + void FullSuspendCheck() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + + // Transition from non-runnable to runnable state acquiring share on mutator_lock_. + ThreadState TransitionFromSuspendedToRunnable() + SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + + // Transition from runnable into a state where mutator privileges are denied. Releases share of + // mutator lock. + void TransitionFromRunnableToSuspended(ThreadState new_state) + UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); - void WaitUntilSuspended(); + // Wait for a debugger suspension on the thread associated with the given peer. Returns the + // thread on success, else NULL. If the thread should be suspended then request_suspension should + // be true on entry. If the suspension times out then *timeout is set to true. + static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); // Once called thread suspension will cause an assertion failure. #ifndef NDEBUG @@ -168,9 +234,14 @@ class PACKED Thread { } #endif - void AssertThreadSuspensionIsAllowable() const { - DCHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_; + +#ifndef NDEBUG + void AssertThreadSuspensionIsAllowable(bool check_locks = true) const; +#else + void AssertThreadSuspensionIsAllowable(bool check_locks = true) const { + check_locks = !check_locks; // Keep GCC happy about unused parameters. 
} +#endif bool CanAccessDirectReferences() const { #ifdef MOVING_GARBAGE_COLLECTOR @@ -179,6 +250,10 @@ class PACKED Thread { return true; } + bool IsDaemon() const { + return daemon_; + } + bool HoldsLock(Object*); /* @@ -206,41 +281,46 @@ class PACKED Thread { } // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer. - String* GetThreadName(const ScopedJniThreadState& ts) const; + String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. void GetThreadName(std::string& name) const; // Sets the thread's name. - void SetThreadName(const char* name); + void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Object* GetPeer() const { + Object* GetPeer() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { return peer_; } - Object* GetThreadGroup(const ScopedJniThreadState& ts) const; + bool HasPeer() const { + return peer_ != NULL; + } + + Object* GetThreadGroup(const ScopedObjectAccessUnchecked& ts) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); RuntimeStats* GetStats() { return &stats_; } - int GetSuspendCount() const { - return suspend_count_; - } - bool IsStillStarting() const; bool IsExceptionPending() const { return exception_ != NULL; } - Throwable* GetException() const { + Throwable* GetException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(CanAccessDirectReferences()); return exception_; } - void SetException(Throwable* new_exception) { + void AssertNoPendingException() const; + + void SetException(Throwable* new_exception) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { DCHECK(CanAccessDirectReferences()); CHECK(new_exception != NULL); // TODO: CHECK(exception_ == NULL); @@ -252,7 +332,7 @@ class PACKED Thread { } 
// Find catch block and perform long jump to appropriate exception handle - void DeliverException(); + void DeliverException() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); Context* GetLongJumpContext(); void ReleaseLongJumpContext(Context* context) { @@ -260,7 +340,8 @@ class PACKED Thread { long_jump_context_ = context; } - Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const; + Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void SetTopOfStack(void* stack, uintptr_t pc) { Method** top_method = reinterpret_cast(stack); @@ -273,20 +354,25 @@ class PACKED Thread { } // If 'msg' is NULL, no detail message is set. - void ThrowNewException(const char* exception_class_descriptor, const char* msg); + void ThrowNewException(const char* exception_class_descriptor, const char* msg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be // used as the new exception's cause. - void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg); + void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) - __attribute__((format(printf, 3, 4))); + __attribute__((format(printf, 3, 4))) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap); + void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // OutOfMemoryError is special, because we need to pre-allocate an instance. // Only the GC should call this. 
- void ThrowOutOfMemoryError(const char* msg); + void ThrowOutOfMemoryError(const char* msg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc); @@ -305,7 +391,10 @@ class PACKED Thread { } // Convert a jobject into a Object* - Object* DecodeJObject(jobject obj); + Object* DecodeJObject(jobject obj) + LOCKS_EXCLUDED(JavaVMExt::globals_lock, + JavaVMExt::weak_globals_lock) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Implements java.lang.Thread.interrupted. bool Interrupted() { @@ -335,9 +424,9 @@ class PACKED Thread { NotifyLocked(); } - ClassLoader* GetClassLoaderOverride() { - // TODO: need to place the class_loader_override_ in a handle - // DCHECK(CanAccessDirectReferences()); + ClassLoader* GetClassLoaderOverride() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DCHECK(CanAccessDirectReferences()); return class_loader_override_; } @@ -347,7 +436,8 @@ class PACKED Thread { // Create the internal representation of a stack trace, that is more time // and space efficient to compute than the StackTraceElement[] - jobject CreateInternalStackTrace(const ScopedJniThreadState& ts) const; + jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a // StackTraceElement[]. 
If output_array is NULL, a new array is created, otherwise as many @@ -356,7 +446,8 @@ class PACKED Thread { static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, jobjectArray output_array = NULL, int* stack_depth = NULL); - void VisitRoots(Heap::RootVisitor* visitor, void* arg); + void VisitRoots(Heap::RootVisitor* visitor, void* arg) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); #if VERIFY_OBJECT_ENABLED void VerifyStack(); @@ -398,16 +489,7 @@ class PACKED Thread { } // Set the stack end to that to be used during a stack overflow - void SetStackEndForStackOverflow() { - // During stack overflow we allow use of the full stack - if (stack_end_ == stack_begin_) { - DumpStack(std::cerr); - LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently " - << kStackOverflowReservedBytes << ")"; - } - - stack_end_ = stack_begin_; - } + void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Set the stack end to that to be used during regular execution void ResetDefaultStackEnd() { @@ -508,8 +590,13 @@ class PACKED Thread { return frame; } - void CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking); - void CheckSafeToWait(MutexRank rank); + BaseMutex* GetHeldMutex(MutexLevel level) const { + return held_mutexes_[level]; + } + + void SetHeldMutex(MutexLevel level, BaseMutex* mutex) { + held_mutexes_[level] = mutex; + } private: // We have no control over the size of 'bool', but want our boolean fields @@ -517,24 +604,36 @@ class PACKED Thread { typedef uint32_t bool32_t; explicit Thread(bool daemon); - ~Thread(); + ~Thread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_suspend_count_lock_); void Destroy(); friend class ThreadList; // For ~Thread and Destroy. void CreatePeer(const char* name, bool as_daemon, jobject thread_group); friend class Runtime; // For CreatePeer. 
+ // TODO: remove, callers should use GetState and hold the appropriate locks. Used only by + // ShortDump. + ThreadState GetStateUnsafe() const NO_THREAD_SAFETY_ANALYSIS { + return state_; + } + void DumpState(std::ostream& os) const; - void DumpStack(std::ostream& os) const; + void DumpStack(std::ostream& os) const + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Out-of-line conveniences for debugging in gdb. static Thread* CurrentFromGdb(); // Like Thread::Current. - void DumpFromGdb() const; // Like Thread::Dump(std::cerr). + // Like Thread::Dump(std::cerr). + void DumpFromGdb() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void* CreateCallback(void* arg); - void HandleUncaughtExceptions(const ScopedJniThreadState& ts); - void RemoveFromThreadGroup(const ScopedJniThreadState& ts); + void HandleUncaughtExceptions(const ScopedObjectAccess& soa) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RemoveFromThreadGroup(const ScopedObjectAccess& soa) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void Init(); void InitCardTable(); @@ -544,7 +643,7 @@ class PACKED Thread { void InitPthreadKeySelf(); void InitStackHwm(); - void NotifyLocked() { + void NotifyLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { if (wait_monitor_ != NULL) { wait_cond_->Signal(); } @@ -555,11 +654,16 @@ class PACKED Thread { // TLS key used to retrieve the Thread*. static pthread_key_t pthread_key_self_; + // Used to notify threads that they should attempt to resume, they will suspend again if + // their suspend count is > 0. + static ConditionVariable* resume_cond_ + GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + // --- Frequently accessed fields first for short offsets --- // A non-zero value is used to tell the current thread to enter a safe point // at the next poll. 
- int suspend_count_; + int suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); // The biased card table, see CardTable for details byte* card_table_; @@ -582,7 +686,7 @@ class PACKED Thread { // is hard. This field can be read off of Thread::Current to give the address. Thread* self_; - volatile ThreadState state_; + volatile ThreadState state_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); // Our managed peer (an instance of java.lang.Thread). Object* peer_; @@ -604,12 +708,12 @@ class PACKED Thread { pid_t tid_; // Guards the 'interrupted_' and 'wait_monitor_' members. - mutable Mutex* wait_mutex_; - ConditionVariable* wait_cond_; - // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_. - Monitor* wait_monitor_; - // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_. - bool32_t interrupted_; + mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER; + ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_); + // Pointer to the monitor lock we're currently waiting on (or NULL). + Monitor* wait_monitor_ GUARDED_BY(wait_mutex_); + // Thread "interrupted" status; stays raised until queried or thrown. + bool32_t interrupted_ GUARDED_BY(wait_mutex_); // The next thread in the wait set this thread is part of. Thread* wait_next_; // If we're blocked in MonitorEnter, this is the object we're trying to lock. @@ -636,7 +740,7 @@ class PACKED Thread { // How much of 'suspend_count_' is by request of the debugger, used to set things right // when the debugger detaches. Must be <= suspend_count_. - int debug_suspend_count_; + int debug_suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); // JDWP invoke-during-breakpoint support. DebugInvokeReq* debug_invoke_req_; @@ -654,8 +758,8 @@ class PACKED Thread { // A cached pthread_t for the pthread underlying this Thread*. 
pthread_t pthread_self_; - // Mutexes held by this thread, see CheckSafeToLockOrUnlock. - uint32_t held_mutexes_[kMaxMutexRank + 1]; + // Support for Mutex lock hierarchy bug detection. + BaseMutex* held_mutexes_[kMaxMutexLevel + 1]; // A positive value implies we're in a region where thread suspension isn't expected. uint32_t no_thread_suspension_; @@ -672,39 +776,12 @@ class PACKED Thread { // How many times has our pthread key's destructor been called? uint32_t thread_exit_check_count_; - friend class ScopedThreadListLockReleaser; DISALLOW_COPY_AND_ASSIGN(Thread); }; std::ostream& operator<<(std::ostream& os, const Thread& thread); std::ostream& operator<<(std::ostream& os, const ThreadState& state); -class ScopedThreadStateChange { - public: - ScopedThreadStateChange(Thread* thread, ThreadState new_state) : thread_(thread) { - if (thread_ == NULL) { - // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL. - old_thread_state_ = kTerminated; - CHECK(Runtime::Current()->IsShuttingDown()); - return; - } - old_thread_state_ = thread_->SetState(new_state); - } - - ~ScopedThreadStateChange() { - if (thread_ == NULL) { - CHECK(Runtime::Current()->IsShuttingDown()); - return; - } - thread_->SetState(old_thread_state_); - } - - private: - Thread* thread_; - ThreadState old_thread_state_; - DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange); -}; - } // namespace art #endif // ART_SRC_THREAD_H_ diff --git a/src/thread_list.cc b/src/thread_list.cc index ae744208bd..0bd587ca7b 100644 --- a/src/thread_list.cc +++ b/src/thread_list.cc @@ -21,8 +21,6 @@ #include #include "debugger.h" -#include "scoped_heap_lock.h" -#include "scoped_thread_list_lock.h" #include "timing_logger.h" #include "utils.h" @@ -30,11 +28,8 @@ namespace art { ThreadList::ThreadList() : allocated_ids_lock_("allocated thread ids lock"), - thread_list_lock_("thread list lock", kThreadListLock), - thread_start_cond_("thread start condition variable"), - thread_exit_cond_("thread 
exit condition variable"), - thread_suspend_count_lock_("thread suspend count lock", kThreadSuspendCountLock), - thread_suspend_count_cond_("thread suspend count condition variable") { + suspend_all_count_(0), debug_suspend_all_count_(0), + thread_exit_cond_("thread exit condition variable") { } ThreadList::~ThreadList() { @@ -46,6 +41,8 @@ ThreadList::~ThreadList() { } WaitForOtherNonDaemonThreadsToExit(); + // TODO: there's an unaddressed race here where a thread may attach during shutdown, see + // Thread::Init. SuspendAllDaemonThreads(); } @@ -63,12 +60,14 @@ bool ThreadList::Contains(pid_t tid) { } pid_t ThreadList::GetLockOwner() { - return thread_list_lock_.GetOwner(); + return GlobalSynchronization::thread_list_lock_->GetExclusiveOwnerTid(); } void ThreadList::DumpForSigQuit(std::ostream& os) { - ScopedThreadListLock thread_list_lock; - DumpLocked(os); + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + DumpLocked(os); + } DumpUnattachedThreads(os); } @@ -94,14 +93,22 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) { while (!readdir_r(d, &de, &e) && e != NULL) { char* end; pid_t tid = strtol(de.d_name, &end, 10); - if (!*end && !Contains(tid)) { - DumpUnattachedThread(os, tid); + if (!*end) { + bool contains; + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + contains = Contains(tid); + } + if (!contains) { + DumpUnattachedThread(os, tid); + } } } closedir(d); } void ThreadList::DumpLocked(std::ostream& os) { + GlobalSynchronization::thread_list_lock_->AssertHeld(); os << "DALVIK THREADS (" << list_.size() << "):\n"; for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->Dump(os); @@ -109,119 +116,171 @@ void ThreadList::DumpLocked(std::ostream& os) { } } -void ThreadList::ModifySuspendCount(Thread* thread, int delta, bool for_debugger) { -#ifndef NDEBUG - DCHECK(delta == -1 || delta == +1 || delta == -thread->debug_suspend_count_) - << delta << " " << thread->debug_suspend_count_ << " " << *thread; - 
DCHECK_GE(thread->suspend_count_, thread->debug_suspend_count_) << *thread; -#endif - if (delta == -1 && thread->suspend_count_ <= 0) { - // This is expected if you attach a thread during a GC. - if (UNLIKELY(!thread->IsStillStarting())) { - std::ostringstream ss; - Runtime::Current()->GetThreadList()->DumpLocked(ss); - LOG(FATAL) << *thread << " suspend count already zero.\n" << ss.str(); - } - return; - } - thread->suspend_count_ += delta; - if (for_debugger) { - thread->debug_suspend_count_ += delta; +void ThreadList::AssertThreadsAreSuspended() { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + for (It it = list_.begin(), end = list_.end(); it != end; ++it) { + Thread* thread = *it; + CHECK_NE(thread->GetState(), kRunnable); } } -void ThreadList::FullSuspendCheck(Thread* thread) { - CHECK(thread != NULL); - CHECK_GE(thread->suspend_count_, 0); +// Attempt to rectify locks so that we dump thread list with required locks before exiting. 
+static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS { + Runtime* runtime = Runtime::Current(); + std::ostringstream ss; + ss << "Thread suspend timeout\n"; + runtime->DumpLockHolders(ss); + ss << "\n"; + GlobalSynchronization::mutator_lock_->SharedTryLock(); + if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) { + LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; + } + GlobalSynchronization::thread_list_lock_->TryLock(); + if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) { + LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; + } + runtime->GetThreadList()->DumpLocked(ss); + LOG(FATAL) << ss.str(); +} - MutexLock mu(thread_suspend_count_lock_); - if (thread->suspend_count_ == 0) { - return; - } +void ThreadList::SuspendAll() { + Thread* self = Thread::Current(); - VLOG(threads) << *thread << " self-suspending"; + VLOG(threads) << *self << " SuspendAll starting..."; + + if (kIsDebugBuild) { + GlobalSynchronization::mutator_lock_->AssertNotHeld(); + GlobalSynchronization::thread_list_lock_->AssertNotHeld(); + GlobalSynchronization::thread_suspend_count_lock_->AssertNotHeld(); + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK_NE(self->GetState(), kRunnable); + } { - ScopedThreadStateChange tsc(thread, kSuspended); - while (thread->suspend_count_ != 0) { - /* - * Wait for wakeup signal, releasing lock. The act of releasing - * and re-acquiring the lock provides the memory barriers we - * need for correct behavior on SMP. - */ - thread_suspend_count_cond_.Wait(thread_suspend_count_lock_); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + { + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + // Update global suspend all state for attaching threads. + ++suspend_all_count_; + // Increment everybody's suspend count (except our own). 
+ for (It it = list_.begin(), end = list_.end(); it != end; ++it) { + Thread* thread = *it; + if (thread == self) { + continue; + } + VLOG(threads) << "requesting thread suspend: " << *thread; + thread->ModifySuspendCount(+1, false); + } } - CHECK_EQ(thread->suspend_count_, 0); } - VLOG(threads) << *thread << " self-reviving"; + + // Block on the mutator lock until all Runnable threads release their share of access. Timeout + // if we wait more than 30 seconds. + timespec timeout; + clock_gettime(CLOCK_REALTIME, &timeout); + timeout.tv_sec += 30; + if (UNLIKELY(!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout))) { + UnsafeLogFatalForThreadSuspendAllTimeout(); + } + + // Debug check that all threads are suspended. + AssertThreadsAreSuspended(); + + VLOG(threads) << *self << " SuspendAll complete"; } -void ThreadList::SuspendAll(bool for_debugger) { +void ThreadList::ResumeAll() { Thread* self = Thread::Current(); - VLOG(threads) << *self << " SuspendAll starting..." << (for_debugger ? " (debugger)" : ""); - - CHECK_EQ(self->GetState(), kRunnable); - ScopedThreadListLock thread_list_lock; - Thread* debug_thread = Dbg::GetDebugThread(); + VLOG(threads) << *self << " ResumeAll starting"; { - // Increment everybody's suspend count (except our own). - MutexLock mu(thread_suspend_count_lock_); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + // Update global suspend all state for attaching threads. + --suspend_all_count_; + // Decrement the suspend counts for all threads. 
for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; - if (thread == self || (for_debugger && thread == debug_thread)) { + if (thread == self) { continue; } - VLOG(threads) << "requesting thread suspend: " << *thread; - ModifySuspendCount(thread, +1, for_debugger); + thread->ModifySuspendCount(-1, false); } + + // Broadcast a notification to all suspended threads, some or all of + // which may choose to wake up. No need to wait for them. + VLOG(threads) << *self << " ResumeAll waking others"; + Thread::resume_cond_->Broadcast(); } + GlobalSynchronization::mutator_lock_->ExclusiveUnlock(); + VLOG(threads) << *self << " ResumeAll complete"; +} - /* - * Wait for everybody in kRunnable state to stop. Other states - * indicate the code is either running natively or sleeping quietly. - * Any attempt to transition back to kRunnable will cause a check - * for suspension, so it should be impossible for anything to execute - * interpreted code or modify objects (assuming native code plays nicely). - * - * It's also okay if the thread transitions to a non-kRunnable state. - * - * Note we released the thread_suspend_count_lock_ before getting here, - * so if another thread is fiddling with its suspend count (perhaps - * self-suspending for the debugger) it won't block while we're waiting - * in here. - */ - for (It it = list_.begin(), end = list_.end(); it != end; ++it) { - Thread* thread = *it; - if (thread == self || (for_debugger && thread == debug_thread)) { - continue; +void ThreadList::Resume(Thread* thread, bool for_debugger) { + DCHECK(thread != Thread::Current()); + VLOG(threads) << "Resume(" << *thread << ") starting..." << (for_debugger ? " (debugger)" : ""); + + { + // To check Contains. + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + // To check IsSuspended. 
+ MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + CHECK(thread->IsSuspended()); + if (!Contains(thread)) { + return; } - thread->WaitUntilSuspended(); - VLOG(threads) << "thread suspended: " << *thread; + thread->ModifySuspendCount(-1, for_debugger); } - VLOG(threads) << *self << " SuspendAll complete"; -} + { + VLOG(threads) << "Resume(" << *thread << ") waking others"; + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + Thread::resume_cond_->Broadcast(); + } -void ThreadList::Suspend(Thread* thread, bool for_debugger) { - DCHECK(thread != Thread::Current()); - thread_list_lock_.AssertHeld(); + VLOG(threads) << "Resume(" << *thread << ") complete"; +} - // TODO: add another thread_suspend_lock_ to avoid GC/debugger races. +void ThreadList::SuspendAllForDebugger() { + Thread* self = Thread::Current(); + Thread* debug_thread = Dbg::GetDebugThread(); - VLOG(threads) << "Suspend(" << *thread << ") starting..." << (for_debugger ? " (debugger)" : ""); + VLOG(threads) << *self << " SuspendAllForDebugger starting..."; - if (!Contains(thread)) { - return; + { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + { + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + // Update global suspend all state for attaching threads. + ++suspend_all_count_; + ++debug_suspend_all_count_; + // Increment everybody's suspend count (except our own). + for (It it = list_.begin(), end = list_.end(); it != end; ++it) { + Thread* thread = *it; + if (thread == self || thread == debug_thread) { + continue; + } + VLOG(threads) << "requesting thread suspend: " << *thread; + thread->ModifySuspendCount(+1, true); + } + } } - { - MutexLock mu(thread_suspend_count_lock_); - ModifySuspendCount(thread, +1, for_debugger); + // Block on the mutator lock until all Runnable threads release their share of access. Timeout + // if we wait more than 30 seconds. 
+ timespec timeout; + clock_gettime(CLOCK_REALTIME, &timeout); + timeout.tv_sec += 30; + if (!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout)) { + UnsafeLogFatalForThreadSuspendAllTimeout(); + } else { + // Debugger suspends all threads but doesn't hold onto the mutator_lock_. + GlobalSynchronization::mutator_lock_->ExclusiveUnlock(); } - thread->WaitUntilSuspended(); + AssertThreadsAreSuspended(); - VLOG(threads) << "Suspend(" << *thread << ") complete"; + VLOG(threads) << *self << " SuspendAll complete"; } void ThreadList::SuspendSelfForDebugger() { @@ -235,8 +294,8 @@ void ThreadList::SuspendSelfForDebugger() { // Collisions with other suspends aren't really interesting. We want // to ensure that we're the only one fiddling with the suspend count // though. - MutexLock mu(thread_suspend_count_lock_); - ModifySuspendCount(self, +1, true); + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + self->ModifySuspendCount(+1, true); // Suspend ourselves. CHECK_GT(self->suspend_count_, 0); @@ -249,7 +308,7 @@ void ThreadList::SuspendSelfForDebugger() { Dbg::ClearWaitForEventThread(); while (self->suspend_count_ != 0) { - thread_suspend_count_cond_.Wait(thread_suspend_count_lock_); + Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_); if (self->suspend_count_ != 0) { // The condition was signaled but we're still suspended. This // can happen if the debugger lets go while a SIGQUIT thread @@ -264,113 +323,114 @@ void ThreadList::SuspendSelfForDebugger() { VLOG(threads) << *self << " self-reviving (debugger)"; } -void ThreadList::ResumeAll(bool for_debugger) { +void ThreadList::UndoDebuggerSuspensions() { Thread* self = Thread::Current(); - VLOG(threads) << *self << " ResumeAll starting" << (for_debugger ? " (debugger)" : ""); + VLOG(threads) << *self << " UndoDebuggerSuspensions starting"; - // Decrement the suspend counts for all threads. 
No need for atomic - // writes, since nobody should be moving until we decrement the count. - // We do need to hold the thread list because of JNI attaches. { - ScopedThreadListLock thread_list_lock; - Thread* debug_thread = Dbg::GetDebugThread(); - MutexLock mu(thread_suspend_count_lock_); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + // Update global suspend all state for attaching threads. + suspend_all_count_ -= debug_suspend_all_count_; + debug_suspend_all_count_ = 0; + // Update running threads. for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; - if (thread == self || (for_debugger && thread == debug_thread)) { + if (thread == self || thread->debug_suspend_count_ == 0) { continue; } - ModifySuspendCount(thread, -1, for_debugger); + thread->ModifySuspendCount(-thread->debug_suspend_count_, true); } } - // Broadcast a notification to all suspended threads, some or all of - // which may choose to wake up. No need to wait for them. { - VLOG(threads) << *self << " ResumeAll waking others"; - MutexLock mu(thread_suspend_count_lock_); - thread_suspend_count_cond_.Broadcast(); + MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + Thread::resume_cond_->Broadcast(); } - VLOG(threads) << *self << " ResumeAll complete"; + VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete"; } -void ThreadList::Resume(Thread* thread, bool for_debugger) { - DCHECK(thread != Thread::Current()); - - if (!for_debugger) { // The debugger is very naughty. See Dbg::InvokeMethod. - thread_list_lock_.AssertHeld(); - } - - VLOG(threads) << "Resume(" << *thread << ") starting..." << (for_debugger ? 
" (debugger)" : ""); - - { - MutexLock mu(thread_suspend_count_lock_); - if (!Contains(thread)) { - return; +void ThreadList::WaitForOtherNonDaemonThreadsToExit() { + GlobalSynchronization::mutator_lock_->AssertNotHeld(); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + bool all_threads_are_daemons; + do { + all_threads_are_daemons = true; + for (It it = list_.begin(), end = list_.end(); it != end; ++it) { + // TODO: there's a race here with thread exit that's being worked around by checking if the + // thread has a peer. + Thread* thread = *it; + if (thread != Thread::Current() && thread->HasPeer() && !thread->IsDaemon()) { + all_threads_are_daemons = false; + break; + } } - ModifySuspendCount(thread, -1, for_debugger); - } - - { - VLOG(threads) << "Resume(" << *thread << ") waking others"; - MutexLock mu(thread_suspend_count_lock_); - thread_suspend_count_cond_.Broadcast(); - } - - VLOG(threads) << "Resume(" << *thread << ") complete"; -} - -void ThreadList::RunWhileSuspended(Thread* thread, void (*callback)(void*), void* arg) { // NOLINT - DCHECK(thread != NULL); - Thread* self = Thread::Current(); - if (thread != self) { - Suspend(thread); - } - callback(arg); - if (thread != self) { - Resume(thread); - } + if (!all_threads_are_daemons) { + // Wait for another thread to exit before re-checking. + thread_exit_cond_.Wait(*GlobalSynchronization::thread_list_lock_); + } + } while(!all_threads_are_daemons); } -void ThreadList::UndoDebuggerSuspensions() { - Thread* self = Thread::Current(); - - VLOG(threads) << *self << " UndoDebuggerSuspensions starting"; - - { - ScopedThreadListLock thread_list_lock; - MutexLock mu(thread_suspend_count_lock_); +void ThreadList::SuspendAllDaemonThreads() { + MutexLock mu(*GlobalSynchronization::thread_list_lock_); + { // Tell all the daemons it's time to suspend. 
+ MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; - if (thread == self || thread->debug_suspend_count_ == 0) { - continue; + // This is only run after all non-daemon threads have exited, so the remainder should all be + // daemons. + CHECK(thread->IsDaemon()); + if (thread != Thread::Current()) { + ++thread->suspend_count_; } - ModifySuspendCount(thread, -thread->debug_suspend_count_, true); } } - - { - MutexLock mu(thread_suspend_count_lock_); - thread_suspend_count_cond_.Broadcast(); + // Give the threads a chance to suspend, complaining if they're slow. + bool have_complained = false; + for (int i = 0; i < 10; ++i) { + usleep(200 * 1000); + bool all_suspended = true; + for (It it = list_.begin(), end = list_.end(); it != end; ++it) { + Thread* thread = *it; + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + if (thread != Thread::Current() && thread->GetState() == kRunnable) { + if (!have_complained) { + LOG(WARNING) << "daemon thread not yet suspended: " << *thread; + have_complained = true; + } + all_suspended = false; + } + } + if (all_suspended) { + return; + } } - - VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete"; + LOG(ERROR) << "suspend all daemons failed"; } +void ThreadList::Register(Thread* self) { + DCHECK_EQ(self, Thread::Current()); -void ThreadList::Register() { - Thread* self = Thread::Current(); - - VLOG(threads) << "ThreadList::Register() " << *self << "\n" << Dumpable(*self); + if (VLOG_IS_ON(threads)) { + std::ostringstream oss; + self->ShortDump(oss); // We don't hold the mutator_lock_ yet and so cannot call Dump. + LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss; + } - ScopedThreadListLock thread_list_lock; + // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing + // SuspendAll requests. 
+ MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + self->suspend_count_ = suspend_all_count_; + self->debug_suspend_count_ = debug_suspend_all_count_; CHECK(!Contains(self)); list_.push_back(self); } -void ThreadList::Unregister() { - Thread* self = Thread::Current(); +void ThreadList::Unregister(Thread* self) { + DCHECK_EQ(self, Thread::Current()); VLOG(threads) << "ThreadList::Unregister() " << *self; @@ -380,163 +440,40 @@ void ThreadList::Unregister() { { // Remove this thread from the list. - ScopedThreadListLock thread_list_lock; + MutexLock mu(*GlobalSynchronization::thread_list_lock_); CHECK(Contains(self)); list_.remove(self); } // Delete the Thread* and release the thin lock id. uint32_t thin_lock_id = self->thin_lock_id_; - delete self; ReleaseThreadId(thin_lock_id); + delete self; // Clear the TLS data, so that the underlying native thread is recognizably detached. // (It may wish to reattach later.) CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self"); // Signal that a thread just detached. + MutexLock mu(*GlobalSynchronization::thread_list_lock_); thread_exit_cond_.Signal(); } void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) { - ScopedThreadListLock thread_list_lock; for (It it = list_.begin(), end = list_.end(); it != end; ++it) { callback(*it, context); } } void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { - ScopedThreadListLock thread_list_lock; + MutexLock mu(*GlobalSynchronization::thread_list_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->VisitRoots(visitor, arg); } } -/* - * Tell a new thread it's safe to start. - * - * We must hold the thread list lock before messing with another thread. 
- * In the general case we would also need to verify that the new thread was - * still in the thread list, but in our case the thread has not started - * executing user code and therefore has not had a chance to exit. - * - * We move it to kVmWait, and it then shifts itself to kRunning, which - * comes with a suspend-pending check. We do this after - */ -void ThreadList::SignalGo(Thread* child) { - Thread* self = Thread::Current(); - CHECK(child != self); - - { - ScopedThreadListLock thread_list_lock; - VLOG(threads) << *self << " waiting for child " << *child << " to be in thread list..."; - - // We wait for the child to tell us that it's in the thread list. - while (child->GetState() != kStarting) { - thread_start_cond_.Wait(thread_list_lock_); - } - } - - // If we switch out of runnable and then back in, we know there's no pending suspend. - self->SetState(kVmWait); - self->SetState(kRunnable); - - // Tell the child that it's safe: it will see any future suspend request. - ScopedThreadListLock thread_list_lock; - VLOG(threads) << *self << " telling child " << *child << " it's safe to proceed..."; - child->SetState(kVmWait); - thread_start_cond_.Broadcast(); -} - -void ThreadList::WaitForGo() { - Thread* self = Thread::Current(); - - { - ScopedThreadListLock thread_list_lock; - DCHECK(Contains(self)); - - // Tell our parent that we're in the thread list. - VLOG(threads) << *self << " telling parent that we're now in thread list..."; - self->SetState(kStarting); - thread_start_cond_.Broadcast(); - - // Wait until our parent tells us there's no suspend still pending - // from before we were on the thread list. - VLOG(threads) << *self << " waiting for parent's go-ahead..."; - while (self->GetState() != kVmWait) { - thread_start_cond_.Wait(thread_list_lock_); - } - } - - // Enter the runnable state. We know that any pending suspend will affect us now. - VLOG(threads) << *self << " entering runnable state..."; - // Lock and unlock the heap lock. 
This ensures that if there was a GC in progress when we - // started, we wait until it's over. Which means that if there's now another GC pending, our - // suspend count is non-zero, so switching to the runnable state will suspend us. - // TODO: find a better solution! - { - ScopedHeapLock heap_lock; - } - self->SetState(kRunnable); -} - -bool ThreadList::AllOtherThreadsAreDaemons() { - for (It it = list_.begin(), end = list_.end(); it != end; ++it) { - // TODO: there's a race here with thread exit that's being worked around by checking if the peer - // is null. - Thread* thread = *it; - if (thread != Thread::Current() && thread->GetPeer() != NULL && !thread->IsDaemon()) { - return false; - } - } - return true; -} - -void ThreadList::WaitForOtherNonDaemonThreadsToExit() { - ScopedThreadListLock thread_list_lock; - while (!AllOtherThreadsAreDaemons()) { - thread_exit_cond_.Wait(thread_list_lock_); - } -} - -void ThreadList::SuspendAllDaemonThreads() { - ScopedThreadListLock thread_list_lock; - - // Tell all the daemons it's time to suspend. - { - MutexLock mu(thread_suspend_count_lock_); - for (It it = list_.begin(), end = list_.end(); it != end; ++it) { - Thread* thread = *it; - if (thread != Thread::Current()) { - ++thread->suspend_count_; - } - } - } - - // Give the threads a chance to suspend, complaining if they're slow. 
- bool have_complained = false; - for (int i = 0; i < 10; ++i) { - usleep(200 * 1000); - bool all_suspended = true; - for (It it = list_.begin(), end = list_.end(); it != end; ++it) { - Thread* thread = *it; - if (thread != Thread::Current() && thread->GetState() == kRunnable) { - if (!have_complained) { - LOG(WARNING) << "daemon thread not yet suspended: " << *thread; - have_complained = true; - } - all_suspended = false; - } - } - if (all_suspended) { - return; - } - } -} - uint32_t ThreadList::AllocThreadId() { MutexLock mu(allocated_ids_lock_); - //ScopedThreadListLock thread_list_lock; for (size_t i = 0; i < allocated_ids_.size(); ++i) { if (!allocated_ids_[i]) { allocated_ids_.set(i); diff --git a/src/thread_list.h b/src/thread_list.h index ef475feb6b..e5b911489a 100644 --- a/src/thread_list.h +++ b/src/thread_list.h @@ -33,66 +33,98 @@ class ThreadList { explicit ThreadList(); ~ThreadList(); - void DumpForSigQuit(std::ostream& os); - void DumpLocked(std::ostream& os); // For thread suspend timeout dumps. + void DumpForSigQuit(std::ostream& os) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void DumpLocked(std::ostream& os) // For thread suspend timeout dumps. + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); pid_t GetLockOwner(); // For SignalCatcher. // Thread suspension support. 
- void FullSuspendCheck(Thread* thread); - void ResumeAll(bool for_debugger = false); - void Resume(Thread* thread, bool for_debugger = false); - void RunWhileSuspended(Thread* thread, void (*callback)(void*), void* arg); // NOLINT - void SuspendAll(bool for_debugger = false); - void SuspendSelfForDebugger(); - void Suspend(Thread* thread, bool for_debugger = false); - void UndoDebuggerSuspensions(); + void ResumeAll() + UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + void Resume(Thread* thread, bool for_debugger = false) + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + + // Suspends all threads and gets exclusive access to the mutator_lock_. + void SuspendAll() + EXCLUSIVE_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + + // Suspends all threads + void SuspendAllForDebugger() + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + + void SuspendSelfForDebugger() + LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + + void UndoDebuggerSuspensions() + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); // Iterates over all the threads. - void ForEach(void (*callback)(Thread*, void*), void* context); + void ForEach(void (*callback)(Thread*, void*), void* context) + EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); - void Register(); - void Unregister(); + // Add/remove current thread from list. 
+ void Register(Thread* self) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_); + void Unregister(Thread* self) + LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, + GlobalSynchronization::thread_list_lock_); - void VisitRoots(Heap::RootVisitor* visitor, void* arg) const; + void VisitRoots(Heap::RootVisitor* visitor, void* arg) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - // Handshaking for new thread creation. - void SignalGo(Thread* child); - void WaitForGo(); + // Return a copy of the thread list. + std::list GetList() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_) { + return list_; + } private: typedef std::list::const_iterator It; // TODO: C++0x auto uint32_t AllocThreadId(); - void ReleaseThreadId(uint32_t id); + void ReleaseThreadId(uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_); - bool Contains(Thread* thread); - bool Contains(pid_t tid); + bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); + bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); - void DumpUnattachedThreads(std::ostream& os); + void DumpUnattachedThreads(std::ostream& os) + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); - bool AllOtherThreadsAreDaemons(); - void SuspendAllDaemonThreads(); - void WaitForOtherNonDaemonThreadsToExit(); + void SuspendAllDaemonThreads() + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); + void WaitForOtherNonDaemonThreadsToExit() + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); - static void ModifySuspendCount(Thread* thread, int delta, bool for_debugger); + void AssertThreadsAreSuspended() + LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, + GlobalSynchronization::thread_suspend_count_lock_); - mutable Mutex allocated_ids_lock_; + 
mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::bitset allocated_ids_ GUARDED_BY(allocated_ids_lock_); - mutable Mutex thread_list_lock_; - std::list list_; // TODO: GUARDED_BY(thread_list_lock_); + // The actual list of all threads. + std::list list_ GUARDED_BY(GlobalSynchronization::thread_list_lock_); - ConditionVariable thread_start_cond_; - ConditionVariable thread_exit_cond_; + // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll. + int suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + int debug_suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); - // This lock guards every thread's suspend_count_ field... - mutable Mutex thread_suspend_count_lock_; - // ...and is used in conjunction with this condition variable. - ConditionVariable thread_suspend_count_cond_ GUARDED_BY(thread_suspend_count_lock_); + // Signaled when threads terminate. Used to determine when all non-daemons have terminated. 
+ ConditionVariable thread_exit_cond_ GUARDED_BY(GlobalSynchronization::thread_list_lock_); friend class Thread; - friend class ScopedThreadListLock; - friend class ScopedThreadListLockReleaser; DISALLOW_COPY_AND_ASSIGN(ThreadList); }; diff --git a/src/trace.cc b/src/trace.cc index cd594cf65b..5ac7e3d6f8 100644 --- a/src/trace.cc +++ b/src/trace.cc @@ -26,7 +26,7 @@ #endif #include "object_utils.h" #include "os.h" -#include "scoped_thread_list_lock.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" @@ -158,7 +158,8 @@ static void Append8LE(uint8_t* buf, uint64_t val) { *buf++ = (uint8_t) (val >> 56); } -static bool InstallStubsClassVisitor(Class* klass, void*) { +static bool InstallStubsClassVisitor(Class* klass, void*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Trace* tracer = Runtime::Current()->GetTracer(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { Method* method = klass->GetDirectMethod(i); @@ -176,7 +177,8 @@ static bool InstallStubsClassVisitor(Class* klass, void*) { return true; } -static bool UninstallStubsClassVisitor(Class* klass, void*) { +static bool UninstallStubsClassVisitor(Class* klass, void*) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Trace* tracer = Runtime::Current()->GetTracer(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { Method* method = klass->GetDirectMethod(i); @@ -214,6 +216,7 @@ static void TraceRestoreStack(Thread* self, void*) { Thread* self_; }; + ScopedObjectAccess soa(self); RestoreStackVisitor visitor(self); visitor.WalkStack(); } @@ -266,7 +269,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int } ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Runtime::Current()->GetThreadList()->SuspendAll(false); + Runtime::Current()->GetThreadList()->SuspendAll(); // Open trace file if not going directly to ddms. 
File* trace_file = NULL; @@ -280,7 +283,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", StringPrintf("Unable to open trace file '%s'", trace_filename).c_str()); - Runtime::Current()->GetThreadList()->ResumeAll(false); + Runtime::Current()->GetThreadList()->ResumeAll(); return; } } @@ -296,7 +299,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int Runtime::Current()->EnableMethodTracing(tracer); tracer->BeginTracing(); - Runtime::Current()->GetThreadList()->ResumeAll(false); + Runtime::Current()->GetThreadList()->ResumeAll(); } void Trace::Stop() { @@ -306,12 +309,12 @@ void Trace::Stop() { } ScopedThreadStateChange tsc(Thread::Current(), kRunnable); - Runtime::Current()->GetThreadList()->SuspendAll(false); + Runtime::Current()->GetThreadList()->SuspendAll(); Runtime::Current()->GetTracer()->FinishTracing(); Runtime::Current()->DisableMethodTracing(); - Runtime::Current()->GetThreadList()->ResumeAll(false); + Runtime::Current()->GetThreadList()->ResumeAll(); } void Trace::Shutdown() { @@ -486,6 +489,8 @@ static void DumpThread(Thread* t, void* arg) { } void Trace::DumpThreadList(std::ostream& os) { + GlobalSynchronization::thread_list_lock_->AssertNotHeld(); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os); } @@ -494,7 +499,9 @@ void Trace::InstallStubs() { } void Trace::UninstallStubs() { + GlobalSynchronization::thread_list_lock_->AssertNotHeld(); Runtime::Current()->GetClassLinker()->VisitClasses(UninstallStubsClassVisitor, NULL); + MutexLock mu(*GlobalSynchronization::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(TraceRestoreStack, NULL); } diff --git a/src/trace.h b/src/trace.h index 0042fc27fa..c60ae1576b 100644 --- a/src/trace.h +++ b/src/trace.h @@ -65,7 +65,7 
@@ class Trace { static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, bool direct_to_ddms); static void Stop(); - static void Shutdown(); + static void Shutdown() NO_THREAD_SAFETY_ANALYSIS; // TODO: implement appropriate locking. bool UseWallClock(); bool UseThreadCpuClock(); @@ -83,18 +83,18 @@ class Trace { explicit Trace(File* trace_file, int buffer_size, int flags); void BeginTracing(); - void FinishTracing(); + void FinishTracing() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Replaces code of each method with a pointer to a stub for method tracing. void InstallStubs(); // Restores original code for each method and fixes the return values of each thread's stack. - void UninstallStubs(); + void UninstallStubs() LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); // Methods to output traced methods and threads. void GetVisitedMethods(size_t end_offset); - void DumpMethodList(std::ostream& os); - void DumpThreadList(std::ostream& os); + void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); // Maps a method to its original code pointer. SafeMap saved_code_map_; diff --git a/src/utf.h b/src/utf.h index 0758896125..dd5791c5a4 100644 --- a/src/utf.h +++ b/src/utf.h @@ -19,6 +19,7 @@ #include #include +#include "macros.h" /* * All UTF-8 in art is actually modified UTF-8. Mostly, this distinction @@ -63,7 +64,8 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t /* * The java.lang.String hashCode() algorithm. 
*/ -int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, size_t char_count); +int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, size_t char_count) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count); /* diff --git a/src/utils.h b/src/utils.h index a31588b966..851c6b1bbe 100644 --- a/src/utils.h +++ b/src/utils.h @@ -167,30 +167,36 @@ bool EndsWith(const std::string& s, const char* suffix); std::string PrettyDescriptor(const String* descriptor); std::string PrettyDescriptor(const std::string& descriptor); std::string PrettyDescriptor(Primitive::Type type); -std::string PrettyDescriptor(const Class* klass); +std::string PrettyDescriptor(const Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or // "int a.b.C.f" (depending on the value of 'with_type'). -std::string PrettyField(const Field* f, bool with_type = true); +std::string PrettyField(const Field* f, bool with_type = true) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true); // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). -std::string PrettyMethod(const Method* m, bool with_signature = true); +std::string PrettyMethod(const Method* m, bool with_signature = true) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); // Returns a human-readable form of the name of the *class* of the given object. // So given an instance of java.lang.String, the output would // be "java.lang.String". Given an array of int, the output would be "int[]". // Given String.class, the output would be "java.lang.Class". 
-std::string PrettyTypeOf(const Object* obj); +std::string PrettyTypeOf(const Object* obj) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns a human-readable form of the name of the given class. // Given String.class, the output would be "java.lang.Class". -std::string PrettyClass(const Class* c); +std::string PrettyClass(const Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns a human-readable form of the name of the given class with its class loader. -std::string PrettyClassAndClassLoader(const Class* c); +std::string PrettyClassAndClassLoader(const Class* c) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns a human-readable size string such as "1MB". std::string PrettySize(size_t size_in_bytes); @@ -223,9 +229,11 @@ bool IsValidDescriptor(const char* s); // "Ljava/lang/String;" bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. -std::string JniShortName(const Method* m); +std::string JniShortName(const Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. 
-std::string JniLongName(const Method* m); +std::string JniLongName(const Method* m) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); diff --git a/src/utils_test.cc b/src/utils_test.cc index 311bd16aaa..e6ff118b8a 100644 --- a/src/utils_test.cc +++ b/src/utils_test.cc @@ -16,6 +16,7 @@ #include "object.h" #include "common_test.h" +#include "scoped_thread_state_change.h" #include "utils.h" namespace art { @@ -84,6 +85,7 @@ TEST_F(UtilsTest, PrettyReturnType) { } TEST_F(UtilsTest, PrettyTypeOf) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyTypeOf(NULL)); SirtRef s(String::AllocFromModifiedUtf8("")); @@ -100,6 +102,7 @@ TEST_F(UtilsTest, PrettyTypeOf) { } TEST_F(UtilsTest, PrettyClass) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyClass(NULL)); Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); ASSERT_TRUE(c != NULL); @@ -108,6 +111,7 @@ TEST_F(UtilsTest, PrettyClass) { } TEST_F(UtilsTest, PrettyClassAndClassLoader) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyClassAndClassLoader(NULL)); Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); ASSERT_TRUE(c != NULL); @@ -116,6 +120,7 @@ TEST_F(UtilsTest, PrettyClassAndClassLoader) { } TEST_F(UtilsTest, PrettyField) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyField(NULL)); Class* java_lang_String = class_linker_->FindSystemClass("Ljava/lang/String;"); @@ -177,6 +182,7 @@ TEST_F(UtilsTest, PrettyDuration) { } TEST_F(UtilsTest, MangleForJni) { + ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("hello_00024world", MangleForJni("hello$world")); EXPECT_EQ("hello_000a9world", MangleForJni("hello\xc2\xa9world")); EXPECT_EQ("hello_1world", MangleForJni("hello_world")); @@ -185,6 +191,7 @@ TEST_F(UtilsTest, MangleForJni) { } TEST_F(UtilsTest, JniShortName_JniLongName) { + ScopedObjectAccess 
soa(Thread::Current()); Class* c = class_linker_->FindSystemClass("Ljava/lang/String;"); ASSERT_TRUE(c != NULL); Method* m; diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc index 5e98b1e8a4..167e1e5388 100644 --- a/src/verifier/method_verifier.cc +++ b/src/verifier/method_verifier.cc @@ -313,7 +313,7 @@ void MethodVerifier::VerifyMethodAndDump(Method* method) { method, method->GetAccessFlags()); verifier.Verify(); verifier.DumpFailures(LOG(INFO) << "Dump of method " << PrettyMethod(method) << "\n") - << verifier.info_messages_.str() << Dumpable(verifier); + << verifier.info_messages_.str() << MutatorLockedDumpable(verifier); } MethodVerifier::MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, @@ -1026,7 +1026,8 @@ std::ostream& MethodVerifier::DumpFailures(std::ostream& os) { return os; } -extern "C" void MethodVerifierGdbDump(MethodVerifier* v) { +extern "C" void MethodVerifierGdbDump(MethodVerifier* v) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { v->Dump(std::cerr); } @@ -3327,13 +3328,15 @@ void MethodVerifier::VerifyGcMap(const std::vector& data) { } void MethodVerifier::SetGcMap(Compiler::MethodReference ref, const std::vector& gc_map) { - MutexLock mu(*gc_maps_lock_); - GcMapTable::iterator it = gc_maps_->find(ref); - if (it != gc_maps_->end()) { - delete it->second; - gc_maps_->erase(it); + { + MutexLock mu(*gc_maps_lock_); + GcMapTable::iterator it = gc_maps_->find(ref); + if (it != gc_maps_->end()) { + delete it->second; + gc_maps_->erase(it); + } + gc_maps_->Put(ref, &gc_map); } - gc_maps_->Put(ref, &gc_map); CHECK(GetGcMap(ref) != NULL); } @@ -3411,8 +3414,10 @@ void MethodVerifier::Shutdown() { } void MethodVerifier::AddRejectedClass(Compiler::ClassReference ref) { - MutexLock mu(*rejected_classes_lock_); - rejected_classes_->insert(ref); + { + MutexLock mu(*rejected_classes_lock_); + rejected_classes_->insert(ref); + } CHECK(IsClassRejected(ref)); } diff --git a/src/verifier/method_verifier.h 
b/src/verifier/method_verifier.h index b2a88b75c2..a67366fb24 100644 --- a/src/verifier/method_verifier.h +++ b/src/verifier/method_verifier.h @@ -162,10 +162,12 @@ class MethodVerifier { }; /* Verify a class. Returns "kNoFailure" on success. */ - static FailureKind VerifyClass(const Class* klass, std::string& error); + static FailureKind VerifyClass(const Class* klass, std::string& error) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static FailureKind VerifyClass(const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, - std::string& error); + std::string& error) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); uint8_t EncodePcToReferenceMapData() const; @@ -191,13 +193,16 @@ class MethodVerifier { // Dump the state of the verifier, namely each instruction, what flags are set on it, register // information - void Dump(std::ostream& os); + void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static const std::vector* GetGcMap(Compiler::MethodReference ref); + static const std::vector* GetGcMap(Compiler::MethodReference ref) + LOCKS_EXCLUDED(gc_maps_lock_); // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding // to the locks held at 'dex_pc' in 'm'. 
- static void FindLocksAtDexPc(Method* m, uint32_t dex_pc, std::vector& monitor_enter_dex_pcs); + static void FindLocksAtDexPc(Method* m, uint32_t dex_pc, + std::vector& monitor_enter_dex_pcs) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static void Init(); static void Shutdown(); @@ -206,12 +211,14 @@ class MethodVerifier { static const InferredRegCategoryMap* GetInferredRegCategoryMap(Compiler::MethodReference ref); #endif - static bool IsClassRejected(Compiler::ClassReference ref); + static bool IsClassRejected(Compiler::ClassReference ref) + LOCKS_EXCLUDED(rejected_classes_lock_); private: explicit MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - uint32_t method_idx, Method* method, uint32_t access_flags); + uint32_t method_idx, Method* method, uint32_t access_flags) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -232,14 +239,16 @@ class MethodVerifier { */ static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - Method* method, uint32_t method_access_flags); - static void VerifyMethodAndDump(Method* method); + Method* method, uint32_t method_access_flags) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void VerifyMethodAndDump(Method* method) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Run verification on the method. Returns true if verification completes and false if the input // has an irrecoverable corruption. 
- bool Verify(); + bool Verify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void FindLocksAtDexPc(); + void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Compute the width of the instruction at each address in the instruction stream, and store it in @@ -267,7 +276,7 @@ class MethodVerifier { * Returns "false" if something in the exception table looks fishy, but we're expecting the * exception table to be somewhat sane. */ - bool ScanTryCatchBlocks(); + bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Perform static verification on all instructions in a method. @@ -373,11 +382,11 @@ class MethodVerifier { bool* selfOkay); /* Perform detailed code-flow analysis on a single method. */ - bool VerifyCodeFlow(); + bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Set the register types for the first instruction in the method based on the method signature. // This has the side-effect of validating the signature. - bool SetTypesFromSignature(); + bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Perform code flow on a method. @@ -425,7 +434,7 @@ class MethodVerifier { * reordering by specifying that you can't execute the new-instance instruction if a register * contains an uninitialized instance created by that same instruction. */ - bool CodeFlowVerifyMethod(); + bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Perform verification for a single instruction. @@ -436,54 +445,62 @@ class MethodVerifier { * point needs to be (re-)evaluated. Register changes are merged into "reg_types_" at the target * addresses. Does not set or clear any other flags in "insn_flags_". 
*/ - bool CodeFlowVerifyInstruction(uint32_t* start_guess); + bool CodeFlowVerifyInstruction(uint32_t* start_guess) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Perform verification of a new array instruction void VerifyNewArray(const DecodedInstruction& dec_insn, bool is_filled, - bool is_range); + bool is_range) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Perform verification of an aget instruction. The destination register's type will be set to // be that of component type of the array unless the array type is unknown, in which case a // bottom type inferred from the type of instruction is used. is_primitive is false for an // aget-object. void VerifyAGet(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive); + bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Perform verification of an aput instruction. void VerifyAPut(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive); + bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Lookup instance field and fail for resolution violations - Field* GetInstanceField(const RegType& obj_type, int field_idx); + Field* GetInstanceField(const RegType& obj_type, int field_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Lookup static field and fail for resolution violations - Field* GetStaticField(int field_idx); + Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Perform verification of an iget or sget instruction. void VerifyISGet(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive, bool is_static); + bool is_primitive, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Perform verification of an iput or sput instruction. 
void VerifyISPut(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive, bool is_static); + bool is_primitive, bool is_static) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Resolves a class based on an index and performs access checks to ensure the referrer can // access the resolved class. - const RegType& ResolveClassAndCheckAccess(uint32_t class_idx); + const RegType& ResolveClassAndCheckAccess(uint32_t class_idx) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler * address, determine the Join of all exceptions that can land here. Fails if no matching * exception handler can be found or if the Join of exception types fails. */ - const RegType& GetCaughtExceptionType(); + const RegType& GetCaughtExceptionType() + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Resolves a method based on an index and performs access checks to ensure * the referrer can access the resolved method. * Does not throw exceptions. */ - Method* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type); + Method* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify the arguments to a method. We're executing in "method", making @@ -508,7 +525,8 @@ class MethodVerifier { * set appropriately). */ Method* VerifyInvocationArgs(const DecodedInstruction& dec_insn, - MethodType method_type, bool is_range, bool is_super); + MethodType method_type, bool is_range, bool is_super) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify that the target instruction is not "move-exception". It's important that the only way @@ -528,7 +546,8 @@ class MethodVerifier { * next_insn, and set the changed flag on the target address if any of the registers were changed. * Returns "false" if an error is encountered. 
*/ - bool UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line); + bool UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Is the method being verified a constructor? bool IsConstructor() const { @@ -541,10 +560,10 @@ class MethodVerifier { } // Return the register type for the method. - const RegType& GetMethodReturnType(); + const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Get a type representing the declaring class of the method. - const RegType& GetDeclaringClass(); + const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); #if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER) /* @@ -572,25 +591,27 @@ class MethodVerifier { // All the GC maps that the verifier has created typedef SafeMap*> GcMapTable; - static Mutex* gc_maps_lock_; - static GcMapTable* gc_maps_; - static void SetGcMap(Compiler::MethodReference ref, const std::vector& gc_map); + static Mutex* gc_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + static GcMapTable* gc_maps_ GUARDED_BY(gc_maps_lock_); + static void SetGcMap(Compiler::MethodReference ref, const std::vector& gc_map) + LOCKS_EXCLUDED(gc_maps_lock_); typedef std::set RejectedClassesTable; - static Mutex* rejected_classes_lock_; + static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; static RejectedClassesTable* rejected_classes_; #if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER) // All the inferred register category maps that the verifier has created. 
typedef SafeMap InferredRegCategoryMapTable; - static Mutex* inferred_reg_category_maps_lock_; + static Mutex* inferred_reg_category_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; static InferredRegCategoryMapTable* inferred_reg_category_maps_; static void SetInferredRegCategoryMap(Compiler::MethodReference ref, const InferredRegCategoryMap& m); #endif - static void AddRejectedClass(Compiler::ClassReference ref); + static void AddRejectedClass(Compiler::ClassReference ref) + LOCKS_EXCLUDED(rejected_classes_lock_); RegTypeCache reg_types_; @@ -607,11 +628,14 @@ class MethodVerifier { UniquePtr saved_line_; uint32_t method_idx_; // The method we're working on. - Method* foo_method_; // Its object representation if known. + // Its object representation if known. + Method* foo_method_ GUARDED_BY(GlobalSynchronization::mutator_lock_); uint32_t method_access_flags_; // Method's access flags. const DexFile* dex_file_; // The dex file containing the method. - DexCache* dex_cache_; // The dex_cache for the declaring class of the method. - ClassLoader* class_loader_; // The class loader for the declaring class of the method. + // The dex_cache for the declaring class of the method. + DexCache* dex_cache_ GUARDED_BY(GlobalSynchronization::mutator_lock_); + // The class loader for the declaring class of the method. + ClassLoader* class_loader_ GUARDED_BY(GlobalSynchronization::mutator_lock_); uint32_t class_def_idx_; // The class def index of the declaring class of the method. const DexFile::CodeItem* code_item_; // The code item containing the code for the method. UniquePtr insn_flags_; // Instruction widths and flags, one entry per code unit. 
diff --git a/src/verifier/method_verifier_test.cc b/src/verifier/method_verifier_test.cc index 5c23e9fb79..e52feb3258 100644 --- a/src/verifier/method_verifier_test.cc +++ b/src/verifier/method_verifier_test.cc @@ -27,7 +27,8 @@ namespace verifier { class MethodVerifierTest : public CommonTest { protected: - void VerifyClass(const std::string& descriptor) { + void VerifyClass(const std::string& descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); @@ -36,7 +37,8 @@ class MethodVerifierTest : public CommonTest { ASSERT_TRUE(MethodVerifier::VerifyClass(klass, error_msg) == MethodVerifier::kNoFailure) << error_msg; } - void VerifyDexFile(const DexFile* dex) { + void VerifyDexFile(const DexFile* dex) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { ASSERT_TRUE(dex != NULL); // Verify all the classes defined in this file @@ -49,12 +51,14 @@ class MethodVerifierTest : public CommonTest { }; TEST_F(MethodVerifierTest, LibCore) { + ScopedObjectAccess soa(Thread::Current()); VerifyDexFile(java_lang_dex_file_); } TEST_F(MethodVerifierTest, IntMath) { - SirtRef class_loader(LoadDex("IntMath")); - Class* klass = class_linker_->FindClass("LIntMath;", class_loader.get()); + ScopedObjectAccess soa(Thread::Current()); + jobject class_loader = LoadDex("IntMath"); + Class* klass = class_linker_->FindClass("LIntMath;", soa.Decode(class_loader)); std::string error_msg; ASSERT_TRUE(MethodVerifier::VerifyClass(klass, error_msg) == MethodVerifier::kNoFailure) << error_msg; } diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc index 8d1df22c65..281d96e6b4 100644 --- a/src/verifier/reg_type.cc +++ b/src/verifier/reg_type.cc @@ -384,7 +384,8 @@ Class* RegType::ClassJoin(Class* s, Class* t) { } } -std::ostream& operator<<(std::ostream& os, const RegType& rhs) { +std::ostream& operator<<(std::ostream& os, const RegType& rhs) + 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os << rhs.Dump(); return os; } diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h index 7e8fca1051..a0e2ff8525 100644 --- a/src/verifier/reg_type.h +++ b/src/verifier/reg_type.h @@ -117,7 +117,8 @@ class RegType { } // The high half that corresponds to this low half - const RegType& HighHalf(RegTypeCache* cache) const; + const RegType& HighHalf(RegTypeCache* cache) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool IsConstant() const { return type_ == kRegTypeConst; } bool IsLongConstant() const { return type_ == kRegTypeConstLo; } @@ -208,7 +209,7 @@ class RegType { return IsReference() && GetClass()->IsObjectClass(); } - bool IsArrayTypes() const { + bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { return GetDescriptor()->CharAt(0) == '['; } else if (IsReference()) { @@ -218,7 +219,7 @@ class RegType { } } - bool IsObjectArrayTypes() const { + bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { // Primitive arrays will always resolve DCHECK(GetDescriptor()->CharAt(1) == 'L' || GetDescriptor()->CharAt(1) == '['); @@ -293,22 +294,28 @@ class RegType { return static_cast(allocation_pc_or_constant_or_merged_types_ & 0xFFFF); } - const RegType& GetSuperClass(RegTypeCache* cache) const; + const RegType& GetSuperClass(RegTypeCache* cache) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - std::string Dump(const RegTypeCache* reg_types = NULL) const; + std::string Dump(const RegTypeCache* reg_types = NULL) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Can this type access other? 
- bool CanAccess(const RegType& other) const; + bool CanAccess(const RegType& other) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Can this type access a member with the given properties? - bool CanAccessMember(Class* klass, uint32_t access_flags) const; + bool CanAccessMember(Class* klass, uint32_t access_flags) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Can this type be assigned by src? - bool IsAssignableFrom(const RegType& src) const; + bool IsAssignableFrom(const RegType& src) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); bool Equals(const RegType& other) const { return GetId() == other.GetId(); } // Compute the merge of this register from one edge (path) with incoming_type from another. - const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const; + const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * A basic Join operation on classes. 
For a pair of types S and T the Join, written S v T = J, is @@ -326,7 +333,8 @@ class RegType { * * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ - static Class* ClassJoin(Class* s, Class* t); + static Class* ClassJoin(Class* s, Class* t) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: friend class RegTypeCache; diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h index 4ba667b368..1287388e07 100644 --- a/src/verifier/reg_type_cache.h +++ b/src/verifier/reg_type_cache.h @@ -40,32 +40,68 @@ class RegTypeCache { return *result; } - const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor); - const RegType& FromClass(Class* klass); + const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const RegType& FromClass(Class* klass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); const RegType& FromCat1Const(int32_t value); - const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor); - const RegType& FromType(RegType::Type); + const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const RegType& FromType(RegType::Type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right); const RegType& FromUnresolvedSuperClass(const RegType& child); - const RegType& Boolean() { return FromType(RegType::kRegTypeBoolean); } - const RegType& Byte() { return FromType(RegType::kRegTypeByte); } - const RegType& Char() { return FromType(RegType::kRegTypeChar); } - const RegType& Short() { return FromType(RegType::kRegTypeShort); } - const RegType& Integer() { return FromType(RegType::kRegTypeInteger); } - const RegType& Float() { return FromType(RegType::kRegTypeFloat); } - const RegType& Long() { 
return FromType(RegType::kRegTypeLongLo); } - const RegType& Double() { return FromType(RegType::kRegTypeDoubleLo); } - - const RegType& JavaLangClass() { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Class;"); } - const RegType& JavaLangObject() { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Object;"); } - const RegType& JavaLangString() { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/String;"); } - const RegType& JavaLangThrowable() { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Throwable;"); } - - const RegType& Undefined() { return FromType(RegType::kRegTypeUndefined); } - const RegType& Conflict() { return FromType(RegType::kRegTypeConflict); } - const RegType& ConstLo() { return FromType(RegType::kRegTypeConstLo); } - const RegType& Zero() { return FromCat1Const(0); } + const RegType& Boolean() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeBoolean); + } + const RegType& Byte() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeByte); + } + const RegType& Char() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeChar); + } + const RegType& Short() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeShort); + } + const RegType& Integer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeInteger); + } + const RegType& Float() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeFloat); + } + const RegType& Long() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeLongLo); + } + const RegType& Double() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeDoubleLo); + } + + const RegType& JavaLangClass() 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Class;"); + } + const RegType& JavaLangObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Object;"); + } + const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return From(RegType::kRegTypeReference, NULL, "Ljava/lang/String;"); + } + const RegType& JavaLangThrowable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Throwable;"); + } + + const RegType& Undefined() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeUndefined); + } + const RegType& Conflict() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeConflict); + } + const RegType& ConstLo() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromType(RegType::kRegTypeConstLo); + } + const RegType& Zero() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + return FromCat1Const(0); + } const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc); // Create an uninitialized 'this' argument for the given type. 
@@ -79,7 +115,8 @@ class RegTypeCache { const RegType& ShortConstant() { return FromCat1Const(std::numeric_limits::min()); } const RegType& IntConstant() { return FromCat1Const(std::numeric_limits::max()); } - const RegType& GetComponentType(const RegType& array, ClassLoader* loader); + const RegType& GetComponentType(const RegType& array, ClassLoader* loader) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); private: // The allocated entries diff --git a/src/verifier/reg_type_test.cc b/src/verifier/reg_type_test.cc index 18c165597e..6bdf886569 100644 --- a/src/verifier/reg_type_test.cc +++ b/src/verifier/reg_type_test.cc @@ -26,6 +26,7 @@ namespace verifier { class RegTypeTest : public CommonTest {}; TEST_F(RegTypeTest, Primitives) { + ScopedObjectAccess soa(Thread::Current()); RegTypeCache cache; const RegType& bool_reg_type = cache.Boolean(); diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc index 1d4366f42a..ec7891edce 100644 --- a/src/verifier/register_line.cc +++ b/src/verifier/register_line.cc @@ -367,7 +367,8 @@ void RegisterLine::WriteReferenceBitMap(std::vector& data, size_t max_b } } -std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) { +std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { os << rhs.Dump(); return os; } diff --git a/src/verifier/register_line.h b/src/verifier/register_line.h index 9664a5b3e4..1d131ad8f4 100644 --- a/src/verifier/register_line.h +++ b/src/verifier/register_line.h @@ -61,19 +61,23 @@ class RegisterLine { } // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst". - void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat); + void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". 
This // copies both halves of the register. - void CopyRegister2(uint32_t vdst, uint32_t vsrc); + void CopyRegister2(uint32_t vdst, uint32_t vsrc) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Implement "move-result". Copy the category-1 value from the result register to another // register, and reset the result register. - void CopyResultRegister1(uint32_t vdst, bool is_reference); + void CopyResultRegister1(uint32_t vdst, bool is_reference) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Implement "move-result-wide". Copy the category-2 value from the result register to another // register, and reset the result register. - void CopyResultRegister2(uint32_t vdst); + void CopyResultRegister2(uint32_t vdst) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Set the invisible result register to unknown void SetResultTypeToUnknown(); @@ -81,15 +85,18 @@ class RegisterLine { // Set the type of register N, verifying that the register is valid. If "newType" is the "Lo" // part of a 64-bit value, register N+1 will be set to "newType+1". // The register index was validated during the static pass, so we don't need to check it here. - bool SetRegisterType(uint32_t vdst, const RegType& new_type); + bool SetRegisterType(uint32_t vdst, const RegType& new_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* Set the type of the "result" register. */ - void SetResultRegisterType(const RegType& new_type); + void SetResultRegisterType(const RegType& new_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Get the type of register vsrc. 
const RegType& GetRegisterType(uint32_t vsrc) const; - bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type); + bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void CopyFromLine(const RegisterLine* src) { DCHECK_EQ(num_regs_, src->num_regs_); @@ -98,7 +105,7 @@ class RegisterLine { reg_to_lock_depths_ = src->reg_to_lock_depths_; } - std::string Dump() const; + std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); void FillWithGarbage() { memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t)); @@ -114,7 +121,8 @@ class RegisterLine { * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and * the new ones at the same time). */ - void MarkUninitRefsAsInvalid(const RegType& uninit_type); + void MarkUninitRefsAsInvalid(const RegType& uninit_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Update all registers holding "uninit_type" to instead hold the corresponding initialized @@ -152,14 +160,16 @@ class RegisterLine { * The argument count is in vA, and the first argument is in vC, for both "simple" and "range" * versions. We just need to make sure vA is >= 1 and then return vC. */ - const RegType& GetInvocationThis(const DecodedInstruction& dec_insn); + const RegType& GetInvocationThis(const DecodedInstruction& dec_insn) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify types for a simple two-register instruction (e.g. "neg-int"). * "dst_type" is stored into vA, and "src_type" is verified against vB. */ void CheckUnaryOp(const DecodedInstruction& dec_insn, - const RegType& dst_type, const RegType& src_type); + const RegType& dst_type, const RegType& src_type) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify types for a simple three-register instruction (e.g. "add-int"). 
@@ -168,7 +178,8 @@ class RegisterLine { */ void CheckBinaryOp(const DecodedInstruction& dec_insn, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, - bool check_boolean_op); + bool check_boolean_op) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify types for a binary "2addr" operation. "src_type1"/"src_type2" @@ -177,7 +188,8 @@ class RegisterLine { void CheckBinaryOp2addr(const DecodedInstruction& dec_insn, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, - bool check_boolean_op); + bool check_boolean_op) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); /* * Verify types for A two-register instruction with a literal constant (e.g. "add-int/lit8"). @@ -186,7 +198,8 @@ class RegisterLine { * If "check_boolean_op" is set, we use the constant value in vC. */ void CheckLiteralOp(const DecodedInstruction& dec_insn, - const RegType& dst_type, const RegType& src_type, bool check_boolean_op); + const RegType& dst_type, const RegType& src_type, bool check_boolean_op) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx. void PushMonitor(uint32_t reg_idx, int32_t insn_idx); @@ -203,7 +216,8 @@ class RegisterLine { // is empty, failing and returning false if not. bool VerifyMonitorStackEmpty(); - bool MergeRegisters(const RegisterLine* incoming_line); + bool MergeRegisters(const RegisterLine* incoming_line) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) { size_t i = static_cast(max_ref_reg) < 0 ? 
0 : max_ref_reg; diff --git a/src/well_known_classes.cc b/src/well_known_classes.cc index a0397fff21..cae7369127 100644 --- a/src/well_known_classes.cc +++ b/src/well_known_classes.cc @@ -25,6 +25,7 @@ namespace art { jclass WellKnownClasses::com_android_dex_Dex; +jclass WellKnownClasses::dalvik_system_PathClassLoader; jclass WellKnownClasses::java_lang_ClassLoader; jclass WellKnownClasses::java_lang_ClassNotFoundException; jclass WellKnownClasses::java_lang_Daemons; @@ -43,14 +44,22 @@ jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk; jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer; jmethodID WellKnownClasses::com_android_dex_Dex_create; -jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init; +jmethodID WellKnownClasses::java_lang_Boolean_valueOf; +jmethodID WellKnownClasses::java_lang_Byte_valueOf; +jmethodID WellKnownClasses::java_lang_Character_valueOf; jmethodID WellKnownClasses::java_lang_ClassLoader_loadClass; +jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init; jmethodID WellKnownClasses::java_lang_Daemons_requestGC; jmethodID WellKnownClasses::java_lang_Daemons_requestHeapTrim; jmethodID WellKnownClasses::java_lang_Daemons_start; +jmethodID WellKnownClasses::java_lang_Double_valueOf; +jmethodID WellKnownClasses::java_lang_Float_valueOf; +jmethodID WellKnownClasses::java_lang_Integer_valueOf; +jmethodID WellKnownClasses::java_lang_Long_valueOf; jmethodID WellKnownClasses::java_lang_ref_FinalizerReference_add; jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add; jmethodID WellKnownClasses::java_lang_reflect_InvocationHandler_invoke; +jmethodID WellKnownClasses::java_lang_Short_valueOf; jmethodID WellKnownClasses::java_lang_Thread_init; jmethodID WellKnownClasses::java_lang_Thread_run; jmethodID WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException; @@ -102,8 +111,15 @@ static jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* return mid; } -void 
WellKnownClasses::Init(JNIEnv* env) { +static jmethodID CachePrimitiveBoxingMethod(JNIEnv* env, char prim_name, const char* boxed_name) { + ScopedLocalRef boxed_class(env, env->FindClass(boxed_name)); + return CacheMethod(env, boxed_class.get(), true, "valueOf", + StringPrintf("(%c)L%s;", prim_name, boxed_name).c_str()); +} + +void WellKnownClasses::InitClasses(JNIEnv* env) { com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex"); + dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader"); java_lang_ClassLoader = CacheClass(env, "java/lang/ClassLoader"); java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException"); java_lang_Daemons = CacheClass(env, "java/lang/Daemons"); @@ -120,6 +136,10 @@ void WellKnownClasses::Init(JNIEnv* env) { java_nio_ReadWriteDirectByteBuffer = CacheClass(env, "java/nio/ReadWriteDirectByteBuffer"); org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk"); org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer"); +} + +void WellKnownClasses::Init(JNIEnv* env) { + InitClasses(env); com_android_dex_Dex_create = CacheMethod(env, com_android_dex_Dex, true, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;"); java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "", "(Ljava/lang/String;Ljava/lang/Throwable;)V"); @@ -161,6 +181,15 @@ void WellKnownClasses::Init(JNIEnv* env) { org_apache_harmony_dalvik_ddmc_Chunk_length = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "length", "I"); org_apache_harmony_dalvik_ddmc_Chunk_offset = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "offset", "I"); org_apache_harmony_dalvik_ddmc_Chunk_type = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "type", "I"); + + java_lang_Boolean_valueOf = CachePrimitiveBoxingMethod(env, 'Z', "java/lang/Boolean"); + java_lang_Byte_valueOf = 
CachePrimitiveBoxingMethod(env, 'B', "java/lang/Byte"); + java_lang_Character_valueOf = CachePrimitiveBoxingMethod(env, 'C', "java/lang/Character"); + java_lang_Double_valueOf = CachePrimitiveBoxingMethod(env, 'D', "java/lang/Double"); + java_lang_Float_valueOf = CachePrimitiveBoxingMethod(env, 'F', "java/lang/Float"); + java_lang_Integer_valueOf = CachePrimitiveBoxingMethod(env, 'I', "java/lang/Integer"); + java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long"); + java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short"); } Class* WellKnownClasses::ToClass(jclass global_jclass) { diff --git a/src/well_known_classes.h b/src/well_known_classes.h index d2c4959e11..15961e28ae 100644 --- a/src/well_known_classes.h +++ b/src/well_known_classes.h @@ -18,6 +18,7 @@ #define ART_SRC_WELL_KNOWN_CLASSES_H_ #include "jni.h" +#include "mutex.h" namespace art { @@ -28,11 +29,14 @@ class Class; // we keep them separate). struct WellKnownClasses { + static void InitClasses(JNIEnv* env); static void Init(JNIEnv* env); - static Class* ToClass(jclass global_jclass); + static Class* ToClass(jclass global_jclass) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); static jclass com_android_dex_Dex; + static jclass dalvik_system_PathClassLoader; static jclass java_lang_ClassLoader; static jclass java_lang_ClassNotFoundException; static jclass java_lang_Daemons; @@ -51,14 +55,22 @@ struct WellKnownClasses { static jclass org_apache_harmony_dalvik_ddmc_DdmServer; static jmethodID com_android_dex_Dex_create; + static jmethodID java_lang_Boolean_valueOf; + static jmethodID java_lang_Byte_valueOf; + static jmethodID java_lang_Character_valueOf; static jmethodID java_lang_ClassLoader_loadClass; static jmethodID java_lang_ClassNotFoundException_init; static jmethodID java_lang_Daemons_requestGC; static jmethodID java_lang_Daemons_requestHeapTrim; static jmethodID java_lang_Daemons_start; + static jmethodID java_lang_Double_valueOf; + 
static jmethodID java_lang_Float_valueOf; + static jmethodID java_lang_Integer_valueOf; + static jmethodID java_lang_Long_valueOf; static jmethodID java_lang_ref_FinalizerReference_add; static jmethodID java_lang_ref_ReferenceQueue_add; static jmethodID java_lang_reflect_InvocationHandler_invoke; + static jmethodID java_lang_Short_valueOf; static jmethodID java_lang_Thread_init; static jmethodID java_lang_Thread_run; static jmethodID java_lang_Thread$UncaughtExceptionHandler_uncaughtException; diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index ddda260cd5..0aefa97022 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -20,6 +20,7 @@ #include "class_linker.h" #include "object.h" #include "object_utils.h" +#include "scoped_thread_state_change.h" #include "thread.h" #include "jni.h" #include "verifier/gc_map.h" @@ -42,11 +43,12 @@ namespace art { struct ReferenceMap2Visitor : public StackVisitor { explicit ReferenceMap2Visitor(const ManagedStack* stack, - const std::vector* trace_stack) : - StackVisitor(stack, trace_stack, NULL) { + const std::vector* trace_stack) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + : StackVisitor(stack, trace_stack, NULL) { } - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = GetMethod(); if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) { return true; @@ -62,7 +64,7 @@ struct ReferenceMap2Visitor : public StackVisitor { // Enable this to dump reference map to LOG(INFO) if (false) { - ScopedThreadStateChange tsc(Thread::Current(), kRunnable); + ScopedObjectAccess ts(Thread::Current()); art::verifier::MethodVerifier::VerifyMethodAndDump(m); } const uint8_t* ref_bitmap = NULL; @@ -281,6 +283,7 @@ struct ReferenceMap2Visitor : public StackVisitor { extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jint count) 
{ // Visitor + ScopedObjectAccess ts(Thread::Current()); ReferenceMap2Visitor mapper(Thread::Current()->GetManagedStack(), Thread::Current()->GetTraceStack()); mapper.WalkStack(); diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index 9382b8ffab..41e8d583ca 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -21,6 +21,7 @@ #include "object.h" #include "object_utils.h" #include "jni.h" +#include "scoped_thread_state_change.h" #include "verifier/gc_map.h" namespace art { @@ -41,10 +42,11 @@ static int gJava_StackWalk_refmap_calls = 0; struct TestReferenceMapVisitor : public StackVisitor { explicit TestReferenceMapVisitor(const ManagedStack* stack, const std::vector* trace_stack) + SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { } - bool VisitFrame() { + bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { Method* m = GetMethod(); CHECK(m != NULL); LOG(INFO) << "At " << PrettyMethod(m, false); @@ -100,6 +102,7 @@ struct TestReferenceMapVisitor : public StackVisitor { }; extern "C" JNIEXPORT jint JNICALL Java_StackWalk_refmap(JNIEnv*, jobject, jint count) { + ScopedObjectAccess ts(Thread::Current()); CHECK_EQ(count, 0); gJava_StackWalk_refmap_calls++; @@ -112,6 +115,7 @@ extern "C" JNIEXPORT jint JNICALL Java_StackWalk_refmap(JNIEnv*, jobject, jint c } extern "C" JNIEXPORT jint JNICALL Java_StackWalk2_refmap2(JNIEnv*, jobject, jint count) { + ScopedObjectAccess ts(Thread::Current()); gJava_StackWalk_refmap_calls++; // Visitor -- cgit v1.2.3-59-g8ed1b From b726dcb581bf72da46527378ccb6889020f0e6e9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 5 Sep 2012 08:57:23 -0700 Subject: Rename GlobalSynchronization to Locks Also address some review comments in common_throws relating to ToStr and exception detail messages. 
Change-Id: Ibf2c0f147689fa236d349bd7f01eed3c2522552b --- src/card_table.h | 4 +- src/check_jni.cc | 34 +-- src/class_linker.cc | 50 ++-- src/class_linker.h | 204 ++++++++-------- src/class_linker_test.cc | 22 +- src/common_test.h | 10 +- src/common_throws.cc | 17 +- src/common_throws.h | 26 +-- src/compiler.cc | 26 +-- src/compiler.h | 48 ++-- src/compiler_llvm/runtime_support_llvm.cc | 94 ++++---- src/compiler_test.cc | 4 +- src/debugger.cc | 114 ++++----- src/debugger.h | 158 ++++++------- src/dex2oat.cc | 16 +- src/dex_cache.h | 44 ++-- src/dex_file.h | 6 +- src/heap.cc | 67 +++--- src/heap.h | 48 ++-- src/heap_bitmap.h | 12 +- src/hprof/hprof.cc | 22 +- src/image.h | 2 +- src/image_writer.cc | 20 +- src/image_writer.h | 44 ++-- src/indirect_reference_table.h | 4 +- src/intern_table.h | 18 +- src/intern_table_test.cc | 2 +- src/jdwp/jdwp.h | 40 ++-- src/jdwp/jdwp_event.cc | 2 +- src/jdwp/jdwp_handler.cc | 148 ++++++------ src/jdwp/jdwp_main.cc | 8 +- src/jni_compiler_test.cc | 24 +- src/jni_internal.cc | 56 ++--- src/jni_internal.h | 12 +- src/jni_internal_test.cc | 26 +-- src/logging.cc | 4 +- src/logging.h | 8 +- src/mark_sweep.cc | 20 +- src/mark_sweep.h | 149 ++++++------ src/mod_union_table.cc | 18 +- src/mod_union_table.h | 20 +- src/monitor.cc | 10 +- src/monitor.h | 36 +-- src/mutex.cc | 20 +- src/mutex.h | 2 +- src/native/dalvik_system_DexFile.cc | 2 +- src/native/dalvik_system_VMRuntime.cc | 2 +- src/native/java_lang_Class.cc | 8 +- src/native/java_lang_System.cc | 2 +- src/native/java_lang_Thread.cc | 12 +- src/native/java_lang_reflect_Array.cc | 2 +- src/native/java_lang_reflect_Field.cc | 6 +- ...org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 6 +- src/oat/runtime/callee_save_frame.h | 4 +- src/oat/runtime/stub.h | 18 +- src/oat/runtime/support_alloc.cc | 12 +- src/oat/runtime/support_cast.cc | 6 +- src/oat/runtime/support_debug.cc | 4 +- src/oat/runtime/support_dexcache.cc | 8 +- src/oat/runtime/support_field.cc | 24 +- 
src/oat/runtime/support_fillarray.cc | 2 +- src/oat/runtime/support_invoke.cc | 14 +- src/oat/runtime/support_jni.cc | 16 +- src/oat/runtime/support_proxy.cc | 2 +- src/oat/runtime/support_stubs.cc | 8 +- src/oat/runtime/support_thread.cc | 4 +- src/oat/runtime/support_throw.cc | 14 +- src/oat_test.cc | 2 +- src/oat_writer.h | 12 +- src/oatdump.cc | 26 +-- src/object.cc | 4 +- src/object.h | 260 ++++++++++----------- src/object_test.cc | 2 +- src/object_utils.h | 120 +++++----- src/reference_table.cc | 4 +- src/reference_table.h | 4 +- src/reflection.cc | 6 +- src/reflection.h | 14 +- src/runtime.cc | 10 +- src/runtime.h | 24 +- src/runtime_linux.cc | 2 +- src/runtime_support.h | 22 +- src/scoped_thread_state_change.h | 50 ++-- src/signal_catcher.cc | 6 +- src/signal_catcher.h | 6 +- src/space.h | 4 +- src/space_bitmap.cc | 4 +- src/space_bitmap.h | 8 +- src/stack.h | 22 +- src/thread.cc | 76 +++--- src/thread.h | 116 ++++----- src/thread_list.cc | 92 ++++---- src/thread_list.h | 74 +++--- src/trace.cc | 12 +- src/trace.h | 8 +- src/utf.h | 2 +- src/utils.h | 16 +- src/verifier/method_verifier.cc | 2 +- src/verifier/method_verifier.h | 62 ++--- src/verifier/method_verifier_test.cc | 4 +- src/verifier/reg_type.cc | 2 +- src/verifier/reg_type.h | 20 +- src/verifier/reg_type_cache.h | 42 ++-- src/verifier/register_line.cc | 2 +- src/verifier/register_line.h | 30 +-- src/well_known_classes.h | 2 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 4 +- test/StackWalk/stack_walk_jni.cc | 4 +- 108 files changed, 1535 insertions(+), 1542 deletions(-) (limited to 'src/native/java_lang_System.cc') diff --git a/src/card_table.h b/src/card_table.h index a6284e3887..9dc720184a 100644 --- a/src/card_table.h +++ b/src/card_table.h @@ -76,8 +76,8 @@ class CardTable { template void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor, const FingerVisitor& finger_visitor) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(bitmap->HasAddress(scan_begin)); DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan. byte* card_cur = CardFromAddr(scan_begin); diff --git a/src/check_jni.cc b/src/check_jni.cc index 0e521af5aa..c53a1e31dc 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -124,7 +124,7 @@ static const char* gBuiltInPrefixes[] = { }; static bool ShouldTrace(JavaVMExt* vm, const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages // when a native method that matches the -Xjnitrace argument calls a JNI function // such as NewByteArray. @@ -151,7 +151,7 @@ class ScopedCheck { public: // For JNIEnv* functions. explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : soa_(env) { Init(flags, functionName, true); CheckThread(flags); @@ -160,12 +160,12 @@ class ScopedCheck { // For JavaVM* functions. // TODO: it's not correct that this is a lock function, but making it so aids annotalysis. explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : soa_(vm) { Init(kFlag_Invocation, functionName, has_method); } - ~ScopedCheck() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {} + ~ScopedCheck() UNLOCK_FUNCTION(Locks::mutator_lock_) {} const ScopedObjectAccess& soa() { return soa_; @@ -195,7 +195,7 @@ class ScopedCheck { * Works for both static and instance fields. 
*/ void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -241,7 +241,7 @@ class ScopedCheck { * Assumes "jobj" has already been validated. */ void CheckInstanceFieldID(jobject java_object, jfieldID fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = soa_.Decode(java_object); if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) { JniAbortF(function_name_, "field operation on invalid %s: %p", @@ -275,7 +275,7 @@ class ScopedCheck { * 'expectedType' will be "L" for all objects, including arrays. */ void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -301,7 +301,7 @@ class ScopedCheck { * Assumes "java_class" has already been validated. */ void CheckStaticFieldID(jclass java_class, jfieldID fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa_.Decode(java_class); const Field* f = CheckFieldID(fid); if (f == NULL) { @@ -323,7 +323,7 @@ class ScopedCheck { * Instances of "java_class" must be instances of the method's declaring class. */ void CheckStaticMethod(jclass java_class, jmethodID mid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -343,7 +343,7 @@ class ScopedCheck { * will be handled automatically by the instanceof check.) 
*/ void CheckVirtualMethod(jobject java_object, jmethodID mid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -392,7 +392,7 @@ class ScopedCheck { * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ void Check(bool entry, const char* fmt0, ...) - SHARED_LOCKS_REQUIRED (GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { va_list ap; const Method* traceMethod = NULL; @@ -594,7 +594,7 @@ class ScopedCheck { * to "running" mode before doing the checks. */ bool CheckInstance(InstanceKind kind, jobject java_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* what = NULL; switch (kind) { case kClass: @@ -668,7 +668,7 @@ class ScopedCheck { * * Since we're dealing with objects, switch to "running" mode. */ - void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (java_array == NULL) { JniAbortF(function_name_, "jarray was NULL"); return; @@ -689,7 +689,7 @@ class ScopedCheck { } } - Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (fid == NULL) { JniAbortF(function_name_, "jfieldID was NULL"); return NULL; @@ -702,7 +702,7 @@ class ScopedCheck { return f; } - Method* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Method* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mid == NULL) { JniAbortF(function_name_, "jmethodID was NULL"); return NULL; @@ -722,7 +722,7 @@ class ScopedCheck { * Switches to "running" mode before performing checks. 
*/ void CheckObject(jobject java_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (java_object == NULL) { return; } @@ -745,7 +745,7 @@ class ScopedCheck { } } - void CheckThread(int flags) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CheckThread(int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); if (self == NULL) { JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid()); diff --git a/src/class_linker.cc b/src/class_linker.cc index f6fb4701a9..3c52370f21 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -60,7 +60,7 @@ namespace art { static void ThrowNoClassDefFoundError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -70,7 +70,7 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) { static void ThrowClassFormatError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowClassFormatError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -80,7 +80,7 @@ static void ThrowClassFormatError(const char* fmt, ...) { static void ThrowLinkageError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowLinkageError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -90,7 +90,7 @@ static void ThrowLinkageError(const char* fmt, ...) 
{ static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const StringPiece& type, const StringPiece& name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(c); std::ostringstream msg; msg << "No " << scope << "field " << name << " of type " << type @@ -104,7 +104,7 @@ static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const Stri static void ThrowNullPointerException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowNullPointerException(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -113,7 +113,7 @@ static void ThrowNullPointerException(const char* fmt, ...) { } static void ThrowEarlierClassFailure(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // The class failed to initialize on a previous attempt, so we want to throw // a NoClassDefFoundError (v2 2.17.5). 
The exception to this rule is if we // failed in verification, in which case v2 5.4.1 says we need to re-throw @@ -134,7 +134,7 @@ static void ThrowEarlierClassFailure(Class* c) } static void WrapExceptionInInitializer() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -916,7 +916,7 @@ void ClassLinker::InitFromImage() { // reinit clases_ table { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); const Spaces& vec = heap->GetSpaces(); // TODO: C++0x auto @@ -985,7 +985,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { visitor(it->second, arg); @@ -999,7 +999,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { if (!visitor(it->second, arg)) { @@ -1124,7 +1124,7 @@ ObjectArray* ClassLinker::AllocStackTraceElementArray(size_t } static Class* EnsureResolved(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); // Wait for the class if it has not already been linked. 
Thread* self = Thread::Current(); @@ -1456,7 +1456,7 @@ void ClassLinker::FixupStaticTrampolines(Class* klass) { static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, uint32_t method_index) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Every kind of method should at least get an invoke stub from the oat_method. // non-abstract methods also get their code pointers. const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index); @@ -1891,7 +1891,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo LOG(INFO) << "Loaded class " << descriptor << source; } size_t hash = StringPieceHash()(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); Table& classes = image_class ? image_classes_ : classes_; Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes); #ifndef NDEBUG @@ -1908,7 +1908,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh; @@ -1933,7 +1933,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_l Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); // TODO: determine if its better to search classes_ or image_classes_ first Class* klass = LookupClassLocked(descriptor, class_loader, hash, 
classes_); if (klass != NULL) { @@ -1968,7 +1968,7 @@ Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* void ClassLinker::LookupClasses(const char* descriptor, std::vector& classes) { classes.clear(); size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh(NULL, this); @@ -1990,7 +1990,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector& cla #if !defined(NDEBUG) && !defined(ART_USE_LLVM_COMPILER) static void CheckMethodsHaveGcMaps(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Runtime::Current()->IsStarted()) { return; } @@ -2339,7 +2339,7 @@ Method* ClassLinker::CreateProxyConstructor(SirtRef& klass, Class* proxy_ } static void CheckProxyConstructor(Method* constructor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(constructor->IsConstructor()); MethodHelper mh(constructor); CHECK_STREQ(mh.GetName(), ""); @@ -2378,7 +2378,7 @@ Method* ClassLinker::CreateProxyMethod(SirtRef& klass, SirtRef& p } static void CheckProxyMethod(Method* method, SirtRef& prototype) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Basic sanity CHECK(!prototype->IsFinal()); CHECK(method->IsFinal()); @@ -2526,7 +2526,7 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in } bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { while (true) { self->AssertNoPendingException(); lock.Wait(); @@ -3138,7 +3138,7 @@ bool 
ClassLinker::LinkStaticFields(SirtRef& klass) { struct LinkFieldsComparator { explicit LinkFieldsComparator(FieldHelper* fh) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : fh_(fh) {} // No thread safety analysis as will be called from STL. Checked lock held in constructor. bool operator()(const Field* field1, const Field* field2) NO_THREAD_SAFETY_ANALYSIS { @@ -3636,7 +3636,7 @@ void ClassLinker::DumpAllClasses(int flags) const { // lock held, because it might need to resolve a field's type, which would try to take the lock. std::vector all_classes; { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { all_classes.push_back(it->second); @@ -3652,18 +3652,18 @@ void ClassLinker::DumpAllClasses(int flags) const { } void ClassLinker::DumpForSigQuit(std::ostream& os) const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); os << "Loaded classes: " << image_classes_.size() << " image classes; " << classes_.size() << " allocated classes\n"; } size_t ClassLinker::NumLoadedClasses() const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); return classes_.size() + image_classes_.size(); } pid_t ClassLinker::GetClassesLockOwner() { - return GlobalSynchronization::classlinker_classes_lock_->GetExclusiveOwnerTid(); + return Locks::classlinker_classes_lock_->GetExclusiveOwnerTid(); } pid_t ClassLinker::GetDexLockOwner() { diff --git a/src/class_linker.h b/src/class_linker.h index eed9f6a1a9..c21ff71951 100644 --- a/src/class_linker.h +++ b/src/class_linker.h @@ -46,60 +46,60 @@ class ClassLinker { // Creates the class linker by boot strapping from dex files. 
static ClassLinker* CreateFromCompiler(const std::vector& boot_class_path, InternTable* intern_table) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Creates the class linker from an image. static ClassLinker* CreateFromImage(InternTable* intern_table) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ~ClassLinker(); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. Class* FindClass(const char* descriptor, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* FindSystemClass(const char* descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Define a new a class based on a ClassDef from a DexFile Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded // by the given 'class_loader'. Class* LookupClass(const char* descriptor, const ClassLoader* class_loader) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. 
void LookupClasses(const char* descriptor, std::vector& classes) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. bool RemoveClass(const char* descriptor, const ClassLoader* class_loader) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpAllClasses(int flags) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); - size_t NumLoadedClasses() const LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + size_t NumLoadedClasses() const LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
String* ResolveString(uint32_t string_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(resolved_string == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -113,13 +113,13 @@ class ClassLinker { // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const Class* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveType(dex_file, type_idx, referrer->GetDexCache(), @@ -130,7 +130,7 @@ class ClassLinker { // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
Class* ResolveType(uint16_t type_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(resolved_type == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -143,7 +143,7 @@ class ClassLinker { } Class* ResolveType(uint16_t type_idx, const Field* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); Class* resolved_type = dex_cache->GetResolvedType(type_idx); @@ -163,7 +163,7 @@ class ClassLinker { uint16_t type_idx, DexCache* dex_cache, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. 
The ClassLinker and ClassLoader are used as @@ -176,10 +176,10 @@ class ClassLinker { ClassLoader* class_loader, const Method* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* ResolveMethod(uint32_t method_idx, const Method* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* resolved_method = referrer->GetDexCacheResolvedMethods()->Get(method_idx); if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -192,7 +192,7 @@ class ClassLinker { } Field* ResolveField(uint32_t field_idx, const Method* referrer, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { @@ -215,7 +215,7 @@ class ClassLinker { DexCache* dex_cache, ClassLoader* class_loader, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -225,28 +225,28 @@ class ClassLinker { uint32_t field_idx, DexCache* dex_cache, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, Method* referrer, uint32_t* length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. 
// can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no execution is possible. bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initializes classes that have instances in the image but that have // methods so they could not be initialized by the compiler. - void RunRootClinits() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterOatFile(const OatFile& oat_file) LOCKS_EXCLUDED(dex_lock_); @@ -256,26 +256,26 @@ class ClassLinker { } void VisitClasses(ClassVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); // Less efficient variant of VisitClasses that doesn't hold the classlinker_classes_lock_ // when calling the visitor. 
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_, dex_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_); const DexFile& FindDexFile(const DexCache* dex_cache) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* FindDexCache(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_); void FixupDexCaches(Method* resolution_method) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate an oat file from a dex file bool GenerateOatFile(const std::string& dex_filename, @@ -302,51 +302,51 @@ class ClassLinker { // does not match the OatFile. const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if oat file contains the dex file with the given location and checksum static bool VerifyOatFileChecksums(const OatFile* oat_file, const std::string& dex_location, uint32_t dex_location_checksum) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: replace this with multiple methods that allocate the correct managed type. 
template ObjectArray* AllocObjectArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ObjectArray::Alloc(GetClassRoot(kObjectArrayClass), length); } ObjectArray* AllocClassArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ObjectArray::Alloc(GetClassRoot(kClassArrayClass), length); } ObjectArray* AllocStackTraceElementArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, Class::Status& oat_file_class_status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Method* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreateProxyClass(String* name, ObjectArray* interfaces, ClassLoader* loader, ObjectArray* methods, ObjectArray >* throws) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string GetDescriptorForProxy(const Class* proxy_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindMethodForProxy(const Class* proxy_class, const Method* proxy_method) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized const void* GetOatCodeFor(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Relocate the OatFiles (ELF images) void RelocateExecutable() LOCKS_EXCLUDED(dex_lock_); @@ -358,59 +358,59 @@ class ClassLinker { explicit ClassLinker(InternTable*); const OatFile::OatMethod GetOatMethodFor(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker by bootstraping from dex files void InitFromCompiler(const std::vector& boot_class_path) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker from one or more images. - void InitFromImage() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); OatFile* OpenOat(const ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void InitFromImageCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishInit() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For early bootstrapping by Init Class* AllocClass(Class* java_lang_Class, size_t class_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Alloc* convenience functions to avoid needing to pass in Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. 
- Class* AllocClass(size_t class_size) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Class* AllocClass(size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* AllocDexCache(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* AllocField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* AllocMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Field* AllocField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Method* AllocMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InterfaceEntry* AllocInterfaceEntry(Class* interface) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreatePrimitiveClass(const char* descriptor, Primitive::Type type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return InitializePrimitiveClass(AllocClass(sizeof(Class)), descriptor, type); } Class* InitializePrimitiveClass(Class* primitive_class, const char* descriptor, Primitive::Type type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Class* c, SafeMap& field_map) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOfClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); @@ -419,17 +419,17 @@ class ClassLinker { const DexFile::ClassDef& dex_class_def, SirtRef& klass, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef& klass, SirtRef& dst); void LoadMethod(const DexFile& dex_file, const ClassDataItemIterator& dex_method, SirtRef& klass, SirtRef& dst) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticTrampolines(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor); @@ -438,70 +438,70 @@ class ClassLinker { // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. 
Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock); bool ValidateSuperClassDescriptors(const Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize static fields, returns true if fields were initialized. 
bool InitializeStaticFields(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(const char* descriptor, const Class* klass1, const Class* klass2) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameMethodSignatureInDifferentClassContexts(const Method* descriptor, const Class* klass1, const Class* klass2) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkClass(SirtRef& klass, ObjectArray* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkSuperClass(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkMethods(SirtRef& klass, ObjectArray* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkVirtualMethods(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInterfaceMethods(SirtRef& klass, ObjectArray* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkStaticFields(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInstanceFields(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkFields(SirtRef& klass, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceStaticOffsets(SirtRef& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceOffsets(SirtRef& klass, bool is_static, uint32_t reference_offsets) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots const std::vector& GetDexCaches() { @@ -518,12 +518,12 @@ class ClassLinker { const std::string& dex_location, uint32_t dex_location_checksum) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateProxyConstructor(SirtRef& klass, Class* proxy_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateProxyMethod(SirtRef& klass, SirtRef& prototype) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::vector boot_class_path_; @@ -537,13 +537,13 @@ class ClassLinker { // Class* instances. Results should be compared for a matching // Class::descriptor_ and Class::class_loader_. 
typedef std::multimap Table; - Table image_classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); - Table classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); + Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_); + Table classes_ GUARDED_BY(Locks::classlinker_classes_lock_); Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader, size_t hash, const Table& classes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::classlinker_classes_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + EXCLUSIVE_LOCKS_REQUIRED(Locks::classlinker_classes_lock_); // indexes into class_roots_. // needs to be kept in sync with class_roots_descriptors_. @@ -585,7 +585,7 @@ class ClassLinker { ObjectArray* class_roots_; Class* GetClassRoot(ClassRoot class_root) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(class_roots_ != NULL); Class* klass = class_roots_->Get(class_root); DCHECK(klass != NULL); @@ -593,7 +593,7 @@ class ClassLinker { } void SetClassRoot(ClassRoot class_root, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray* GetClassRoots() { DCHECK(class_roots_ != NULL); diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index 1eb5e0d1c4..f249c6a1c2 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -30,7 +30,7 @@ namespace art { class ClassLinkerTest : public CommonTest { protected: void AssertNonExistentClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { EXPECT_TRUE(class_linker_->FindSystemClass(descriptor.c_str()) == NULL); Thread* self = Thread::Current(); EXPECT_TRUE(self->IsExceptionPending()); @@ -41,12 +41,12 @@ class ClassLinkerTest : public CommonTest 
{ } void AssertPrimitiveClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(descriptor.c_str())); } void AssertPrimitiveClass(const std::string& descriptor, const Class* primitive) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper primitive_ch(primitive); ASSERT_TRUE(primitive != NULL); ASSERT_TRUE(primitive->GetClass() != NULL); @@ -83,7 +83,7 @@ class ClassLinkerTest : public CommonTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* array = class_linker_->FindClass(array_descriptor.c_str(), class_loader); ClassHelper array_component_ch(array->GetComponentType()); EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor()); @@ -92,7 +92,7 @@ class ClassLinkerTest : public CommonTest { } void AssertArrayClass(const std::string& array_descriptor, Class* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(array); ASSERT_TRUE(array != NULL); ASSERT_TRUE(array->GetClass() != NULL); @@ -135,7 +135,7 @@ class ClassLinkerTest : public CommonTest { EXPECT_STREQ(kh.GetDescriptor(), "Ljava/io/Serializable;"); } - void AssertMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void AssertMethod(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(method); EXPECT_TRUE(method != NULL); EXPECT_TRUE(method->GetClass() != NULL); @@ -157,7 +157,7 @@ class ClassLinkerTest : public CommonTest { } void AssertField(Class* klass, Field* field) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FieldHelper fh(field); EXPECT_TRUE(field != NULL); EXPECT_TRUE(field->GetClass() != NULL); @@ -167,7 +167,7 @@ class ClassLinkerTest : public CommonTest { } void AssertClass(const std::string& descriptor, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(klass); EXPECT_STREQ(descriptor.c_str(), kh.GetDescriptor()); if (descriptor == "Ljava/lang/Object;") { @@ -291,7 +291,7 @@ class ClassLinkerTest : public CommonTest { } void AssertDexFileClass(ClassLoader* class_loader, const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); ASSERT_TRUE(klass != NULL); @@ -307,7 +307,7 @@ class ClassLinkerTest : public CommonTest { } void AssertDexFile(const DexFile* dex, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(dex != NULL); // Verify all the classes defined in this file @@ -350,7 +350,7 @@ struct CheckOffsets { std::string class_descriptor; std::vector offsets; - bool Check() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = Runtime::Current()->GetClassLinker()->FindSystemClass(class_descriptor.c_str()); CHECK(klass != NULL) << class_descriptor; diff --git a/src/common_test.h b/src/common_test.h index 4424d91059..dee40e301e 100644 --- a/src/common_test.h +++ b/src/common_test.h @@ -209,7 +209,7 @@ class CommonTest : public testing::Test { ); } - void MakeExecutable(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void MakeExecutable(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); MethodHelper 
mh(method); @@ -452,7 +452,7 @@ class CommonTest : public testing::Test { } jobject LoadDex(const char* dex_name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* dex_file = OpenTestDexFile(dex_name); CHECK(dex_file != NULL); class_linker_->RegisterDexFile(*dex_file); @@ -479,7 +479,7 @@ class CommonTest : public testing::Test { } } - void CompileMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CompileMethod(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); compiler_->CompileOne(method); MakeExecutable(method); @@ -491,7 +491,7 @@ class CommonTest : public testing::Test { const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; @@ -505,7 +505,7 @@ class CommonTest : public testing::Test { const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; diff --git a/src/common_throws.cc b/src/common_throws.cc index 5bd30b4ec0..758e03b1ff 100644 --- a/src/common_throws.cc +++ b/src/common_throws.cc @@ -27,7 +27,7 @@ namespace art { static void AddReferrerLocation(std::ostream& os, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { ClassHelper kh(referrer->GetDeclaringClass()); 
std::string location(kh.GetLocation()); @@ -38,7 +38,7 @@ static void AddReferrerLocation(std::ostream& os, const Method* referrer) } static void AddReferrerLocationFromClass(std::ostream& os, Class* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { ClassHelper kh(referrer); std::string location(kh.GetLocation()); @@ -63,7 +63,7 @@ void ThrowNullPointerExceptionForMethodAccess(Method* caller, uint32_t method_id DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache); std::ostringstream msg; - msg << "Attempt to invoke " << ToStr(type).str() << " method '" + msg << "Attempt to invoke " << type << " method '" << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); } @@ -131,6 +131,8 @@ void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) { "Attempt to get length of null array"); break; default: { + // TODO: We should have covered all the cases where we expect a NPE above, this + // message/logging is so we can improve any cases we've missed in the future. 
const DexFile& dex_file = Runtime::Current()->GetClassLinker() ->FindDexFile(throw_method->GetDeclaringClass()->GetDexCache()); std::string message("Null pointer exception during instruction '"); @@ -146,7 +148,7 @@ void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) { void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) { std::ostringstream msg; - msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' -> '" + msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '" << PrettyDescriptor(accessed) << "'"; AddReferrerLocationFromClass(msg, referrer); Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); @@ -157,8 +159,8 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* acces const Method* called, InvokeType type) { std::ostringstream msg; - msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' -> '" - << PrettyDescriptor(accessed) << "') in attempt to invoke " << ToStr(type).str() + msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '" + << PrettyDescriptor(accessed) << "') in attempt to invoke " << type << " method " << PrettyMethod(called).c_str(); AddReferrerLocation(msg, caller); Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); @@ -220,7 +222,8 @@ void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is const Method* referrer) { std::ostringstream msg; msg << "Expected '" << PrettyField(resolved_field) << "' to be a " - << (is_static ? "static" : "instance") << " field"; + << (is_static ? "static" : "instance") << " field" << " rather than a " + << (is_static ? 
"instance" : "static") << " field"; AddReferrerLocation(msg, referrer); Thread::Current()->ThrowNewException("Ljava/lang/IncompatibleClassChangeError;", msg.str().c_str()); diff --git a/src/common_throws.h b/src/common_throws.h index 47186b04f2..ca2211f007 100644 --- a/src/common_throws.h +++ b/src/common_throws.h @@ -25,56 +25,56 @@ namespace art { // NullPointerException void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNullPointerExceptionForMethodAccess(Method* caller, uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IllegalAccessError void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed, const Method* caller, const Method* called, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorMethod(Class* referrer, Method* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorFinalField(const Method* referrer, Field* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IncompatibleClassChangeError void 
ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, Method* method, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const Method* interface_method, Object* this_object, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name, const StringPiece& signature, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNoSuchMethodError(uint32_t method_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/compiler.cc b/src/compiler.cc index b05e688fd2..69de8caaba 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -669,7 +669,7 @@ bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, OatCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); const DexFile::MethodId& referrer_method_id = mUnit->dex_file_->GetMethodId(mUnit->method_idx_); @@ -680,7 +680,7 @@ static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, static Field* 
ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa, OatCompilationUnit* mUnit, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); return mUnit->class_linker_->ResolveField(*mUnit->dex_file_, field_idx, dex_cache, @@ -691,7 +691,7 @@ static Method* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& so OatCompilationUnit* mUnit, uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); return mUnit->class_linker_->ResolveMethod(*mUnit->dex_file_, method_idx, dex_cache, @@ -1026,7 +1026,7 @@ class WorkerThread { } private: - static void* Go(void* arg) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + static void* Go(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_) { WorkerThread* worker = reinterpret_cast(arg); Runtime* runtime = Runtime::Current(); if (worker->spawn_) { @@ -1039,11 +1039,11 @@ class WorkerThread { return NULL; } - void Go() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void Go() LOCKS_EXCLUDED(Locks::mutator_lock_) { Go(this); } - void Run() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void Run() LOCKS_EXCLUDED(Locks::mutator_lock_) { Thread* self = Thread::Current(); for (size_t i = begin_; i < end_; i += stripe_) { callback_(context_, i); @@ -1066,7 +1066,7 @@ class WorkerThread { static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t thread_count) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { Thread* self = Thread::Current(); 
self->AssertNoPendingException(); CHECK_GT(thread_count, 0U); @@ -1080,7 +1080,7 @@ static void ForAll(CompilationContext* context, size_t begin, size_t end, Callba // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker // thread destructor's called below perform join). { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); } STLDeleteElements(&threads); @@ -1096,7 +1096,7 @@ static void ForAll(CompilationContext* context, size_t begin, size_t end, Callba static bool SkipClass(ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (class_loader == NULL) { return false; } @@ -1113,7 +1113,7 @@ static bool SkipClass(ClassLoader* class_loader, } static void ResolveClassFieldsAndMethods(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(Thread::Current()); ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); const DexFile& dex_file = *context->GetDexFile(); @@ -1182,7 +1182,7 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size } static void ResolveType(const CompilationContext* context, size_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { // Class derived values are more complicated, they require the linker and loader. 
ScopedObjectAccess soa(Thread::Current()); ClassLinker* class_linker = context->GetClassLinker(); @@ -1221,7 +1221,7 @@ void Compiler::Verify(jobject class_loader, const std::vector& d } static void VerifyClass(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(Thread::Current()); const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def); @@ -1474,7 +1474,7 @@ void ForClassesInAllDexFiles(CompilationContext* worker_context, // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker // thread destructor's called below perform join). { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); } STLDeleteElements(&threads); diff --git a/src/compiler.h b/src/compiler.h index 4a7fc2a820..c27bf956e8 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -53,11 +53,11 @@ class Compiler { ~Compiler(); void CompileAll(jobject class_loader, const std::vector& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method void CompileOne(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDebuggingSupported() { return support_debugging_; @@ -73,16 +73,16 @@ class Compiler { // Stub to throw AbstractMethodError static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate the trampoline that's invoked by unresolved direct methods static ByteArray* CreateResolutionStub(InstructionSet instruction_set, 
Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile typedef std::pair ClassReference; @@ -107,38 +107,38 @@ class Compiler { // Callbacks from compiler to see what runtime checks must be generated. bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Are runtime access checks necessary in the compiled code? bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fast path instance field access? Computes field's offset and volatility. bool ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, bool& is_volatile, bool is_put) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fastpath static field access? Computes field's offset, volatility and whether the // field is within the referrer (which can avoid checking class initialization). 
bool ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, int& ssb_index, bool& is_referrers_class, bool& is_volatile, bool is_put) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fastpath a interface, super class or virtual method call? Computes method's vtable // index. bool ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, InvokeType& type, int& vtable_idx, uintptr_t& direct_code, uintptr_t& direct_method) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Record patch information for later fix up. void AddCodePatch(const DexFile* dex_file, @@ -231,14 +231,14 @@ class Compiler { // Compute constant code and method pointers when possible void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, Method* method, uintptr_t& direct_code, uintptr_t& direct_method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Checks if class specified by type_idx is one of the image_classes_ bool IsImageClass(const std::string& descriptor) const; void PreCompile(jobject class_loader, const std::vector& dex_files, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void PostCompile(jobject class_loader, const std::vector& dex_files); // Attempt to resolve all type, methods, fields, and strings @@ -246,37 +246,37 @@ class Compiler { // ordering semantics. 
void Resolve(jobject class_loader, const std::vector& dex_files, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector& dex_files); void VerifyDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClassesWithoutClinit(jobject class_loader, const std::vector& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClassesWithoutClinit(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, compiled_classes_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); void Compile(jobject class_loader, const std::vector& dex_files); void CompileDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t method_idx, jobject class_loader, const DexFile& dex_file) LOCKS_EXCLUDED(compiled_methods_lock_); static void CompileClass(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMaps(jobject class_loader, const std::vector& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMapsDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMapsMethod(const DexFile& 
dex_file, Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub) LOCKS_EXCLUDED(compiled_invoke_stubs_lock_); diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index dd56ac896c..4384e87331 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -85,7 +85,7 @@ void art_unlock_object_from_code(Object* obj, Thread* thread) } void art_test_suspend_from_code(Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { thread->FullSuspendCheck(); } @@ -115,21 +115,21 @@ bool art_is_exception_pending_from_code() { } void art_throw_div_zero_from_code() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); } void art_throw_array_bounds_from_code(int32_t index, int32_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "length=%d; index=%d", length, index); } void art_throw_no_such_method_from_code(int32_t method_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); // We need the calling method as context for the method_idx Method* method = thread->GetCurrentMethod(); @@ -137,7 +137,7 @@ void art_throw_no_such_method_from_code(int32_t method_idx) } void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); NthCallerVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), 0); visitor.WalkStack(); @@ -146,7 +146,7 @@ void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) } void art_throw_stack_overflow_from_code() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); if (Runtime::Current()->IsMethodTracingActive()) { TraceMethodUnwindFromCode(thread); @@ -158,7 +158,7 @@ void art_throw_stack_overflow_from_code() } void art_throw_exception_from_code(Object* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); if (exception == NULL) { thread->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception"); @@ -169,7 +169,7 @@ void art_throw_exception_from_code(Object* exception) int32_t art_find_catch_block_from_code(Method* current_method, uint32_t ti_offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); Class* exception_type = thread->GetException()->GetClass(); MethodHelper mh(current_method); @@ -208,14 +208,14 @@ int32_t art_find_catch_block_from_code(Method* current_method, Object* art_alloc_object_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, false); } Object* art_alloc_object_from_code_with_access_check(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, true); } @@ -223,7 +223,7 @@ Object* art_alloc_array_from_code(uint32_t type_idx, Method* referrer, uint32_t length, Thread* /*thread*/) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, false); } @@ -231,7 +231,7 @@ Object* art_alloc_array_from_code_with_access_check(uint32_t type_idx, Method* referrer, uint32_t length, Thread* /*thread*/) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, true); } @@ -239,7 +239,7 @@ Object* art_check_and_alloc_array_from_code(uint32_t type_idx, Method* referrer, uint32_t length, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); } @@ -247,13 +247,13 @@ Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, Method* referrer, uint32_t length, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); } static Method* FindMethodHelper(uint32_t method_idx, Object* this_object, Method* caller_method, bool access_check, InvokeType type, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { method = FindMethodFromCode(method_idx, this_object, caller_method, @@ -279,7 +279,7 @@ Object* art_find_static_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, 
Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); } @@ -287,7 +287,7 @@ Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); } @@ -295,7 +295,7 @@ Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); } @@ -303,7 +303,7 @@ Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); } @@ -312,7 +312,7 @@ art_find_interface_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); } @@ -320,40 +320,40 @@ Object* art_find_interface_method_from_code(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); } Object* 
art_initialize_static_storage_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); } Object* art_initialize_type_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); } Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); } Object* art_resolve_string_from_code(Method* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveStringFromCode(referrer, string_idx); } int32_t art_set32_static_from_code(uint32_t field_idx, Method* referrer, int32_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(NULL, new_value); @@ -369,7 +369,7 @@ int32_t art_set32_static_from_code(uint32_t field_idx, Method* referrer, int32_t } int32_t art_set64_static_from_code(uint32_t field_idx, Method* referrer, int64_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(NULL, 
new_value); @@ -385,7 +385,7 @@ int32_t art_set64_static_from_code(uint32_t field_idx, Method* referrer, int64_t } int32_t art_set_obj_static_from_code(uint32_t field_idx, Method* referrer, Object* new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { field->SetObj(NULL, new_value); @@ -401,7 +401,7 @@ int32_t art_set_obj_static_from_code(uint32_t field_idx, Method* referrer, Objec } int32_t art_get32_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(NULL); @@ -415,7 +415,7 @@ int32_t art_get32_static_from_code(uint32_t field_idx, Method* referrer) } int64_t art_get64_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(NULL); @@ -429,7 +429,7 @@ int64_t art_get64_static_from_code(uint32_t field_idx, Method* referrer) } Object* art_get_obj_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(NULL); @@ -444,7 +444,7 @@ Object* art_get_obj_static_from_code(uint32_t field_idx, Method* referrer) int32_t art_set32_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, uint32_t new_value) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(obj, new_value); @@ -461,7 +461,7 @@ int32_t art_set32_instance_from_code(uint32_t field_idx, Method* referrer, int32_t art_set64_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, int64_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(obj, new_value); @@ -478,7 +478,7 @@ int32_t art_set64_instance_from_code(uint32_t field_idx, Method* referrer, int32_t art_set_obj_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, Object* new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { field->SetObj(obj, new_value); @@ -494,7 +494,7 @@ int32_t art_set_obj_instance_from_code(uint32_t field_idx, Method* referrer, } int32_t art_get32_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(obj); @@ -508,7 +508,7 @@ int32_t art_get32_instance_from_code(uint32_t field_idx, Method* referrer, Objec } int64_t art_get64_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, 
InstancePrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(obj); @@ -522,7 +522,7 @@ int64_t art_get64_instance_from_code(uint32_t field_idx, Method* referrer, Objec } Object* art_get_obj_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(obj); @@ -537,7 +537,7 @@ Object* art_get_obj_instance_from_code(uint32_t field_idx, Method* referrer, Obj void art_fill_array_data_from_code(Method* method, uint32_t dex_pc, Array* array, uint32_t payload_offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Test: Is array equal to null? (Guard NullPointerException) if (UNLIKELY(array == NULL)) { art_throw_null_pointer_exception_from_code(dex_pc); @@ -577,14 +577,14 @@ void art_fill_array_data_from_code(Method* method, uint32_t dex_pc, //---------------------------------------------------------------------------- int32_t art_is_assignable_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type != NULL); DCHECK(src_type != NULL); return dest_type->IsAssignableFrom(src_type) ? 
1 : 0; } void art_check_cast_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); DCHECK(src_type->IsClass()) << PrettyClass(src_type); if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { @@ -597,7 +597,7 @@ void art_check_cast_from_code(const Class* dest_type, const Class* src_type) } void art_check_put_array_element_from_code(const Object* element, const Object* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (element == NULL) { return; } @@ -631,7 +631,7 @@ uint32_t art_jni_method_start(Thread* self) } uint32_t art_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) { self->DecodeJObject(to_lock)->MonitorEnter(self); return art_jni_method_start(self); } @@ -643,7 +643,7 @@ static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* s } void art_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); PopLocalReferences(saved_local_ref_cookie, self); } @@ -651,7 +651,7 @@ void art_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) void art_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. 
PopLocalReferences(saved_local_ref_cookie, self); @@ -659,7 +659,7 @@ void art_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, jobject lo Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); Object* o = self->DecodeJObject(result); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -676,7 +676,7 @@ Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_r Object* art_jni_method_end_with_reference_synchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. Object* o = self->DecodeJObject(result); @@ -734,7 +734,7 @@ static void* art_find_compiler_runtime_func(const char* name) { // Handler for invocation on proxy methods. We create a boxed argument array. And we invoke // the invocation handler which is a field within the proxy object receiver. void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) 
- SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list ap; va_start(ap, proxy_method); diff --git a/src/compiler_test.cc b/src/compiler_test.cc index 0c53bb9981..98b21422f4 100644 --- a/src/compiler_test.cc +++ b/src/compiler_test.cc @@ -31,14 +31,14 @@ namespace art { class CompilerTest : public CommonTest { protected: - void CompileAll(jobject class_loader) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { compiler_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader)); MakeAllExecutable(class_loader); } void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, const char* signature, bool is_virtual) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { CompileAll(class_loader); Thread::Current()->TransitionFromSuspendedToRunnable(); runtime_->Start(); diff --git a/src/debugger.cc b/src/debugger.cc index b47377e2eb..87ad446c4e 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -98,7 +98,7 @@ struct AllocRecordStackTraceElement { Method* method; uint32_t dex_pc; - int32_t LineNumber() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return MethodHelper(method).GetLineNumFromDexPC(dex_pc); } }; @@ -125,7 +125,7 @@ struct Breakpoint { }; static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc); return os; } @@ -182,7 +182,7 @@ static std::vector gBreakpoints GUARDED_BY(gBreakpointsLock); static SingleStepControl gSingleStepControl GUARDED_BY(gBreakpointsLock); static bool IsBreakpoint(Method* m, uint32_t dex_pc) 
- SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MutexLock mu(gBreakpointsLock); for (size_t i = 0; i < gBreakpoints.size(); ++i) { if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) { @@ -194,7 +194,7 @@ static bool IsBreakpoint(Method* m, uint32_t dex_pc) } static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -209,7 +209,7 @@ static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) } static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -224,8 +224,8 @@ static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) } static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId threadId) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* thread_peer = gRegistry->Get(threadId); if (thread_peer == NULL || thread_peer == kInvalidObject) { return NULL; @@ -241,7 +241,7 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { } static JDWP::JdwpTag TagFromClass(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(c != NULL); if (c->IsArrayClass()) { return JDWP::JT_ARRAY; @@ -272,7 +272,7 @@ static JDWP::JdwpTag TagFromClass(Class* c) * Null objects are tagged JT_OBJECT. 
*/ static JDWP::JdwpTag TagFromObject(const Object* o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass()); } @@ -491,7 +491,7 @@ static void SetDebuggerUpdatesEnabledCallback(Thread* t, void* user_data) { } static void SetDebuggerUpdatesEnabled(bool enabled) { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled); } @@ -830,7 +830,7 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId arrayId, int offset, int count, JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId arrayId, int offset, int count, const uint8_t* src) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; Array* a = DecodeArray(arrayId, status); if (a == NULL) { @@ -920,7 +920,7 @@ bool Dbg::MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) { } static JDWP::FieldId ToFieldId(const Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -929,7 +929,7 @@ static JDWP::FieldId ToFieldId(const Field* f) } static JDWP::MethodId ToMethodId(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -938,7 +938,7 @@ static JDWP::MethodId ToMethodId(const Method* m) } static Field* FromFieldId(JDWP::FieldId fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -947,7 +947,7 @@ static Field* FromFieldId(JDWP::FieldId fid) } static Method* FromMethodId(JDWP::MethodId mid) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -956,7 +956,7 @@ static Method* FromMethodId(JDWP::MethodId mid) } static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m == NULL) { memset(&location, 0, sizeof(location)); } else { @@ -969,7 +969,7 @@ static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc } std::string Dbg::GetMethodName(JDWP::RefTypeId, JDWP::MethodId methodId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = FromMethodId(methodId); return MethodHelper(m).GetName(); } @@ -1013,7 +1013,7 @@ static uint16_t MangleSlot(uint16_t slot, const char* name) { } static uint16_t DemangleSlot(uint16_t slot, Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (slot == kEclipseWorkaroundSlot) { return 0; } else if (slot == 0) { @@ -1096,7 +1096,7 @@ JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId classId, JDWP::Exp } void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct DebugCallbackContext { int numItems; JDWP::ExpandBuf* pReply; @@ -1198,7 +1198,7 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId fieldId) { static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId refTypeId, JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; Class* c = DecodeClass(refTypeId, status); if 
(refTypeId != 0 && c == NULL) { @@ -1268,7 +1268,7 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldI static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get(objectId); if ((!is_static && o == NULL) || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; @@ -1330,7 +1330,7 @@ std::string Dbg::StringToUtf8(JDWP::ObjectId strId) { } bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); ScopedObjectAccessUnchecked soa(Thread::Current()); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { @@ -1348,7 +1348,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pR } // Okay, so it's an object, but is it actually a thread? - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); if (DecodeThread(soa, threadId) == NULL) { return JDWP::ERR_INVALID_THREAD; } @@ -1407,13 +1407,13 @@ JDWP::ObjectId Dbg::GetMainThreadGroupId() { bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return false; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); // TODO: if we're in Thread.sleep(long), we should return TS_SLEEPING, // even if it's implemented using Object.wait(long). 
@@ -1447,28 +1447,28 @@ bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThre JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); return JDWP::ERR_NONE; } bool Dbg::ThreadExists(JDWP::ObjectId threadId) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); return DecodeThread(soa, threadId) != NULL; } bool Dbg::IsSuspended(JDWP::ObjectId threadId) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); CHECK(thread != NULL); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); return thread->IsSuspended(); } @@ -1477,7 +1477,7 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector public: ThreadListVisitor(const ScopedObjectAccessUnchecked& ts, Object* thread_group, std::vector& thread_ids) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ts_(ts), thread_group_(thread_group), thread_ids_(thread_ids) {} static void Visit(Thread* t, void* arg) { @@ -1506,7 +1506,7 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector ScopedObjectAccessUnchecked soa(Thread::Current()); Object* thread_group = gRegistry->Get(thread_group_id); ThreadListVisitor tlv(soa, thread_group, thread_ids); - MutexLock 
mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); } @@ -1531,7 +1531,7 @@ void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector* trace_stack) @@ -1547,7 +1547,7 @@ static int GetStackDepth(Thread* thread) }; if (kIsDebugBuild) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK(thread->IsSuspended()); } CountStackDepthVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack()); @@ -1565,7 +1565,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram public: GetFrameVisitor(const ManagedStack* stack, const std::vector* trace_stack, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { expandBufAdd4BE(buf_, frame_count_); @@ -1644,7 +1644,7 @@ JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId threadId, bool request_suspens void Dbg::ResumeThread(JDWP::ObjectId threadId) { ScopedObjectAccessUnchecked soa(Thread::Current()); Object* peer = gRegistry->Get(threadId); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for resume: " << peer; @@ -1652,7 +1652,7 @@ void Dbg::ResumeThread(JDWP::ObjectId threadId) { } bool needs_resume; { - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); needs_resume = thread->GetSuspendCount() > 0; } if (needs_resume) { @@ -1667,7 +1667,7 @@ void Dbg::SuspendSelf() { struct GetThisVisitor : public StackVisitor { GetThisVisitor(const 
ManagedStack* stack, const std::vector* trace_stack, Context* context, JDWP::FrameId frameId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), this_object(NULL), frame_id(frameId) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses @@ -1691,7 +1691,7 @@ struct GetThisVisitor : public StackVisitor { }; static Object* GetThis(Thread* self, Method* m, size_t frame_id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: should we return the 'this' we passed through to non-static native methods? if (m->IsNative() || m->IsStatic()) { return NULL; @@ -1708,12 +1708,12 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame ScopedObjectAccessUnchecked soa(Thread::Current()); Thread* thread; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); thread = DecodeThread(soa, thread_id); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); if (!thread->IsSuspended()) { return JDWP::ERR_THREAD_NOT_SUSPENDED; } @@ -1731,7 +1731,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot GetLocalVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frameId), slot_(slot), tag_(tag), buf_(buf), width_(width) {} @@ -1850,7 +1850,7 @@ void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot SetLocalVisitor(const ManagedStack* stack, const std::vector* 
trace_stack, Context* context, JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width) {} @@ -2139,7 +2139,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize s SingleStepStackVisitor(const ManagedStack* stack, const std::vector* trace_stack) EXCLUSIVE_LOCKS_REQUIRED(gBreakpointsLock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { gBreakpointsLock.AssertHeld(); gSingleStepControl.method = NULL; @@ -2299,7 +2299,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object Thread* self = Thread::Current(); { ScopedObjectAccessUnchecked soa(self); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); targetThread = DecodeThread(soa, threadId); if (targetThread == NULL) { LOG(ERROR) << "InvokeMethod request for non-existent thread " << threadId; @@ -2327,7 +2327,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object */ int suspend_count; { - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); suspend_count = targetThread->GetSuspendCount(); } if (suspend_count > 1) { @@ -2624,7 +2624,7 @@ void Dbg::DdmBroadcast(bool connect) { Thread* self = Thread::Current(); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (self->GetState() != kRunnable) { LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); /* try anyway? 
*/ @@ -2692,7 +2692,7 @@ void Dbg::DdmSetThreadNotification(bool enable) { SuspendVM(); std::list threads; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); threads = Runtime::Current()->GetThreadList()->GetList(); } { @@ -2885,7 +2885,7 @@ class HeapChunkContext { needHeader_ = false; } - void Flush() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Patch the "length of piece" field. CHECK_LE(&buf_[0], pieceLenField_); CHECK_LE(pieceLenField_, p_); @@ -2896,8 +2896,8 @@ class HeapChunkContext { } static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { reinterpret_cast(arg)->HeapChunkCallback(start, end, used_bytes); } @@ -2913,8 +2913,8 @@ class HeapChunkContext { } void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken // in the following code not to allocate memory, by ensuring buf_ is of the correct size if (used_bytes == 0) { @@ -2967,7 +2967,7 @@ class HeapChunkContext { } void AppendChunk(uint8_t state, void* ptr, size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Make sure there's enough room left in the buffer. // We need to use two bytes for every fractional 256 allocation units used by the chunk plus // 17 bytes for any header. 
@@ -2997,7 +2997,7 @@ class HeapChunkContext { } uint8_t ExamineObject(const Object* o, bool is_native_heap) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { if (o == NULL) { return HPSG_STATE(SOLIDITY_FREE, 0); } @@ -3089,7 +3089,7 @@ void Dbg::DdmSendHeapSegments(bool native) { const Spaces& spaces = heap->GetSpaces(); for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) { if ((*cur)->IsAllocSpace()) { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); (*cur)->AsAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); } } @@ -3119,7 +3119,7 @@ void Dbg::SetAllocTrackingEnabled(bool enabled) { struct AllocRecordStackVisitor : public StackVisitor { AllocRecordStackVisitor(const ManagedStack* stack, const std::vector* trace_stack, AllocRecord* record) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), record(record), depth(0) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses diff --git a/src/debugger.h b/src/debugger.h index 6e82001d82..43590f839c 100644 --- a/src/debugger.h +++ b/src/debugger.h @@ -81,7 +81,7 @@ class Dbg { static void StopJdwp(); // Invoked by the GC in case we need to keep DDMS informed. - static void GcDidFinish() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_); // Return the DebugInvokeReq for the current thread. 
static DebugInvokeReq* GetInvokeReq(); @@ -124,105 +124,105 @@ class Dbg { * Class, Object, Array */ static std::string GetClassName(JDWP::RefTypeId id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& classObjectId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclassId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId classId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetClassList(std::vector& classes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId classId, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void FindLoadedClassBySignature(const char* descriptor, std::vector& ids) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetReferenceType(JDWP::ObjectId objectId, JDWP::ExpandBuf* pReply); static JDWP::JdwpError GetSignature(JDWP::RefTypeId refTypeId, 
std::string& signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId refTypeId, std::string& source_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetObjectTag(JDWP::ObjectId objectId, uint8_t& tag) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static size_t GetTagWidth(JDWP::JdwpTag tag); static JDWP::JdwpError GetArrayLength(JDWP::ObjectId arrayId, int& length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputArray(JDWP::ObjectId arrayId, int firstIndex, int count, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetArrayElements(JDWP::ObjectId arrayId, int firstIndex, int count, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId CreateString(const std::string& str) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId arrayTypeId, uint32_t length, JDWP::ObjectId& new_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Method and Field */ 
static std::string GetMethodName(JDWP::RefTypeId refTypeId, JDWP::MethodId id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId refTypeId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void OutputLineTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void OutputVariableTable(JDWP::RefTypeId refTypeId, JDWP::MethodId id, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId fieldId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId fieldId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);; static JDWP::JdwpError GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId fieldId, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static std::string StringToUtf8(JDWP::ObjectId strId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Thread, ThreadGroup, Frame */ static bool GetThreadName(JDWP::ObjectId threadId, std::string& name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply); static std::string GetThreadGroupName(JDWP::ObjectId threadGroupId); static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId threadGroupId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetSystemThreadGroupId() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetMainThreadGroupId(); static bool GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus); @@ -234,43 +234,43 @@ class Dbg { // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0, // returns all threads. 
static void GetThreads(JDWP::ObjectId thread_group_id, std::vector& thread_ids) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector& child_thread_group_ids); static int GetThreadFrameCount(JDWP::ObjectId threadId); static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetThreadSelfId() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SuspendVM() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); static void ResumeVM(); static JDWP::JdwpError SuspendThread(JDWP::ObjectId threadId, bool request_suspension = true) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); static void ResumeThread(JDWP::ObjectId threadId) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SuspendSelf(); static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - 
GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Debugger notification @@ -282,29 +282,29 @@ class Dbg { kMethodExit = 0x08, }; static void PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, Method* throw_method, uint32_t throw_dex_pc, Method* catch_method, uint32_t catch_dex_pc, Throwable* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStart(Thread* t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadDeath(Thread* t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostClassPrepare(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UpdateDebugger(int32_t dex_pc, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void WatchLocation(const JDWP::JdwpLocation* pLoc) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UnwatchLocation(const JDWP::JdwpLocation* pLoc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth) LOCKS_EXCLUDED(gBreakpointsLock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UnconfigureStep(JDWP::ObjectId threadId); static JDWP::JdwpError InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, @@ -313,9 +313,9 @@ class Dbg { JDWP::JdwpTag* arg_types, uint32_t options, JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, JDWP::ObjectId* pExceptObj) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ExecuteMethod(DebugInvokeReq* pReq); /* perform "late registration" of an object ID */ @@ -325,27 +325,27 @@ class Dbg { * DDM support. 
*/ static void DdmSendThreadNotification(Thread* t, uint32_t type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSetThreadNotification(bool enable); static bool DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, int* pReplyLen); - static void DdmConnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static void DdmDisconnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, const std::vector& bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Recent allocation tracking support. 
*/ static void RecordAllocation(Class* type, size_t byte_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetAllocTrackingEnabled(bool enabled); static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; } static jbyteArray GetRecentAllocations() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DumpRecentAllocations(); enum HpifWhen { @@ -355,7 +355,7 @@ class Dbg { HPIF_WHEN_EVERY_GC = 3 }; static int DdmHandleHpifChunk(HpifWhen when) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); enum HpsgWhen { HPSG_WHEN_NEVER = 0, @@ -368,14 +368,14 @@ class Dbg { static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native); static void DdmSendHeapInfo(HpifWhen reason) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendHeapSegments(bool native) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static AllocRecord* recent_allocation_records_; }; diff --git a/src/dex2oat.cc b/src/dex2oat.cc index b68a75b18b..2a6a2204c4 100644 --- a/src/dex2oat.cc +++ b/src/dex2oat.cc @@ -123,7 +123,7 @@ class Dex2Oat { public: static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, InstructionSet instruction_set, size_t thread_count, bool support_debugging) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + 
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!CreateRuntime(options, instruction_set)) { *p_dex2oat = NULL; return false; @@ -139,7 +139,7 @@ class Dex2Oat { // Make a list of descriptors for classes to include in the image const std::set* GetImageClassDescriptors(const char* image_classes_filename) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UniquePtr image_classes_file(new std::ifstream(image_classes_filename, std::ifstream::in)); if (image_classes_file.get() == NULL) { LOG(ERROR) << "Failed to open image classes file " << image_classes_filename; @@ -213,7 +213,7 @@ class Dex2Oat { const std::set* image_classes, bool dump_stats, bool dump_timings) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create jobject class_loader = NULL; if (!boot_image_option.empty()) { @@ -279,7 +279,7 @@ class Dex2Oat { const std::string& oat_filename, const std::string& oat_location, const Compiler& compiler) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ImageWriter image_writer(image_classes); if (!image_writer.Write(image_filename, image_base, oat_filename, oat_location, compiler)) { LOG(ERROR) << "Failed to create image file " << image_filename; @@ -299,7 +299,7 @@ class Dex2Oat { } static bool CreateRuntime(Runtime::Options& options, InstructionSet instruction_set) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!Runtime::Create(options, false)) { LOG(ERROR) << "Failed to create runtime"; return false; @@ -333,7 +333,7 @@ class Dex2Oat { static void ResolveExceptionsForMethod(MethodHelper* mh, std::set >& exceptions_to_resolve) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
const DexFile::CodeItem* code_item = mh->GetCodeItem(); if (code_item == NULL) { return; // native or abstract method @@ -370,7 +370,7 @@ class Dex2Oat { } static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set >* exceptions_to_resolve = reinterpret_cast >*>(arg); MethodHelper mh; @@ -388,7 +388,7 @@ class Dex2Oat { } static bool RecordImageClassesVisitor(Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set* image_classes = reinterpret_cast*>(arg); if (klass->IsArrayClass() || klass->IsPrimitive()) { return true; diff --git a/src/dex_cache.h b/src/dex_cache.h index b3c5ce6e91..a08c644d7f 100644 --- a/src/dex_cache.h +++ b/src/dex_cache.h @@ -39,11 +39,11 @@ class MANAGED DexCache : public ObjectArray { ObjectArray* methods, ObjectArray* fields, ObjectArray* initialized_static_storage) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Fixup(Method* trampoline) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Fixup(Method* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* GetLocation() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + String* GetLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Get(kLocation)->AsString(); } @@ -62,49 +62,49 @@ class MANAGED DexCache : public ObjectArray { kResolvedMethods * sizeof(Object*)); } - size_t NumStrings() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumStrings() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetStrings()->GetLength(); } - size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
return GetResolvedTypes()->GetLength(); } - size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedMethods()->GetLength(); } - size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedFields()->GetLength(); } size_t NumInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetInitializedStaticStorage()->GetLength(); } String* GetResolvedString(uint32_t string_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetStrings()->Get(string_idx); } void SetResolvedString(uint32_t string_idx, String* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetStrings()->Set(string_idx, resolved); } Class* GetResolvedType(uint32_t type_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedTypes()->Get(type_idx); } void SetResolvedType(uint32_t type_idx, Class* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedTypes()->Set(type_idx, resolved); } Method* GetResolvedMethod(uint32_t method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = GetResolvedMethods()->Get(method_idx); // Hide resolution trampoline methods from the caller if (method != NULL && method->GetDexMethodIndex() == DexFile::kDexNoIndex16) { @@ -116,38 +116,38 @@ class MANAGED DexCache : public ObjectArray { } void SetResolvedMethod(uint32_t 
method_idx, Method* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedMethods()->Set(method_idx, resolved); } Field* GetResolvedField(uint32_t field_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedFields()->Get(field_idx); } void SetResolvedField(uint32_t field_idx, Field* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedFields()->Set(field_idx, resolved); } ObjectArray* GetStrings() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast*>(GetNonNull(kStrings)); } ObjectArray* GetResolvedTypes() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedTypes)); } ObjectArray* GetResolvedMethods() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedMethods)); } ObjectArray* GetResolvedFields() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast*>(GetNonNull(kResolvedFields)); } ObjectArray* GetInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast*>(GetNonNull(kInitializedStaticStorage)); } @@ -167,7 +167,7 @@ class MANAGED DexCache : public ObjectArray { }; Object* GetNonNull(ArrayIndex array_index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* obj = Get(array_index); DCHECK(obj != NULL); return obj; diff --git a/src/dex_file.h b/src/dex_file.h index 
99a748d723..7d39945a10 100644 --- a/src/dex_file.h +++ b/src/dex_file.h @@ -776,7 +776,7 @@ class DexFile { // // This is used by runtime; therefore use art::Method not art::DexFile::Method. int32_t GetLineNumFromPC(const Method* method, uint32_t rel_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb, @@ -1125,10 +1125,10 @@ class EncodedStaticFieldValueIterator { public: EncodedStaticFieldValueIterator(const DexFile& dex_file, DexCache* dex_cache, ClassLinker* linker, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ReadValueToField(Field* field) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasNext() { return pos_ < array_size_; } diff --git a/src/heap.cc b/src/heap.cc index 47473e3c53..d8f55ab7d5 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -273,7 +273,7 @@ class SpaceSorter { }; void Heap::AddSpace(Space* space) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); DCHECK(space != NULL); DCHECK(space->GetLiveBitmap() != NULL); live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap()); @@ -366,7 +366,7 @@ Object* Heap::AllocObject(Class* c, size_t byte_count) { Object* obj = Allocate(alloc_space_, byte_count); if (LIKELY(obj != NULL)) { #if VERIFY_OBJECT_ENABLED - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); // Verify objects doesn't like objects in allocation stack not being marked as live. 
live_bitmap_->Set(obj); #endif @@ -426,7 +426,7 @@ bool Heap::IsHeapAddress(const Object* obj) { } bool Heap::IsLiveObjectLocked(const Object* obj) { - GlobalSynchronization::heap_bitmap_lock_->AssertReaderHeld(); + Locks::heap_bitmap_lock_->AssertReaderHeld(); return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj); } @@ -489,7 +489,7 @@ void Heap::VerificationCallback(Object* obj, void* arg) { } void Heap::VerifyHeap() { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); GetLiveBitmap()->Walk(Heap::VerificationCallback, this); } @@ -546,7 +546,7 @@ Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) { // done in the runnable state where suspension is expected. #ifndef NDEBUG { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kRunnable); } self->AssertThreadSuspensionIsAllowable(); @@ -660,7 +660,7 @@ int64_t Heap::GetFreeMemory() { class InstanceCounter { public: InstanceCounter(Class* c, bool count_assignable) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_(c), count_assignable_(count_assignable), count_(0) { } @@ -670,12 +670,12 @@ class InstanceCounter { } static void Callback(Object* o, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { reinterpret_cast(arg)->VisitInstance(o); } private: - void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* instance_class = o->GetClass(); if (count_assignable_) { if (instance_class == class_) { @@ -694,7 +694,7 @@ class InstanceCounter { }; int64_t Heap::CountInstances(Class* c, bool count_assignable) { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock 
mu(*Locks::heap_bitmap_lock_); InstanceCounter counter(c, count_assignable); GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter); return counter.GetCount(); @@ -722,7 +722,7 @@ void Heap::PreZygoteFork() { { // Flush the alloc stack. - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); FlushAllocStack(); } @@ -806,10 +806,10 @@ void Heap::UnMarkStackAsLive(MarkStack* alloc_stack) { } void Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); #ifndef NDEBUG { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc); } #endif @@ -868,7 +868,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference ThreadList* thread_list = Runtime::Current()->GetThreadList(); thread_list->SuspendAll(); timings.AddSplit("SuspendAll"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); size_t bytes_freed = 0; Object* cleared_references = NULL; @@ -880,7 +880,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference // Pre verify the heap if (pre_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPreGC"); } @@ -918,7 +918,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference } } - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type == GC_PARTIAL) { // Copy the mark bits over from the live bits, do this as early as possible or else we can // accidentally un-mark roots. 
@@ -1007,7 +1007,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference // Post gc verify the heap if (post_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPostGC"); } @@ -1081,8 +1081,8 @@ class ScanVisitor { class VerifyReferenceVisitor { public: VerifyReferenceVisitor(Heap* heap, bool* failed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, + Locks::heap_bitmap_lock_) : heap_(heap), failed_(failed) { } @@ -1183,8 +1183,7 @@ class VerifyObjectVisitor { } void operator ()(const Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceVisitor visitor(heap_, const_cast(&failed_)); MarkSweep::VisitObjectReferences(obj, visitor); } @@ -1200,7 +1199,7 @@ class VerifyObjectVisitor { // Must do this with mutators suspended since we are directly accessing the allocation stacks. void Heap::VerifyHeapReferences(const std::string& phase) { - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); // Lets sort our allocation stacks so that we can efficiently binary search them. std::sort(allocation_stack_->Begin(), allocation_stack_->End()); std::sort(live_stack_->Begin(), live_stack_->End()); @@ -1220,7 +1219,7 @@ void Heap::SwapBitmaps() { // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit // instead, resulting in no new allocated objects being incorrectly freed by sweep. 
- WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) { Space* space = *it; // We never allocate into zygote spaces. @@ -1242,7 +1241,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft ThreadList* thread_list = Runtime::Current()->GetThreadList(); thread_list->SuspendAll(); timings.AddSplit("SuspendAll"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); size_t bytes_freed = 0; Object* cleared_references = NULL; @@ -1255,7 +1254,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // Pre verify the heap if (pre_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPreGC"); } @@ -1294,7 +1293,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft } { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type == GC_PARTIAL) { // Copy the mark bits over from the live bits, do this as early as possible or else we can @@ -1343,11 +1342,11 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // Allow mutators to go again, acquire share on mutator_lock_ to continue. 
thread_list->ResumeAll(); { - ReaderMutexLock reader_lock(*GlobalSynchronization::mutator_lock_); + ReaderMutexLock reader_lock(*Locks::mutator_lock_); root_end = NanoTime(); timings.AddSplit("RootEnd"); - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); UpdateAndMarkModUnion(timings, gc_type); if (gc_type != GC_STICKY) { // Recursively mark all the non-image bits set in the mark bitmap. @@ -1361,10 +1360,10 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft dirty_begin = NanoTime(); thread_list->SuspendAll(); timings.AddSplit("ReSuspend"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); // Re-mark root set. mark_sweep.ReMarkRoots(); @@ -1376,7 +1375,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft } { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); mark_sweep.ProcessReferences(clear_soft_references); timings.AddSplit("ProcessReferences"); @@ -1395,7 +1394,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft if (kIsDebugBuild) { // Verify that we only reach marked objects from the image space. - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); mark_sweep.VerifyImageRoots(); timings.AddSplit("VerifyImageRoots"); } @@ -1411,18 +1410,18 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // If we are going to do post Gc verification, lets keep the mutators paused since we don't // want them to touch dead objects before we find these in verification. 
if (post_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPostGC"); } thread_list->ResumeAll(); dirty_end = NanoTime(); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); { // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above). - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type != GC_STICKY) { mark_sweep.Sweep(gc_type == GC_PARTIAL, swap); } else { diff --git a/src/heap.h b/src/heap.h index 23f2ac3c37..104cbdf84e 100644 --- a/src/heap.h +++ b/src/heap.h @@ -90,7 +90,7 @@ class LOCKABLE Heap { // Allocates and initializes storage for an object instance. Object* AllocObject(Class* klass, size_t num_bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Check sanity of given reference. Requires the heap lock. #if VERIFY_OBJECT_ENABLED @@ -103,8 +103,8 @@ class LOCKABLE Heap { void VerifyHeap(); static void RootMatchesObjectVisitor(const Object* root, void* arg); void VerifyHeapReferences(const std::string& phase) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, // and doesn't abort on error, allowing the caller to report more @@ -114,11 +114,11 @@ class LOCKABLE Heap { // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. 
bool IsLiveObjectLocked(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Initiates an explicit garbage collection. void CollectGarbage(bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Does a concurrent GC, should only be called by the GC daemon thread // through runtime. @@ -133,8 +133,8 @@ class LOCKABLE Heap { // Implements VMDebug.countInstancesOfClass. int64_t CountInstances(Class* c, bool count_assignable) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to // implement dalvik.system.VMRuntime.clearGrowthLimit. @@ -244,11 +244,11 @@ class LOCKABLE Heap { void Trim(); - HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { return live_bitmap_.get(); } - HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { return mark_bitmap_.get(); } @@ -256,7 +256,7 @@ class LOCKABLE Heap { // Mark and empty stack. void FlushAllocStack() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Mark all the objects in the allocation stack as live. void MarkStackAsLive(MarkStack* alloc_stack); @@ -269,7 +269,7 @@ class LOCKABLE Heap { // Update and mark mod union table based on gc type. 
void UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. // Assumes there is only one image space. @@ -280,8 +280,8 @@ class LOCKABLE Heap { private: // Allocates uninitialized storage. Object* Allocate(AllocSpace* space, size_t num_bytes) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Pushes a list of cleared references out to the managed heap. void EnqueueClearedReferences(Object** cleared_references); @@ -290,19 +290,19 @@ class LOCKABLE Heap { void RequestConcurrentGC(); void RecordAllocation(AllocSpace* space, const Object* object) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); void CollectGarbageInternal(GcType gc_plan, bool clear_soft_references) LOCKS_EXCLUDED(gc_complete_lock_, - GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + Locks::heap_bitmap_lock_, + Locks::mutator_lock_, + Locks::thread_suspend_count_lock_); void CollectGarbageMarkSweepPlan(GcType gc_plan, bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); void CollectGarbageConcurrentMarkSweepPlan(GcType gc_plan, bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); // Given the current contents of the alloc space, increase the allowed heap footprint to match // the 
target utilization ratio. This should only be called immediately after a full garbage @@ -311,7 +311,7 @@ class LOCKABLE Heap { size_t GetPercentFree(); - void AddSpace(Space* space) LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_); + void AddSpace(Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); // No thread saftey analysis since we call this everywhere and it is impossible to find a proper // lock ordering for it. @@ -376,8 +376,8 @@ class LOCKABLE Heap { // Last trim time uint64_t last_trim_time_; - UniquePtr live_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); - UniquePtr mark_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); + UniquePtr live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); + UniquePtr mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); // True while the garbage collector is trying to signal the GC daemon thread. // This flag is needed to prevent recursion from occurring when the JNI calls diff --git a/src/heap_bitmap.h b/src/heap_bitmap.h index d202ae3591..50ecc7a9ee 100644 --- a/src/heap_bitmap.h +++ b/src/heap_bitmap.h @@ -26,14 +26,14 @@ namespace art { class HeapBitmap { public: bool Test(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL); return bitmap->Test(obj); } void Clear(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to clear object " @@ -43,7 +43,7 @@ namespace art { } void Set(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to mark object " @@ -63,7 +63,7 @@ namespace art { } void 
Walk(SpaceBitmap::Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { // TODO: C++0x auto for (Bitmaps::iterator it = bitmaps_.begin(); it!= bitmaps_.end(); ++it) { (*it)->Walk(callback, arg); @@ -72,7 +72,7 @@ namespace art { template void Visit(const Visitor& visitor) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { // TODO: C++0x auto for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { SpaceBitmap* bitmap = *it; @@ -83,7 +83,7 @@ namespace art { // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); HeapBitmap(Heap* heap) : heap_(heap) { diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc index 356c1fb4f6..0cad70968b 100644 --- a/src/hprof/hprof.cc +++ b/src/hprof/hprof.cc @@ -402,17 +402,17 @@ class Hprof { } void Dump() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) { // Walk the roots and the heap. 
current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME); Runtime::Current()->VisitRoots(RootVisitor, this); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); Runtime::Current()->GetHeap()->FlushAllocStack(); } { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(HeapBitmapCallback, this); } current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_END, HPROF_TIME); @@ -474,28 +474,28 @@ class Hprof { private: static void RootVisitor(const Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(arg != NULL); Hprof* hprof = reinterpret_cast(arg); hprof->VisitRoot(obj); } static void HeapBitmapCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(obj != NULL); CHECK(arg != NULL); Hprof* hprof = reinterpret_cast(arg); hprof->DumpHeapObject(obj); } - void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Finish() { } - int WriteClassTable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + int WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { HprofRecord* rec = ¤t_record_; uint32_t nextSerialNumber = 1; @@ -563,7 +563,7 @@ class Hprof { int MarkRootObject(const Object* obj, jobject jniObj); HprofClassObjectId LookupClassId(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (c == NULL) { // 
c is the superclass of java.lang.Object or a primitive return (HprofClassObjectId)0; @@ -598,7 +598,7 @@ class Hprof { } HprofStringId LookupClassNameId(const Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } diff --git a/src/image.h b/src/image.h index 253b762e54..399b1439d2 100644 --- a/src/image.h +++ b/src/image.h @@ -95,7 +95,7 @@ class PACKED ImageHeader { }; Object* GetImageRoot(ImageRoot image_root) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetImageRoots()->Get(image_root); } diff --git a/src/image_writer.cc b/src/image_writer.cc index 0932c307f7..d91fc59905 100644 --- a/src/image_writer.cc +++ b/src/image_writer.cc @@ -189,10 +189,10 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) { } void ImageWriter::ComputeEagerResolvedStrings() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: Check image spaces only? 
Heap* heap = Runtime::Current()->GetHeap(); - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this); } @@ -270,18 +270,18 @@ bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) { } void ImageWriter::CheckNonImageClassesRemoved() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (image_classes_ == NULL) { return; } Heap* heap = Runtime::Current()->GetHeap(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->GetLiveBitmap()->Walk(CheckNonImageClassesRemovedCallback, this); } @@ -394,7 +394,7 @@ void ImageWriter::CalculateNewObjectOffsets() { { Heap* heap = Runtime::Current()->GetHeap(); - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } @@ -420,12 +420,12 @@ void ImageWriter::CalculateNewObjectOffsets() { } void ImageWriter::CopyAndFixupObjects() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Heap* heap = Runtime::Current()->GetHeap(); // TODO: heap validation can't handle this fix up pass heap->DisableObjectValidation(); // TODO: Image spaces only? 
- ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this); } @@ -604,7 +604,7 @@ void ImageWriter::FixupFields(const Object* orig, } static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); @@ -626,7 +626,7 @@ static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) } static Method* GetTargetMethod(const Compiler::PatchInformation* patch) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); Method* method = class_linker->ResolveMethod(patch->GetDexFile(), diff --git a/src/image_writer.h b/src/image_writer.h index f768d8707d..8e8d106c68 100644 --- a/src/image_writer.h +++ b/src/image_writer.h @@ -49,14 +49,14 @@ class ImageWriter { const std::string& oat_filename, const std::string& oat_location, const Compiler& compiler) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); private: bool AllocMemory(); // we use the lock word to store the offset of the object in the image void AssignImageOffset(Object* object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(object != NULL); SetImageOffset(object, image_end_); image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment @@ -108,55 +108,55 @@ class ImageWriter { return oat_begin_ + offset; } - bool IsImageClass(const Class* klass) 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsImageClass(const Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpImageClasses(); void ComputeLazyFieldsForImageClasses() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution void ComputeEagerResolvedStrings(); static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool NonImageClassesVisitor(Class* c, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CheckNonImageClassesRemoved(); static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CalculateNewObjectOffsets() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void CalculateNewObjectOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray* CreateImageRoots() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CopyAndFixupObjects(); static void CopyAndFixupObjectsCallback(Object* obj, void* arg) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupClass(const Class* orig, Class* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupMethod(const Method* orig, Method* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupObject(const Object* orig, Object* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupObjectArray(const ObjectArray* orig, ObjectArray* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupInstanceFields(const Object* orig, Object* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticFields(const Class* orig, Class* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PatchOatCodeAndMethods(const Compiler& compiler) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); SafeMap offsets_; diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h index c3e17b0f89..48bd35b1fc 100644 --- a/src/indirect_reference_table.h +++ b/src/indirect_reference_table.h @@ -258,7 +258,7 @@ class IndirectReferenceTable { * failed during expansion). 
*/ IndirectRef Add(uint32_t cookie, const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Given an IndirectRef in the table, return the Object it refers to. @@ -288,7 +288,7 @@ class IndirectReferenceTable { void AssertEmpty(); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Return the #of entries in the entire table. This includes holes, and diff --git a/src/intern_table.h b/src/intern_table.h index 0d9e0971ab..6f5677385f 100644 --- a/src/intern_table.h +++ b/src/intern_table.h @@ -42,26 +42,26 @@ class InternTable { // Interns a potentially new string in the 'strong' table. (See above.) String* InternStrong(int32_t utf16_length, const char* utf8_data) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) String* InternStrong(const char* utf8_data) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'weak' table. (See above.) - String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Register a String trusting that it is safe to intern. // Used when reinitializing InternTable from an image. 
- void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t Size() const; @@ -73,10 +73,10 @@ class InternTable { typedef std::multimap Table; String* Insert(String* s, bool is_strong) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* Lookup(Table& table, String* s, uint32_t hash_code) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* Insert(Table& table, String* s, uint32_t hash_code); void Remove(Table& table, const String* s, uint32_t hash_code); diff --git a/src/intern_table_test.cc b/src/intern_table_test.cc index 0d46076382..15fa259d20 100644 --- a/src/intern_table_test.cc +++ b/src/intern_table_test.cc @@ -102,7 +102,7 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { p.Expect(s0.get()); p.Expect(s1.get()); { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); t.SweepInternTableWeaks(IsMarked, &p); } diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h index 725e857aea..54e5cc75e2 100644 --- a/src/jdwp/jdwp.h +++ b/src/jdwp/jdwp.h @@ -79,7 +79,7 @@ struct JdwpLocation { uint64_t dex_pc; }; std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs); bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs); @@ 
-120,7 +120,7 @@ struct JdwpState { * Returns a newly-allocated JdwpState struct on success, or NULL on failure. */ static JdwpState* Create(const JdwpOptions* options) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); ~JdwpState(); @@ -180,7 +180,7 @@ struct JdwpState { * The VM has finished initializing. Only called when the debugger is * connected at the time initialization completes. */ - bool PostVMStart() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * A location of interest has been reached. This is used for breakpoints, @@ -193,7 +193,7 @@ struct JdwpState { * "eventFlags" indicates the types of events that have occurred. */ bool PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * An exception has been thrown. @@ -202,20 +202,20 @@ struct JdwpState { */ bool PostException(const JdwpLocation* pThrowLoc, ObjectId excepId, RefTypeId excepClassId, const JdwpLocation* pCatchLoc, ObjectId thisPtr) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * A thread has started or stopped. */ bool PostThreadChange(ObjectId threadId, bool start) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Class has been prepared. */ bool PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, int status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * The VM is about to stop. @@ -223,13 +223,13 @@ struct JdwpState { bool PostVMDeath(); // Called if/when we realize we're talking to DDMS. 
- void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Send up a chunk of DDM data. */ void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Process a request from the debugger. @@ -248,15 +248,15 @@ struct JdwpState { void ResetState() LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* atomic ops to get next serial number */ uint32_t NextRequestSerial(); uint32_t NextEventSerial(); void Run() - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_suspend_count_lock_); /* * Register an event by adding it to the event list. @@ -266,45 +266,45 @@ struct JdwpState { */ JdwpError RegisterEvent(JdwpEvent* pEvent) LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Unregister an event, given the requestId. */ void UnregisterEventById(uint32_t requestId) LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Unregister all events. 
*/ void UnregisterAll() LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: explicit JdwpState(const JdwpOptions* options); bool InvokeInProgress(); bool IsConnected(); void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, ObjectId threadId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CleanupMatchList(JdwpEvent** match_list, int match_count) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void EventFinish(ExpandBuf* pReq); void FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** match_list, int* pMatchCount) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void UnregisterEvent(JdwpEvent* pEvent) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); public: // TODO: fix privacy const JdwpOptions* options_; diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index 0eb2930f01..4f11a65331 100644 --- a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -375,7 +375,7 @@ static bool PatternMatch(const char* pattern, const std::string& target) { * need to do this even if later mods cause us to ignore the event. 
*/ static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JdwpEventMod* pMod = pEvent->mods; for (int i = pEvent->modCount; i > 0; i--, pMod++) { diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc index fbe9192ee2..07e47b5c68 100644 --- a/src/jdwp/jdwp_handler.cc +++ b/src/jdwp/jdwp_handler.cc @@ -92,7 +92,7 @@ static void JdwpWriteValue(ExpandBuf* pReply, int width, uint64_t value) { static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, ObjectId thread_id, ObjectId object_id, RefTypeId class_id, MethodId method_id, bool is_constructor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(!is_constructor || object_id != 0); uint32_t arg_count = Read4BE(&buf); @@ -161,7 +161,7 @@ static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Request for version info. */ static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* text information on runtime version */ std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion())); expandBufAddUtf8String(pReply, version); @@ -182,7 +182,7 @@ static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) * been loaded by multiple class loaders. 
*/ static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string classDescriptor(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req for class by signature '" << classDescriptor << "'"; @@ -215,7 +215,7 @@ static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, Expa * to be suspended, and that violates some JDWP expectations. */ static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::vector thread_ids; Dbg::GetThreads(0, thread_ids); @@ -231,7 +231,7 @@ static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl * List all thread groups that do not have a parent. */ static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* * TODO: maintain a list of parentless thread groups in the VM. * @@ -254,7 +254,7 @@ static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, Expand * All IDs are 8 bytes. 
*/ static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd4BE(pReply, sizeof(FieldId)); expandBufAdd4BE(pReply, sizeof(MethodId)); expandBufAdd4BE(pReply, sizeof(ObjectId)); @@ -264,7 +264,7 @@ static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) } static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::Disposed(); return ERR_NONE; } @@ -276,7 +276,7 @@ static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) * This needs to increment the "suspend count" on all threads. */ static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::SuspendVM(); return ERR_NONE; } @@ -285,7 +285,7 @@ static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) * Resume execution. Decrements the "suspend count" of all threads. */ static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::ResumeVM(); return ERR_NONE; } @@ -294,7 +294,7 @@ static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) * The debugger wants the entire VM to exit. */ static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t exitCode = Get4BE(buf); LOG(WARNING) << "Debugger is telling the VM to exit with code=" << exitCode; @@ -310,7 +310,7 @@ static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * string "java.util.Arrays".) 
*/ static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string str(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req to create string '" << str << "'"; ObjectId stringId = Dbg::CreateString(str); @@ -325,7 +325,7 @@ static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Tell the debugger what we are capable of. */ static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -337,7 +337,7 @@ static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pRe } static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAddUtf8String(pReply, "/"); std::vector class_path; @@ -363,7 +363,7 @@ static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl * Currently does nothing. */ static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ERR_NONE; } @@ -371,7 +371,7 @@ static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) * Tell the debugger what we are capable of. 
*/ static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -402,7 +402,7 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* } static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::vector classes; Dbg::GetClassList(classes); @@ -433,17 +433,17 @@ static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status } static JdwpError VM_AllClasses(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, false); } static JdwpError VM_AllClassesWithGeneric(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, true); } static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetModifiers(refTypeId, pReply); } @@ -452,7 +452,7 @@ static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Get values from static fields in a reference type. 
*/ static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); uint32_t field_count = Read4BE(&buf); expandBufAdd4BE(pReply, field_count); @@ -470,7 +470,7 @@ static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Get the name of the source file in which a reference type was declared. */ static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); std::string source_file; JdwpError status = Dbg::GetSourceFile(refTypeId, source_file); @@ -485,7 +485,7 @@ static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Return the current status of the reference type. */ static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); JDWP::JdwpTypeTag type_tag; uint32_t class_status; @@ -501,7 +501,7 @@ static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return interfaces implemented directly by this class. */ static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for interfaces in %#llx (%s)", refTypeId, Dbg::GetClassName(refTypeId).c_str()); return Dbg::OutputDeclaredInterfaces(refTypeId, pReply); @@ -511,7 +511,7 @@ static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Return the class object corresponding to this type. 
*/ static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); ObjectId classObjectId; JdwpError status = Dbg::GetClassObject(refTypeId, classObjectId); @@ -529,14 +529,14 @@ static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* * JDB seems interested, but DEX files don't currently support this. */ static JdwpError RT_SourceDebugExtension(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* referenceTypeId in, string out */ return ERR_ABSENT_INFORMATION; } static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool with_generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for signature of refTypeId=%#llx", refTypeId); @@ -554,13 +554,13 @@ static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR } static JdwpError RT_Signature(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, false); } static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, true); } @@ -569,13 +569,13 @@ static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, i * reference type, or null if it was loaded by the system loader. 
*/ static JdwpError RT_ClassLoader(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetClassLoader(refTypeId, pReply); } static std::string Describe(const RefTypeId& refTypeId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string signature("unknown"); Dbg::GetSignature(refTypeId, signature); return StringPrintf("refTypeId=%#llx (%s)", refTypeId, signature.c_str()); @@ -586,7 +586,7 @@ static std::string Describe(const RefTypeId& refTypeId) * fields declared by a class. */ static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, true, pReply); @@ -594,7 +594,7 @@ static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, Expan // Obsolete equivalent of FieldsWithGeneric, without the generic type information. static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, false, pReply); @@ -605,7 +605,7 @@ static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * methods declared by a class. 
*/ static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, true, pReply); @@ -613,7 +613,7 @@ static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, Expa // Obsolete equivalent of MethodsWithGeneric, without the generic type information. static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, false, pReply); @@ -623,7 +623,7 @@ static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep * Return the immediate superclass of a class. */ static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); RefTypeId superClassId; JdwpError status = Dbg::GetSuperclass(class_id, superClassId); @@ -638,7 +638,7 @@ static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Set static class values. 
*/ static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); uint32_t values = Read4BE(&buf); @@ -668,7 +668,7 @@ static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) */ static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -685,7 +685,7 @@ static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL */ static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -706,7 +706,7 @@ static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLe * Create a new array object of the requested type and length. */ static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId arrayTypeId = ReadRefTypeId(&buf); uint32_t length = Read4BE(&buf); @@ -728,7 +728,7 @@ static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Return line number information for the method, if present. 
*/ static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -741,7 +741,7 @@ static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRe static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -759,13 +759,13 @@ static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* static JdwpError M_VariableTable(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, false); } static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, true); } @@ -777,7 +777,7 @@ static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf * passed in here. */ static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for type of object_id=%#llx", object_id); return Dbg::GetReferenceType(object_id, pReply); @@ -787,7 +787,7 @@ static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf * Get values from the fields of an object. 
*/ static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -810,7 +810,7 @@ static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set values in the fields of an object. */ static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -846,7 +846,7 @@ static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) */ static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); ObjectId thread_id = ReadObjectId(&buf); RefTypeId class_id = ReadRefTypeId(&buf); @@ -859,7 +859,7 @@ static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL * Disable garbage collection of the specified object. */ static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -868,7 +868,7 @@ static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf * Enable garbage collection of the specified object. 
*/ static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -877,7 +877,7 @@ static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf* * Determine whether an object has been garbage collected. */ static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id; object_id = ReadObjectId(&buf); @@ -893,7 +893,7 @@ static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Return the string value in a string object. */ static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId stringObject = ReadObjectId(&buf); std::string str(Dbg::StringToUtf8(stringObject)); @@ -908,7 +908,7 @@ static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply * Return a thread's name. */ static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread %#llx", thread_id); @@ -929,7 +929,7 @@ static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) * resume it; only the JDI is allowed to resume it. 
*/ static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -948,7 +948,7 @@ static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Resume the specified thread. */ static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -964,7 +964,7 @@ static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Return status of specified thread. */ static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for status of thread %#llx", thread_id); @@ -987,7 +987,7 @@ static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return the thread group that the specified thread is a member of. */ static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); return Dbg::GetThreadGroup(thread_id, pReply); } @@ -999,7 +999,7 @@ static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* * be THREAD_NOT_SUSPENDED. 
*/ static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); uint32_t start_frame = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -1036,7 +1036,7 @@ static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Returns the #of frames on the specified thread, which must be suspended. */ static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (!Dbg::ThreadExists(thread_id)) { @@ -1060,7 +1060,7 @@ static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Get the monitor that the thread is waiting on. */ static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ReadObjectId(&buf); // thread_id // TODO: create an Object to represent the monitor (we're currently @@ -1076,7 +1076,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, * its suspend count recently.) */ static JdwpError TR_DebugSuspendCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); return Dbg::GetThreadDebugSuspendCount(thread_id, pReply); } @@ -1087,7 +1087,7 @@ static JdwpError TR_DebugSuspendCount(JdwpState*, const uint8_t* buf, int, Expan * The Eclipse debugger recognizes "main" and "system" as special. 
*/ static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread_group_id=%#llx", thread_group_id); @@ -1101,7 +1101,7 @@ static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply * thread group. */ static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id); @@ -1115,7 +1115,7 @@ static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep * specified thread group. */ static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for threads in thread_group_id=%#llx", thread_group_id); @@ -1140,7 +1140,7 @@ static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Return the #of components in the array. */ static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for length of array %#llx", arrayId); @@ -1160,7 +1160,7 @@ static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return the values from an array. 
*/ static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -1173,7 +1173,7 @@ static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set values in an array. */ static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t values = Read4BE(&buf); @@ -1185,7 +1185,7 @@ static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) } static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ReadObjectId(&buf); // classLoaderObject // TODO: we should only return classes which have the given class loader as a defining or // initiating loader. The former would be easy; the latter is hard, because we don't have @@ -1199,7 +1199,7 @@ static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandB * Reply with a requestID. */ static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* origBuf = buf; uint8_t eventKind = Read1(&buf); @@ -1369,7 +1369,7 @@ static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, Expan * and does not return an error. 
*/ static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint8_t eventKind; eventKind = Read1(&buf); uint32_t requestId = Read4BE(&buf); @@ -1385,7 +1385,7 @@ static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) * Return the values of arguments and local variables. */ static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1411,7 +1411,7 @@ static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set the values of arguments and local variables. */ static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1435,7 +1435,7 @@ static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Returns the value of "this" for the specified frame. */ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); @@ -1467,7 +1467,7 @@ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * that, or I have no idea what this is for.) 
*/ static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId classObjectId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for refTypeId for class=%#llx (%s)", classObjectId, Dbg::GetClassName(classObjectId).c_str()); @@ -1478,7 +1478,7 @@ static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBu * Handle a DDM packet with a single chunk in it. */ static JdwpError DDM_Chunk(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint8_t* replyBuf = NULL; int replyLen = -1; diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc index 4b442db51f..4fec005005 100644 --- a/src/jdwp/jdwp_main.cc +++ b/src/jdwp/jdwp_main.cc @@ -118,7 +118,7 @@ JdwpState::JdwpState(const JdwpOptions* options) * the thread is accepting network connections. */ JdwpState* JdwpState::Create(const JdwpOptions* options) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); UniquePtr state(new JdwpState(options)); switch (options->transport) { case kJdwpTransportSocket: @@ -301,7 +301,7 @@ void JdwpState::Run() { /* set the thread state to kWaitingInMainDebuggerLoop so GCs don't wait for us */ { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kNative); thread_->SetState(kWaitingInMainDebuggerLoop); } @@ -346,7 +346,7 @@ void JdwpState::Run() { while (!Dbg::IsDisposed()) { { // sanity check -- shouldn't happen? 
- MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); } @@ -401,7 +401,7 @@ void JdwpState::Run() { /* back to native, for thread shutdown */ { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); thread_->SetState(kNative); } diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc index 72b5848fc3..e0320b35dd 100644 --- a/src/jni_compiler_test.cc +++ b/src/jni_compiler_test.cc @@ -116,9 +116,9 @@ void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); @@ -170,7 +170,7 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -198,7 +198,7 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -227,7 +227,7 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject 
thisObj, jlong x, jlong y) // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -257,7 +257,7 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -288,7 +288,7 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -316,7 +316,7 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject // 3 = this + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -369,7 +369,7 @@ jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { // 1 = klass EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -394,7 +394,7 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, 
jdouble // 1 = klass EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -425,7 +425,7 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y // 3 = klass + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -479,7 +479,7 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject // 3 = klass + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); diff --git a/src/jni_internal.cc b/src/jni_internal.cc index ab2286fa5c..6dc1a73267 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -93,7 +93,7 @@ size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) { class ArgArray { public: - explicit ArgArray(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + explicit ArgArray(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(method); shorty_ = mh.GetShorty(); shorty_len_ = mh.GetShortyLength(); @@ -110,7 +110,7 @@ class ArgArray { } void BuildArgArray(const ScopedObjectAccess& soa, va_list ap) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -145,7 +145,7 @@ class ArgArray { } void BuildArgArray(const 
ScopedObjectAccess& soa, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -189,7 +189,7 @@ class ArgArray { }; static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (obj == NULL) { return NULL; } @@ -201,7 +201,7 @@ static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj) } static void CheckMethodArguments(Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(m); ObjectArray* parameter_types = mh.GetParameterTypes(); CHECK(parameter_types != NULL); @@ -227,7 +227,7 @@ static void CheckMethodArguments(Method* m, JValue* args) static JValue InvokeWithArgArray(const ScopedObjectAccess& soa, Object* receiver, Method* method, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(soa.Env()->check_jni)) { CheckMethodArguments(method, args); } @@ -238,7 +238,7 @@ static JValue InvokeWithArgArray(const ScopedObjectAccess& soa, Object* receiver static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode(obj); Method* method = soa.DecodeMethod(mid); ArgArray arg_array(method); @@ -247,13 +247,13 @@ static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, } static Method* FindVirtualMethod(Object* receiver, Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return 
receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method); } static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode(obj); Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); @@ -263,7 +263,7 @@ static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa, static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode(obj); Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); @@ -297,14 +297,14 @@ static std::string NormalizeJniClassDescriptor(const char* name) { static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, Class* c, const char* name, const char* sig, const char* kind) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", "no %s method \"%s.%s%s\"", kind, ClassHelper(c).GetDescriptor(), name, sig); } static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; @@ -331,7 +331,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, } static ClassLoader* GetClassLoader(Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
Method* method = self->GetCurrentMethod(); if (method == NULL || PrettyMethod(method, false) == "java.lang.Runtime.nativeLoad") { return self->GetClassLoaderOverride(); @@ -341,7 +341,7 @@ static ClassLoader* GetClassLoader(Thread* self) static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; @@ -380,14 +380,14 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con } static void PinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Add(array); } static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Remove(array); @@ -395,7 +395,7 @@ static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* arra static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, jsize length, const char* identifier) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string type(PrettyTypeOf(array)); soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "%s offset=%d length=%d %s.length=%d", @@ -404,13 +404,13 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, jsize array_length) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", "offset=%d length=%d string.length()=%d", start, length, array_length); } int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(env); // Turn the const char* into a java.lang.String. @@ -512,7 +512,7 @@ class SharedLibrary { */ bool CheckOnLoadResult() LOCKS_EXCLUDED(jni_on_load_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad); bool okay; @@ -615,7 +615,7 @@ class Libraries { // See section 11.3 "Linking Native Methods" of the JNI spec. void* FindNativeMethod(const Method* m, std::string& detail) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string jni_short_name(JniShortName(m)); std::string jni_long_name(JniLongName(m)); const ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader(); @@ -660,7 +660,7 @@ JValue InvokeWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID m JValue InvokeWithJValues(const ScopedObjectAccess& soa, Object* receiver, Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return InvokeWithArgArray(soa, receiver, m, args); } @@ -2286,7 +2286,7 @@ class JNI { private: static jint EnsureLocalCapacity(const ScopedObjectAccess& soa, jint desired_capacity, const char* caller) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: we should try to expand the table if necessary. 
if (desired_capacity < 1 || desired_capacity > static_cast(kLocalsMax)) { LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; @@ -2303,7 +2303,7 @@ class JNI { template static JniT NewPrimitiveArray(const ScopedObjectAccess& soa, jsize length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_GE(length, 0); // TODO: ReportJniError ArtT* result = ArtT::Alloc(length); return soa.AddLocalReference(result); @@ -2312,7 +2312,7 @@ class JNI { template static CArrayT GetPrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, jboolean* is_copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArtArrayT* array = soa.Decode(java_array); PinPrimitiveArray(soa, array); if (is_copy != NULL) { @@ -2324,7 +2324,7 @@ class JNI { template static void ReleasePrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, jint mode) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mode != JNI_COMMIT) { Array* array = soa.Decode(java_array); UnpinPrimitiveArray(soa, array); @@ -2334,7 +2334,7 @@ class JNI { template static void GetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, jsize start, jsize length, JavaT* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArrayT* array = soa.Decode(java_array); if (start < 0 || length < 0 || start + length > array->GetLength()) { ThrowAIOOBE(soa, array, start, length, "src"); @@ -2347,7 +2347,7 @@ class JNI { template static void SetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, jsize start, jsize length, const JavaT* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArrayT* array = soa.Decode(java_array); if (start < 0 || length < 0 || start + length 
> array->GetLength()) { ThrowAIOOBE(soa, array, start, length, "dst"); diff --git a/src/jni_internal.h b/src/jni_internal.h index fad06e1bec..6833c2a622 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -54,9 +54,9 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len); JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); JValue InvokeWithJValues(const ScopedObjectAccess&, Object* receiver, Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); @@ -71,19 +71,19 @@ struct JavaVMExt : public JavaVM { * human-readable description of the error. */ bool LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /** * Returns a pointer to the code for the native method 'm', found * using dlsym(3) on every native library that's been loaded so far. 
*/ void* FindCodeForNativeMethod(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); @@ -129,7 +129,7 @@ struct JNIEnvExt : public JNIEnv { ~JNIEnvExt(); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc index 64461b010f..5db258d789 100644 --- a/src/jni_internal_test.cc +++ b/src/jni_internal_test.cc @@ -70,7 +70,7 @@ class JniInternalTest : public CommonTest { Method::InvokeStub* DoCompile(Method*& method, Object*& receiver, bool is_static, const char* method_name, const char* method_signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* class_name = is_static ? 
"StaticLeafMethods" : "NonStaticLeafMethods"; jobject jclass_loader(LoadDex(class_name)); SirtRef class_loader(ScopedObjectAccessUnchecked(Thread::Current()).Decode(jclass_loader)); @@ -97,7 +97,7 @@ class JniInternalTest : public CommonTest { return stub; } - void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "nop", "()V"); @@ -105,7 +105,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityByteMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(B)B"); @@ -135,7 +135,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(I)I"); @@ -165,7 +165,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(D)D"); @@ -195,7 +195,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(II)I"); @@ -234,7 +234,7 @@ class JniInternalTest : public CommonTest { } 
void InvokeSumIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(III)I"); @@ -278,7 +278,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIII)I"); @@ -327,7 +327,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIIII)I"); @@ -381,7 +381,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DD)D"); @@ -421,7 +421,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDD)D"); @@ -452,7 +452,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, 
is_static, "sum", "(DDDD)D"); @@ -486,7 +486,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDDD)D"); diff --git a/src/logging.cc b/src/logging.cc index 712c02bcd7..a0c07cf227 100644 --- a/src/logging.cc +++ b/src/logging.cc @@ -51,7 +51,7 @@ const char* ProgramInvocationShortName() { // This can be used to reveal or conceal logs with specific tags. void InitLogging(char* argv[]) { // TODO: Move this to a more obvious InitART... - GlobalSynchronization::Init(); + Locks::Init(); // Stash the command line for later use. We can use /proc/self/cmdline on Linux to recover this, // but we don't have that luxury on the Mac, and there are a couple of argv[0] variants that are @@ -104,7 +104,7 @@ LogMessage::~LogMessage() { // Do the actual logging with the lock held. 
{ - MutexLock mu(*GlobalSynchronization::logging_lock_); + MutexLock mu(*Locks::logging_lock_); if (msg.find('\n') == std::string::npos) { LogLine(msg.c_str()); } else { diff --git a/src/logging.h b/src/logging.h index ce86a72cbb..75782d5640 100644 --- a/src/logging.h +++ b/src/logging.h @@ -187,7 +187,7 @@ struct LogMessageData { class LogMessage { public: LogMessage(const char* file, int line, LogSeverity severity, int error); - ~LogMessage() LOCKS_EXCLUDED(GlobalSynchronization::logging_lock_); + ~LogMessage() LOCKS_EXCLUDED(Locks::logging_lock_); std::ostream& stream(); private: @@ -253,10 +253,10 @@ template class MutatorLockedDumpable { public: explicit MutatorLockedDumpable(T& value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : value_(value) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { } - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { value_.Dump(os); } @@ -271,7 +271,7 @@ class MutatorLockedDumpable { template std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable& rhs) -// TODO: should be SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) however annotalysis +// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis // currently fails for this. 
NO_THREAD_SAFETY_ANALYSIS { rhs.Dump(os); diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc index 0b09f90b4f..c21c19c4ed 100644 --- a/src/mark_sweep.cc +++ b/src/mark_sweep.cc @@ -160,8 +160,8 @@ class CheckObjectVisitor { } void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { mark_sweep_->CheckReference(obj, ref, offset, is_static); } @@ -195,8 +195,8 @@ class ScanImageRootVisitor { } void operator ()(const Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanObject(root); } @@ -245,8 +245,8 @@ class CheckBitmapVisitor { } void operator ()(const Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); mark_sweep_->CheckObject(obj); } @@ -280,8 +280,8 @@ class ScanObjectVisitor { } void operator ()(const Object* obj) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mark_sweep_->ScanObject(obj); } @@ -415,7 +415,7 @@ struct SweepCallbackContext { }; void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(); size_t freed_objects = num_ptrs; size_t freed_bytes = 0; @@ -449,7 +449,7 @@ void MarkSweep::SweepCallback(size_t 
num_ptrs, Object** ptrs, void* arg) { } void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(); SweepCallbackContext* context = static_cast(arg); Heap* heap = context->mark_sweep->GetHeap(); diff --git a/src/mark_sweep.h b/src/mark_sweep.h index 2333bdbfca..d1e348187c 100644 --- a/src/mark_sweep.h +++ b/src/mark_sweep.h @@ -46,14 +46,14 @@ class MarkSweep { // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Marks the roots in the image space on dirty cards. - void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Verify that image roots point to only marked objects within the alloc space. - void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); bool IsMarkStackEmpty() const { return mark_stack_->IsEmpty(); @@ -61,8 +61,8 @@ class MarkSweep { // Builds a mark stack and recursively mark until it empties. void RecursiveMark(bool partial, TimingLogger& timings) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs. void CopyMarkBits(Space* space); @@ -70,27 +70,27 @@ class MarkSweep { // Builds a mark stack with objects on dirty cards and recursively mark // until it empties. 
void RecursiveMarkDirtyObjects(bool update_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Recursive mark objects on specified cards. Updates finger. void RecursiveMarkCards(CardTable* card_table, const std::vector& cards, TimingLogger& timings) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);; + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);; // Remarks the root set after completing the concurrent mark. void ReMarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Heap* GetHeap() { return heap_; } void ProcessReferences(bool clear_soft_references) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ProcessReferences(&soft_reference_list_, clear_soft_references, &weak_reference_list_, &finalizer_reference_list_, @@ -99,11 +99,11 @@ class MarkSweep { // Sweeps unmarked objects to complete the garbage collection. void Sweep(bool partial, bool swap_bitmaps) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Sweep only pointers within an array. WARNING: Trashes objects. 
void SweepArray(TimingLogger& logger, MarkStack* allocation_stack_, bool swap_bitmaps) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); Object* GetClearedReferences() { return cleared_reference_list_; @@ -111,13 +111,13 @@ class MarkSweep { // Proxy for external access to ScanObject. void ScanRoot(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Blackens an object. void ScanObject(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetFinger(Object* new_finger) { finger_ = new_finger; @@ -140,12 +140,12 @@ class MarkSweep { } void SweepSystemWeaks(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); template static void VisitObjectReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); DCHECK(obj->GetClass() != NULL); if (obj->IsClass()) { @@ -160,7 +160,7 @@ class MarkSweep { private: // Returns true if the object has its bit set in the mark bitmap. 
bool IsMarked(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { DCHECK(current_mark_bitmap_ != NULL); if (current_mark_bitmap_->HasAddress(object)) { return current_mark_bitmap_->Test(object); @@ -169,61 +169,58 @@ class MarkSweep { } static bool IsMarkedCallback(const Object* object, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static bool IsLiveCallback(const Object* object, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void MarkObjectVisitor(const Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void ReMarkObjectVisitor(const Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void VerifyImageRootVisitor(Object* root, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); static void ScanDirtyCardCallback(Object* obj, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Marks an object. void MarkObject(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Yuck. 
void MarkObject0(const Object* obj, bool check_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void ScanBitmapCallback(Object* obj, void* finger, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Special sweep for zygote that just marks objects / dirties cards. static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void CheckObject(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Grays references in instance fields. 
void ScanInstanceFields(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); Class* klass = obj->GetClass(); DCHECK(klass != NULL); @@ -232,41 +229,39 @@ class MarkSweep { // Blackens a class object. void ScanClass(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitClassReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { VisitInstanceFieldsReferences(obj, visitor); VisitStaticFieldsReferences(obj->AsClass(), visitor); } // Grays references in static fields. 
void ScanStaticFields(const Class* klass) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(klass != NULL); VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor); } // Used by ScanInstanceFields and ScanStaticFields void ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { if (ref_offsets != CLASS_WALK_SUPER) { // Found a reference offset bitmap. Mark the specified offsets. while (ref_offsets != 0) { @@ -301,13 +296,12 @@ class MarkSweep { // Grays references in an array. 
void ScanArray(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitArrayReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { visitor(obj, obj->GetClass(), Object::ClassOffset(), false); if (obj->IsObjectArray()) { const ObjectArray* array = obj->AsObjectArray(); @@ -320,49 +314,48 @@ class MarkSweep { } void ScanOther(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template static void VisitOtherReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { return VisitInstanceFieldsReferences(obj, visitor); } // Blackens objects grayed during a garbage collection. void ScanGrayObjects(bool update_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(Object* reference) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Recursively blackens objects on the mark stack. 
void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void EnqueueFinalizerReferences(Object** ref) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PreserveSomeSoftReferences(Object** ref) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ClearWhiteReferences(Object** list) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void ProcessReferences(Object** soft_references, bool clear_soft_references, Object** weak_references, Object** finalizer_references, Object** phantom_references) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SweepJniWeakGlobals(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Current space, we check this space first to avoid searching for the appropriate space for an object. 
SpaceBitmap* current_mark_bitmap_; diff --git a/src/mod_union_table.cc b/src/mod_union_table.cc index eb8c598db5..d62128d610 100644 --- a/src/mod_union_table.cc +++ b/src/mod_union_table.cc @@ -57,8 +57,8 @@ class ModUnionVisitor { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. @@ -155,8 +155,8 @@ class ModUnionScanImageRootVisitor { } void operator ()(const Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanRoot(root); } @@ -226,8 +226,8 @@ class ModUnionReferenceVisitor { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. @@ -254,8 +254,7 @@ class CheckReferenceVisitor { // Extra parameters are required since we use this same visitor signature for checking objects. 
void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { Heap* heap = mod_union_table_->GetMarkSweep()->GetHeap(); if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) { Space* from_space = heap->FindSpaceFromObject(obj); @@ -287,8 +286,7 @@ class ModUnionCheckReferences { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); MarkSweep* mark_sweep = mod_union_table_->GetMarkSweep(); CheckReferenceVisitor visitor(mod_union_table_, references_); diff --git a/src/mod_union_table.h b/src/mod_union_table.h index df2023fb9f..17ca24085d 100644 --- a/src/mod_union_table.h +++ b/src/mod_union_table.h @@ -54,7 +54,7 @@ class ModUnionTable { // for said cards. Exclusive lock is required since verify sometimes uses // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the // bitmap or not. - virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) = 0; + virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0; // Should probably clean this up later. void Init(MarkSweep* mark_sweep) { @@ -86,11 +86,11 @@ class ModUnionTableBitmap : public ModUnionTable { // Update table based on cleared cards. void Update() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Mark all references to the alloc space(s). 
- void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); protected: // Cleared card array, used to update the mod-union table. @@ -115,15 +115,15 @@ class ModUnionTableReferenceCache : public ModUnionTable { // Update table based on cleared cards. void Update() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Mark all references to the alloc space(s). - void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and // VisitMarkedRange can't know if the callback will modify the bitmap or not. - void Verify() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Function that tells whether or not to add a reference to the table. virtual bool AddReference(const Object* obj, const Object* ref) = 0; @@ -152,8 +152,8 @@ class ModUnionTableCardCache : public ModUnionTable { // Mark all references to the alloc space(s). void MarkReferences() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Nothing to verify. void Verify() {} diff --git a/src/monitor.cc b/src/monitor.cc index 0e6735d1d3..6b7fbf116a 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -252,7 +252,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) 
__attribute__((format(printf, 1, 2))); static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list args; va_start(args, fmt); Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args); @@ -284,7 +284,7 @@ void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owne { // TODO: isn't this too late to prevent threads from disappearing? // Acquire thread list lock so threads won't disappear from under us. - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); // Re-read owner now that we hold lock. current_owner = (monitor != NULL) ? monitor->owner_ : NULL; // Get short descriptions of the threads involved. @@ -366,7 +366,7 @@ bool Monitor::Unlock(Thread* self, bool for_wait) { // Converts the given waiting time (relative to "now") into an absolute time in 'ts'. static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { int64_t endSec; #ifdef HAVE_TIMEDWAIT_MONOTONIC @@ -873,7 +873,7 @@ static uint32_t LockOwnerFromThreadLock(Object* thread_lock) { void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { ThreadState state; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); state = thread->GetState(); } @@ -913,7 +913,7 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { } static void DumpLockedObject(std::ostream& os, Object* o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << " - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n"; } diff --git a/src/monitor.h b/src/monitor.h index b506b39a96..de70803f2c 100644 --- a/src/monitor.h +++ b/src/monitor.h @@ 
-73,43 +73,43 @@ class Monitor { static void MonitorEnter(Thread* thread, Object* obj) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool MonitorExit(Thread* thread, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_); static void Notify(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void NotifyAll(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DescribeWait(std::ostream& os, const Thread* thread) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObject(); private: explicit Monitor(Thread* owner, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); static void Inflate(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t 
sample_percent, const char* owner_filename, uint32_t owner_line_number) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); bool Unlock(Thread* thread, bool for_wait) UNLOCK_FUNCTION(monitor_lock_); @@ -117,24 +117,24 @@ class Monitor { void Notify(Thread* self) NO_THREAD_SAFETY_ANALYSIS; void NotifyWithLock() EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void NotifyAll(Thread* self) NO_THREAD_SAFETY_ANALYSIS; void NotifyAllWithLock() EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow) NO_THREAD_SAFETY_ANALYSIS; void WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. 
void TranslateLocation(const Method* method, uint32_t pc, const char*& source_file, uint32_t& line_number) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool (*is_sensitive_thread_hook_)(); static uint32_t lock_profiling_threshold_; @@ -172,7 +172,7 @@ class MonitorList { void Add(Monitor* m); void SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); private: Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/src/mutex.cc b/src/mutex.cc index 2811843526..cb344d4f81 100644 --- a/src/mutex.cc +++ b/src/mutex.cc @@ -75,16 +75,16 @@ struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t { // ...other stuff we don't care about. }; -ReaderWriterMutex* GlobalSynchronization::mutator_lock_ = NULL; -Mutex* GlobalSynchronization::thread_list_lock_ = NULL; -Mutex* GlobalSynchronization::classlinker_classes_lock_ = NULL; -ReaderWriterMutex* GlobalSynchronization::heap_bitmap_lock_ = NULL; -Mutex* GlobalSynchronization::abort_lock_ = NULL; -Mutex* GlobalSynchronization::logging_lock_ = NULL; -Mutex* GlobalSynchronization::unexpected_signal_lock_ = NULL; -Mutex* GlobalSynchronization::thread_suspend_count_lock_ = NULL; - -void GlobalSynchronization::Init() { +ReaderWriterMutex* Locks::mutator_lock_ = NULL; +Mutex* Locks::thread_list_lock_ = NULL; +Mutex* Locks::classlinker_classes_lock_ = NULL; +ReaderWriterMutex* Locks::heap_bitmap_lock_ = NULL; +Mutex* Locks::abort_lock_ = NULL; +Mutex* Locks::logging_lock_ = NULL; +Mutex* Locks::unexpected_signal_lock_ = NULL; +Mutex* Locks::thread_suspend_count_lock_ = NULL; + +void Locks::Init() { if (logging_lock_ != NULL) { // Already initialized. 
DCHECK(mutator_lock_ != NULL); diff --git a/src/mutex.h b/src/mutex.h index be3704c234..5154d45f58 100644 --- a/src/mutex.h +++ b/src/mutex.h @@ -65,7 +65,7 @@ enum MutexLevel { std::ostream& operator<<(std::ostream& os, const MutexLevel& rhs); // Global mutexes corresponding to the levels above. -class GlobalSynchronization { +class Locks { public: static void Init(); diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 30f411cc3e..b5e1c19d8e 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -108,7 +108,7 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst } static const DexFile* toDexFile(int dex_file_address) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* dex_file = reinterpret_cast(static_cast(dex_file_address)); if (dex_file == NULL) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", "dex_file == null"); diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index ed61de92f5..fae06f68ca 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -138,7 +138,7 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv*, jobject, jint targetSdkVersio #if !defined(ART_USE_LLVM_COMPILER) if (vm->check_jni) { LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds..."; - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); vm->SetCheckJniEnabled(false); runtime->GetThreadList()->ForEach(DisableCheckJniCallback, NULL); } diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index e63cf1aece..68fc97e922 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -28,7 +28,7 @@ namespace art { static Class* DecodeClass(const ScopedObjectAccess& soa, jobject java_class) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode(java_class); DCHECK(c != NULL); DCHECK(c->IsClass()); @@ -93,7 +93,7 @@ struct WorkAroundGccAnnotalysisBug { template static jobjectArray ToArray(const ScopedObjectAccessUnchecked& soa, const char* array_class_name, const std::vector& objects) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedLocalRef array_class(soa.Env(), soa.Env()->FindClass(array_class_name)); jobjectArray result = soa.Env()->NewObjectArray(objects.size(), array_class.get(), NULL); for (size_t i = 0; i < objects.size(); ++i) { @@ -240,7 +240,7 @@ static jobject Class_getDex(JNIEnv* env, jobject javaClass) { } static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray* arg_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (name != mh->GetName()) { return false; } @@ -262,7 +262,7 @@ static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray static Method* FindConstructorOrMethodInArray(ObjectArray* methods, const std::string& name, ObjectArray* arg_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (methods == NULL) { return NULL; } diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index f4fe6ca9cd..d74c9dbc7f 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -102,7 +102,7 @@ static void move32(void* dst, const void* src, size_t n) { namespace art { static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); 
Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "%s of type %s is not an array", identifier, actualType.c_str()); diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index a0c90eea7f..adc246aa28 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -35,7 +35,7 @@ static jboolean Thread_interrupted(JNIEnv* env, jclass) { static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE; } @@ -56,10 +56,10 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha ScopedObjectAccess soa(env); ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); internal_thread_state = thread->GetState(); } switch (internal_thread_state) { @@ -92,14 +92,14 @@ static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); return JNI_FALSE; } - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); return thread->HoldsLock(object); } static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, 
java_thread); if (thread != NULL) { thread->Interrupt(); @@ -140,7 +140,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) { */ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_priority) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { thread->SetNativePriority(new_priority); diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index fa5975019d..49a4694eda 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -25,7 +25,7 @@ namespace art { // Recursively create an array with multiple dimensions. Elements may be // Objects or primitive types. static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArray* dimensions) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { int32_t array_length = dimensions->Get(current_dimension++); SirtRef new_array(Array::Alloc(array_class, array_length)); if (new_array.get() == NULL) { diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index e764b2500b..c82e5034b7 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -25,7 +25,7 @@ namespace art { static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, JValue& value, bool allow_references) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(value.GetJ(), 0LL); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { @@ -74,7 +74,7 @@ static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, 
Field* f, Object*& o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (f->IsStatic()) { o = NULL; return true; @@ -163,7 +163,7 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { } static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool allow_references) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { return; diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 9bcea0492d..8e5bbf6ee4 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -56,7 +56,7 @@ static jobject FindThreadByThinLockId(JNIEnv* env, uint32_t thin_lock_id) { }; ThreadFinder finder(thin_lock_id); { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadFinder::Callback, &finder); } if (finder.thread != NULL) { @@ -134,7 +134,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { std::vector& bytes = *reinterpret_cast*>(context); JDWP::Append4BE(bytes, t->GetThinLockId()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); JDWP::Append1BE(bytes, t->GetState()); } JDWP::Append4BE(bytes, t->GetTid()); @@ -146,7 +146,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) { std::vector bytes; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); ThreadList* thread_list = Runtime::Current()->GetThreadList(); uint16_t thread_count = 0; diff 
--git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h index 14ba0465c7..9288a955c9 100644 --- a/src/oat/runtime/callee_save_frame.h +++ b/src/oat/runtime/callee_save_frame.h @@ -25,9 +25,9 @@ class Method; // Place a special frame at the TOS that will save the callee saves for the given type. static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Be aware the store below may well stomp on an incoming argument. - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); *sp = Runtime::Current()->GetCalleeSaveMethod(type); self->SetTopOfStack(sp, 0); self->VerifyStack(); diff --git a/src/oat/runtime/stub.h b/src/oat/runtime/stub.h index 9e5e66fa6b..0e5f0dd67d 100644 --- a/src/oat/runtime/stub.h +++ b/src/oat/runtime/stub.h @@ -23,29 +23,29 @@ namespace art { namespace arm { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } namespace mips { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } 
namespace x86 { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } } // namespace art diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc index 528198d81c..fb83fadeef 100644 --- a/src/oat/runtime/support_alloc.cc +++ b/src/oat/runtime/support_alloc.cc @@ -21,21 +21,21 @@ namespace art { extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, false); } extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, Method* method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, true); } extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, false); } @@ -43,14 +43,14 @@ extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32 extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, 
int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, true); } extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false); } @@ -58,7 +58,7 @@ extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* metho extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true); } diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index ea083f1aae..45a3e60d83 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -21,7 +21,7 @@ namespace art { // Assignable test for code, won't throw. Null and equality tests already performed extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); DCHECK(ref_class != NULL); return klass->IsAssignableFrom(ref_class) ? 
1 : 0; @@ -29,7 +29,7 @@ extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref // Check whether it is safe to cast one class to the other, throw exception and return -1 on failure extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(a->IsClass()) << PrettyClass(a); DCHECK(b->IsClass()) << PrettyClass(b); if (LIKELY(b->IsAssignableFrom(a))) { @@ -48,7 +48,7 @@ extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self // Returns 0 on success and -1 if an exception is pending. extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(array_class != NULL); // element can't be NULL as we catch this is screened in runtime_support Class* element_class = element->GetClass(); diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc index 996804396a..9eaf55b71c 100644 --- a/src/oat/runtime/support_debug.cc +++ b/src/oat/runtime/support_debug.cc @@ -26,14 +26,14 @@ namespace art { * to denote method entry. */ extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); Dbg::UpdateDebugger(dex_pc, self); } // Temporary debugging hook for compiler. 
extern void DebugMe(Method* method, uint32_t info) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { LOG(INFO) << "DebugMe"; if (method != NULL) { LOG(INFO) << PrettyMethod(method); diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc index 8e7c2ad8a4..98cce55c7b 100644 --- a/src/oat/runtime/support_dexcache.cc +++ b/src/oat/runtime/support_dexcache.cc @@ -21,7 +21,7 @@ namespace art { extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. // A class may be accessing another class' fields when it doesn't have access, as access has been // given by inheritance. @@ -31,7 +31,7 @@ extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Me extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); @@ -40,7 +40,7 @@ extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* ref extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); @@ -49,7 +49,7 @@ extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, extern "C" String* artResolveStringFromCode(Method* referrer, int32_t string_idx, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveStringFromCode(referrer, string_idx); } diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc index fe8974bff0..90a99dafd1 100644 --- a/src/oat/runtime/support_field.cc +++ b/src/oat/runtime/support_field.cc @@ -23,7 +23,7 @@ namespace art { extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL)) { return field->Get32(NULL); @@ -38,7 +38,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* ref extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL)) { return field->Get64(NULL); @@ -53,7 +53,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* ref extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { 
return field->GetObj(NULL); @@ -68,7 +68,7 @@ extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* ref extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get32(obj); @@ -87,7 +87,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get64(obj); @@ -106,7 +106,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { return field->GetObj(obj); @@ -125,7 +125,7 @@ extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL)) { 
field->Set32(NULL, new_value); @@ -142,7 +142,7 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer, uint64_t new_value, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != NULL)) { field->Set64(NULL, new_value); @@ -159,7 +159,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { @@ -178,7 +178,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { field->Set32(obj, new_value); @@ -199,7 +199,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); Method* referrer = 
sp[callee_save->GetFrameSizeInBytes() / sizeof(Method*)]; Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); @@ -223,7 +223,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { field->SetObj(obj, new_value); diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc index 8561bd8308..62b9cf96cf 100644 --- a/src/oat/runtime/support_fillarray.cc +++ b/src/oat/runtime/support_fillarray.cc @@ -38,7 +38,7 @@ namespace art { extern "C" int artHandleFillArrayDataFromCode(Array* array, const Instruction::ArrayDataPayload* payload, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); if (UNLIKELY(array == NULL)) { diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc index b2867ef52f..540e46bbfe 100644 --- a/src/oat/runtime/support_invoke.cc +++ b/src/oat/runtime/support_invoke.cc @@ -21,7 +21,7 @@ namespace art { static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { FinishCalleeSaveFrameSetup(self, sp, 
Runtime::kRefsAndArgs); @@ -56,7 +56,7 @@ static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface); } @@ -64,7 +64,7 @@ extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_ Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); } @@ -73,7 +73,7 @@ extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); } @@ -81,7 +81,7 @@ extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); } @@ -89,7 +89,7 @@ extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper); } @@ -97,7 +97,7 @@ extern "C" 
uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_id Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); } diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index 5c0bbee45c..49365b55bd 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -23,8 +23,8 @@ namespace art { // Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); // We come here as Native. +extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertNotHeld(); // We come here as Native. DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); @@ -54,7 +54,7 @@ extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronizati } extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) { self->DecodeJObject(to_lock)->MonitorEnter(self); return JniMethodStart(self); } @@ -67,14 +67,14 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { } extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); PopLocalReferences(saved_local_ref_cookie, self); } extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) 
{ self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -82,7 +82,7 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject lo extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); Object* o = self->DecodeJObject(result); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -99,7 +99,7 @@ extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_re extern Object* JniMethodEndWithReferenceSynchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. Object* o = self->DecodeJObject(result); @@ -123,7 +123,7 @@ static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { } extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ DCHECK(Thread::Current() == self); // TODO: this code is specific to ARM // On entry the stack pointed by sp is: diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index c22448479d..8c40aba147 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -48,7 +48,7 @@ namespace art { // the invocation handler which is a field within the proxy object receiver. 
extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, Thread* self, byte* stack_args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Register the top of the managed stack Method** proxy_sp = reinterpret_cast(stack_args - SP_OFFSET_IN_BYTES); DCHECK_EQ(*proxy_sp, proxy_method); diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index 510df3b3dc..41de9f7e0b 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -34,7 +34,7 @@ namespace art { // Lazily resolve a method. Called by stub code. const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp, Thread* thread, Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__arm__) // On entry the stack pointed by sp is: // | argN | | @@ -230,7 +230,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp #else // ART_USE_LLVM_COMPILER const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** called_addr, Thread* thread, Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t dex_pc; Method* caller = thread->GetCurrentMethod(&dex_pc); @@ -323,7 +323,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** ca #if !defined(ART_USE_LLVM_COMPILER) // Called by the AbstractMethodError. Called by stub code. 
extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); @@ -331,7 +331,7 @@ extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Met } #else // ART_USE_LLVM_COMPILER extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method**) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); } diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc index 32284bbccc..20fe3e590d 100644 --- a/src/oat/runtime/support_thread.cc +++ b/src/oat/runtime/support_thread.cc @@ -21,14 +21,14 @@ namespace art { void CheckSuspendFromCode(Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame. 
thread->VerifyStack(); thread->FullSuspendCheck(); } extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); thread->FullSuspendCheck(); diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc index 0abdb0459f..6e1e1010ce 100644 --- a/src/oat/runtime/support_throw.cc +++ b/src/oat/runtime/support_throw.cc @@ -24,14 +24,14 @@ namespace art { // Deliver an exception that's pending on thread helping set up a callee save frame on the way. extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->DeliverException(); } // Called by generated call to throw an exception. extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* * exception may be NULL, in which case this routine should * throw NPE. NOTE: this is a convenience for generated code, @@ -50,7 +50,7 @@ extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread // Called by generated call to throw a NPE exception. 
extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); uint32_t dex_pc; Method* throw_method = self->GetCurrentMethod(&dex_pc); @@ -60,7 +60,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) // Called by generated call to throw an arithmetic divide by zero exception. extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); thread->DeliverException(); @@ -68,7 +68,7 @@ extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) // Called by generated call to throw an array index out of bounds exception. extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "length=%d; index=%d", limit, index); @@ -76,7 +76,7 @@ extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread } extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); // Remove extra entry pushed onto second stack during method tracing. 
if (Runtime::Current()->IsMethodTracingActive()) { @@ -90,7 +90,7 @@ extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) } extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); Method* method = self->GetCurrentMethod(); ThrowNoSuchMethodError(method_idx, method); diff --git a/src/oat_test.cc b/src/oat_test.cc index 288854bd8a..0886864a8a 100644 --- a/src/oat_test.cc +++ b/src/oat_test.cc @@ -26,7 +26,7 @@ class OatTest : public CommonTest { void CheckMethod(Method* method, const OatFile::OatMethod& oat_method, const DexFile* dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const CompiledMethod* compiled_method = compiler_->GetCompiledMethod(Compiler::MethodReference(dex_file, method->GetDexMethodIndex())); diff --git a/src/oat_writer.h b/src/oat_writer.h index 200d695f98..fd8e2b2533 100644 --- a/src/oat_writer.h +++ b/src/oat_writer.h @@ -69,14 +69,14 @@ class OatWriter { uint32_t image_file_location_checksum, const std::string& image_file_location, const Compiler& compiler) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: OatWriter(const std::vector& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, jobject class_loader, - const Compiler& compiler) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const Compiler& compiler) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ~OatWriter(); size_t InitOatHeader(); @@ -85,20 +85,20 @@ class OatWriter { size_t InitOatClasses(size_t offset); size_t InitOatCode(size_t offset); size_t InitOatCodeDexFiles(size_t offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeDexFile(size_t offset, size_t& oat_class_index, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeClassDef(size_t offset, size_t oat_class_index, size_t class_def_index, const DexFile& dex_file, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index, size_t class_def_method_index, bool is_native, InvokeType type, uint32_t method_idx, const DexFile*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool Write(File* file); bool WriteTables(File* file); diff --git a/src/oatdump.cc b/src/oatdump.cc index cde4dc6a13..e0ee14843c 100644 --- a/src/oatdump.cc +++ b/src/oatdump.cc @@ -156,7 +156,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetOatCode(Method* m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const void* GetOatCode(Method* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(m); for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; @@ -487,7 +487,7 @@ class ImageDumper { : os_(os), image_filename_(image_filename), host_prefix_(host_prefix), image_space_(image_space), image_header_(image_header) {} - void Dump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os_ << "MAGIC:\n"; os_ << image_header_.GetMagic() << "\n\n"; @@ -558,10 +558,10 @@ class ImageDumper { Heap* heap = Runtime::Current()->GetHeap(); const Spaces& spaces = heap->GetSpaces(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock 
mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); // TODO: C++0x auto for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) { (*cur)->GetLiveBitmap()->Walk(ImageDumper::Callback, this); @@ -585,7 +585,7 @@ class ImageDumper { private: static void PrettyObjectValue(std::string& summary, Class* type, Object* value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(type != NULL); if (value == NULL) { StringAppendF(&summary, "null %s\n", PrettyDescriptor(type).c_str()); @@ -607,7 +607,7 @@ class ImageDumper { } static void PrintField(std::string& summary, Field* field, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FieldHelper fh(field); const char* descriptor = fh.GetTypeDescriptor(); StringAppendF(&summary, "\t%s: ", fh.GetName()); @@ -636,7 +636,7 @@ class ImageDumper { } static void DumpFields(std::string& summary, Object* obj, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* super = klass->GetSuperClass(); if (super != NULL) { DumpFields(summary, obj, super); @@ -655,7 +655,7 @@ class ImageDumper { } const void* GetOatCodeBegin(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { @@ -668,7 +668,7 @@ class ImageDumper { } uint32_t GetOatCodeSize(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return 0; 
@@ -677,7 +677,7 @@ class ImageDumper { } const void* GetOatCodeEnd(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return NULL; @@ -686,7 +686,7 @@ class ImageDumper { } static void Callback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(obj != NULL); DCHECK(arg != NULL); ImageDumper* state = reinterpret_cast(arg); @@ -945,7 +945,7 @@ class ImageDumper { } void DumpOutliers(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { size_t sum_of_sizes = 0; size_t sum_of_sizes_squared = 0; size_t sum_of_expansion = 0; @@ -1045,7 +1045,7 @@ class ImageDumper { os << "\n" << std::flush; } - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << "\tart_file_bytes = " << PrettySize(file_bytes) << "\n\n" << "\tart_file_bytes = header_bytes + object_bytes + alignment_bytes\n" << StringPrintf("\theader_bytes = %8zd (%2.0f%% of art file bytes)\n" diff --git a/src/object.cc b/src/object.cc index 83994ffd59..cde7e04e48 100644 --- a/src/object.cc +++ b/src/object.cc @@ -439,7 +439,7 @@ Method* Method::FindOverriddenMethod() const { } static const void* GetOatCode(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); // Peel off any method tracing trampoline. 
@@ -529,7 +529,7 @@ uint32_t Method::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { void Method::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const { if (kIsDebugBuild) { self->AssertThreadSuspensionIsAllowable(); - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(kRunnable, self->GetState()); } diff --git a/src/object.h b/src/object.h index c20c99aaab..36db13d04c 100644 --- a/src/object.h +++ b/src/object.h @@ -172,11 +172,11 @@ class MANAGED Object { void SetClass(Class* new_klass); bool InstanceOf(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t SizeOf() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* Clone() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* Clone() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t IdentityHashCode() const { #ifdef MOVING_GARBAGE_COLLECTOR @@ -199,20 +199,20 @@ class MANAGED Object { uint32_t GetThinLockId(); - void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); - bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_); - void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void 
Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Wait(int64_t timeout, int32_t nanos) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsClass() const; @@ -260,14 +260,14 @@ class MANAGED Object { return down_cast(this); } - bool IsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Field* AsField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsField()); return down_cast(this); } - const Field* AsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsField()); return down_cast(this); } @@ -424,55 +424,55 @@ class MANAGED Field : public Object { // field access, null object for static fields bool GetBoolean(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetBoolean(Object* object, bool z) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int8_t GetByte(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetByte(Object* object, int8_t b) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint16_t GetChar(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetChar(Object* object, uint16_t c) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int16_t GetShort(const Object* object) const - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetShort(Object* object, int16_t s) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetInt(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetInt(Object* object, int32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int64_t GetLong(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetLong(Object* object, int64_t j) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); float GetFloat(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetFloat(Object* object, float f) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); double GetDouble(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetDouble(Object* object, double d) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObject(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetObject(Object* object, const Object* l) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // raw field accesses uint32_t Get32(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 
void Set32(Object* object, uint32_t new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint64_t Get64(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Set64(Object* object, uint64_t new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObj(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetObj(Object* object, const Object* new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Class* GetJavaLangReflectField() { DCHECK(java_lang_reflect_Field_ != NULL); @@ -652,10 +652,10 @@ class MANAGED Method : public Object { void SetDexCacheInitializedStaticStorage(ObjectArray* new_value); // Find the method that this method overrides - Method* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const void* GetCode() const { return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Method, code_), false); @@ -665,7 +665,7 @@ class MANAGED Method : public Object { SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Method, code_), code, false); } - uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this); uintptr_t code = reinterpret_cast(GetCode()); if (code == 0) { @@ -677,7 +677,7 @@ class MANAGED Method 
: public Object { } bool IsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uintptr_t code = reinterpret_cast(GetCode()); if (code == 0) { return pc == 0; @@ -686,7 +686,7 @@ class MANAGED Method : public Object { } void AssertPcIsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint32_t GetOatCodeOffset() const { DCHECK(!Runtime::Current()->IsStarted()); @@ -811,9 +811,9 @@ class MANAGED Method : public Object { bool IsRegistered() const; void RegisterNative(Thread* self, const void* native_method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static MemberOffset NativeMethodOffset() { return OFFSET_OF_OBJECT_MEMBER(Method, native_method_); @@ -914,16 +914,16 @@ class MANAGED Method : public Object { // Converts a native PC to a dex PC. TODO: this is a no-op // until we associate a PC mapping table with each method. uint32_t ToDexPC(const uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Converts a dex PC to a native PC. TODO: this is a no-op // until we associate a PC mapping table with each method. 
uintptr_t ToNativePC(const uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Find the catch block for the given exception type and dex_pc uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); @@ -1014,10 +1014,10 @@ class MANAGED Array : public Object { // A convenience for code that doesn't know the component size, // and doesn't want to have to work it out itself. static Array* Alloc(Class* array_class, int32_t component_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Array* Alloc(Class* array_class, int32_t component_count, size_t component_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOf() const; @@ -1055,7 +1055,7 @@ class MANAGED Array : public Object { protected: bool IsValidIndex(int32_t index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(index < 0 || index >= length_)) { return ThrowArrayIndexOutOfBoundsException(index); } @@ -1064,9 +1064,9 @@ class MANAGED Array : public Object { protected: bool ThrowArrayIndexOutOfBoundsException(int32_t index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ThrowArrayStoreException(Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: // The number of array elements. 
@@ -1081,26 +1081,26 @@ template class MANAGED ObjectArray : public Array { public: static ObjectArray* Alloc(Class* object_array_class, int32_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set element without bound and element type checks, to be used in limited // circumstances, such as during boot image writing void SetWithoutChecks(int32_t i, T* object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Copy(const ObjectArray* src, int src_pos, ObjectArray* dst, int dst_pos, size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray* CopyOf(int32_t new_length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); @@ -1191,7 +1191,7 @@ class MANAGED Class : public StaticStorageBase { return static_cast(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), false)); } - void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if the class has failed to link. 
bool IsErroneous() const { @@ -1302,7 +1302,7 @@ class MANAGED Class : public StaticStorageBase { String* GetName() const; // Returns the cached name void SetName(String* name); // Sets the cached name String* ComputeName() // Computes the name, then sets the cached value - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsProxyClass() const { // Read access flags without using getter as whether something is a proxy can be check in @@ -1381,7 +1381,7 @@ class MANAGED Class : public StaticStorageBase { bool IsStringClass() const; - bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* GetComponentType() const { return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), false); @@ -1405,7 +1405,7 @@ class MANAGED Class : public StaticStorageBase { } // Creates a raw object instance but does not invoke the default constructor. 
- Object* AllocObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* AllocObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsVariableSize() const { // Classes and arrays vary in size, and so the object_size_ field cannot @@ -1424,9 +1424,9 @@ class MANAGED Class : public StaticStorageBase { } void SetClassSize(size_t new_class_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this); DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false); @@ -1442,12 +1442,12 @@ class MANAGED Class : public StaticStorageBase { // Returns true if this class is in the same packages as that class. bool IsInSamePackage(const Class* that) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); // Returns true if this class can access that class. 
- bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return that->IsPublic() || this->IsInSamePackage(that); } @@ -1455,7 +1455,7 @@ class MANAGED Class : public StaticStorageBase { // Note that access to the class isn't checked in case the declaring class is protected and the // method has been exposed by a public sub-class bool CanAccessMember(Class* access_to, uint32_t member_flags) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Classes can access all of their own members if (this == access_to) { return true; @@ -1479,7 +1479,7 @@ class MANAGED Class : public StaticStorageBase { } bool IsSubClass(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can src be assigned to this class? For example, String can be assigned to Object (by an // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing @@ -1487,7 +1487,7 @@ class MANAGED Class : public StaticStorageBase { // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign // to themselves. Classes for primitive types may not assign to each other. 
bool IsAssignableFrom(const Class* src) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(src != NULL); if (this == src) { // Can always assign to things of the same type @@ -1543,7 +1543,7 @@ class MANAGED Class : public StaticStorageBase { }; void DumpClass(std::ostream& os, int flags) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* GetDexCache() const; @@ -1564,12 +1564,12 @@ class MANAGED Class : public StaticStorageBase { } Method* GetDirectMethod(int32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDirectMethods()->Get(i); } void SetDirectMethod(uint32_t i, Method* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ ObjectArray* direct_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); @@ -1601,19 +1601,19 @@ class MANAGED Class : public StaticStorageBase { } Method* GetVirtualMethod(uint32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetVirtualMethods()->Get(i); } Method* GetVirtualMethodDuringLinking(uint32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous()); return GetVirtualMethods()->Get(i); } void SetVirtualMethod(uint32_t i, Method* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* virtual_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); @@ -1642,7 +1642,7 @@ class MANAGED Class : public StaticStorageBase { // super class, return the specific 
implementation // method for this class. Method* FindVirtualMethodForVirtual(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!method->GetDeclaringClass()->IsInterface()); // The argument method may from a super class. // Use the index to a potentially overridden one for this instance's class. @@ -1653,16 +1653,16 @@ class MANAGED Class : public StaticStorageBase { // super class or interface, return the specific implementation // method for this class. Method* FindVirtualMethodForInterface(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethodForVirtualOrInterface(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method->IsDirect()) { return method; } @@ -1673,28 +1673,28 @@ class MANAGED Class : public StaticStorageBase { } Method* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetIfTableCount() const { ObjectArray* iftable = GetIfTable(); @@ -1731,13 +1731,13 @@ class MANAGED Class : public StaticStorageBase { } Field* GetInstanceField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ DCHECK_NE(NumInstanceFields(), 0U); return GetIFields()->Get(i); } void SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ ObjectArray* ifields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); ifields->Set(i, f); @@ -1808,12 +1808,12 @@ class MANAGED Class : public StaticStorageBase { } Field* GetStaticField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetSFields()->Get(i); } void 
SetStaticField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* sfields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); sfields->Set(i, f); @@ -1827,37 +1827,37 @@ class MANAGED Class : public StaticStorageBase { // Find a static or instance field using the JLS resolution order Field* FindField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass. Field* FindInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass, only searches classes that // have the same dex cache. Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given static field in this class or a superclass. Field* FindStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given static field in this class or superclass, only searches classes that // have the same dex cache. 
Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); pid_t GetClinitThreadId() const { DCHECK(IsIdxLoaded() || IsErroneous()); @@ -1883,17 +1883,17 @@ class MANAGED Class : public StaticStorageBase { private: void SetVerifyErrorClass(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(klass != NULL) << PrettyClass(this); SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false); } bool Implements(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsArrayAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // defining class loader, or NULL for the "bootstrap" system loader ClassLoader* class_loader_; @@ -2233,7 +2233,7 @@ class MANAGED PrimitiveArray : public Array { typedef T ElementType; static PrimitiveArray* Alloc(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const T* GetData() const { intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); @@ -2245,14 +2245,14 @@ class MANAGED PrimitiveArray : public Array { 
return reinterpret_cast(data); } - T Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsValidIndex(i)) { return T(0); } return GetData()[i]; } - void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: ArrayStoreException if (IsValidIndex(i)) { GetData()[i] = value; @@ -2306,9 +2306,9 @@ class MANAGED String : public Object { int32_t GetLength() const; - int32_t GetHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ComputeHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength())); } @@ -2316,36 +2316,36 @@ class MANAGED String : public Object { return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength()); } - uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* Intern() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromUtf16(int32_t utf16_length, const uint16_t* utf16_data_in, int32_t hash_code = 0) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromModifiedUtf8(const char* utf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromModifiedUtf8(int32_t utf16_length, const char* utf8_data_in) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* Alloc(Class* java_lang_String, int32_t utf16_length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* Alloc(Class* java_lang_String, CharArray* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool Equals(const char* modified_utf8) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const StringPiece& modified_utf8) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Compare UTF-16 code point values not in a locale-sensitive manner int Compare(int32_t utf16_length, const char* utf8_data_in); @@ -2353,7 +2353,7 @@ class MANAGED String : public Object { // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Create a modified UTF-8 encoded std::string from a java/lang/String object. 
std::string ToModifiedUtf8() const; @@ -2471,7 +2471,7 @@ inline bool Method::CheckIncompatibleClassChange(InvokeType type) { return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass()); } default: - LOG(FATAL) << "UNREACHABLE"; + LOG(FATAL) << "Unreachable - invocation type: " << type; return true; } } @@ -2511,13 +2511,13 @@ class MANAGED Throwable : public Object { String* GetDetailMessage() const { return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false); } - std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // This is a runtime version of initCause, you shouldn't use it if initCause may have been // overridden. Also it asserts rather than throwing exceptions. Currently this is only used // in cases like the verifier where the checks cannot fail and initCause isn't overridden. void SetCause(Throwable* cause); - bool IsCheckedException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsCheckedException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Class* GetJavaLangThrowable() { DCHECK(java_lang_Throwable_ != NULL); @@ -2572,7 +2572,7 @@ class MANAGED StackTraceElement : public Object { String* method_name, String* file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetClass(Class* java_lang_StackTraceElement); @@ -2598,20 +2598,20 @@ class MANAGED StackTraceElement : public Object { class MANAGED InterfaceEntry : public ObjectArray { public: - Class* GetInterface() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* interface = Get(kInterface)->AsClass(); DCHECK(interface != NULL); return interface; } - void SetInterface(Class* interface) 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void SetInterface(Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(interface != NULL); DCHECK(interface->IsInterface()); DCHECK(Get(kInterface) == NULL); Set(kInterface, interface); } - size_t GetMethodArrayCount() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetMethodArrayCount() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* method_array = down_cast*>(Get(kMethodArray)); if (method_array == NULL) { return 0; @@ -2620,14 +2620,14 @@ class MANAGED InterfaceEntry : public ObjectArray { } ObjectArray* GetMethodArray() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* method_array = down_cast*>(Get(kMethodArray)); DCHECK(method_array != NULL); return method_array; } void SetMethodArray(ObjectArray* new_ma) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_ma != NULL); DCHECK(Get(kMethodArray) == NULL); Set(kMethodArray, new_ma); diff --git a/src/object_test.cc b/src/object_test.cc index e44cf64e72..081be4b2c1 100644 --- a/src/object_test.cc +++ b/src/object_test.cc @@ -35,7 +35,7 @@ class ObjectTest : public CommonTest { const char* utf8_in, const char* utf16_expected_le, int32_t expected_hash) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UniquePtr utf16_expected(new uint16_t[length]); for (int32_t i = 0; i < length; i++) { uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | diff --git a/src/object_utils.h b/src/object_utils.h index d523ecc1c5..f6158f3b9e 100644 --- a/src/object_utils.h +++ b/src/object_utils.h @@ -32,25 +32,25 @@ namespace art { class ObjectLock { public: - explicit ObjectLock(Object* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + explicit ObjectLock(Object* object) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(Thread::Current()), obj_(object) { CHECK(object != NULL); obj_->MonitorEnter(self_); } - ~ObjectLock() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->MonitorExit(self_); } - void Wait() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Wait() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Monitor::Wait(self_, obj_, 0, 0, false); } - void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->Notify(); } - void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->NotifyAll(); } @@ -63,7 +63,7 @@ class ObjectLock { class ClassHelper { public: ClassHelper(const Class* c = NULL, ClassLinker* l = NULL) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_def_(NULL), class_linker_(l), dex_cache_(NULL), @@ -76,7 +76,7 @@ class ClassHelper { } void ChangeClass(const Class* new_c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(new_c != NULL) << "klass_=" << klass_; // Log what we were changing from if any CHECK(new_c->IsClass()) << "new_c=" << new_c; if (dex_cache_ != NULL) { @@ -93,7 +93,7 @@ class ClassHelper { // The returned const char* is only guaranteed to be valid for the lifetime of the ClassHelper. // If you need it longer, copy it into a std::string. 
- const char* GetDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(klass_ != NULL); if (UNLIKELY(klass_->IsArrayClass())) { return GetArrayDescriptor(); @@ -109,7 +109,7 @@ class ClassHelper { } } - const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string result("["); const Class* saved_klass = klass_; CHECK(saved_klass != NULL); @@ -121,7 +121,7 @@ class ClassHelper { } const DexFile::ClassDef* GetClassDef() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::ClassDef* result = class_def_; if (result == NULL) { result = GetDexFile().FindClassDef(GetDescriptor()); @@ -130,7 +130,7 @@ class ClassHelper { return result; } - uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); if (klass_->IsPrimitive()) { return 0; @@ -149,7 +149,7 @@ class ClassHelper { } uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); DCHECK(!klass_->IsArrayClass()); @@ -157,7 +157,7 @@ class ClassHelper { } Class* GetDirectInterface(uint32_t idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); if (klass_->IsArrayClass()) { @@ -180,7 +180,7 @@ class ClassHelper { } } - const char* GetSourceFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string 
descriptor(GetDescriptor()); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ -188,7 +188,7 @@ class ClassHelper { return dex_file.GetSourceFile(*dex_class_def); } - std::string GetLocation() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = GetDexCache(); if (dex_cache != NULL && !klass_->IsProxyClass()) { return dex_cache->GetLocation()->ToModifiedUtf8(); @@ -198,7 +198,7 @@ class ClassHelper { } } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -208,7 +208,7 @@ class ClassHelper { return *result; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { DCHECK(klass_ != NULL); @@ -220,7 +220,7 @@ class ClassHelper { private: const DexFile::TypeList* GetInterfaceTypeList() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* result = interface_type_list_; if (result == NULL) { const DexFile::ClassDef* class_def = GetClassDef(); @@ -270,7 +270,7 @@ class FieldHelper { } field_ = new_f; } - const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -291,7 +291,7 @@ class FieldHelper { return Runtime::Current()->GetInternTable()->InternStrong(GetName()); } } - Class* GetType() 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -306,7 +306,7 @@ class FieldHelper { return GetClassLinker()->FindSystemClass(GetTypeDescriptor()); } } - const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -320,14 +320,14 @@ class FieldHelper { } } Primitive::Type GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Primitive::GetType(GetTypeDescriptor()[0]); } - bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return type != Primitive::kPrimNot; } - size_t FieldSize() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return Primitive::FieldSize(type); } @@ -335,7 +335,7 @@ class FieldHelper { // The returned const char* is only guaranteed to be valid for the lifetime of the FieldHelper. // If you need it longer, copy it into a std::string. 
const char* GetDeclaringClassDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint16_t type_idx = field_->GetDeclaringClass()->GetDexTypeIndex(); if (type_idx != DexFile::kDexNoIndex16) { const DexFile& dex_file = GetDexFile(); @@ -349,7 +349,7 @@ class FieldHelper { } private: - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { result = field_->GetDeclaringClass()->GetDexCache(); @@ -365,7 +365,7 @@ class FieldHelper { } return result; } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -391,20 +391,20 @@ class MethodHelper { shorty_len_(0) {} explicit MethodHelper(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } MethodHelper(const Method* m, ClassLinker* l) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } - void ChangeMethod(Method* new_m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void ChangeMethod(Method* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_m != NULL); if (dex_cache_ != NULL) { Class* klass = new_m->GetDeclaringClass(); @@ -423,7 +423,7 @@ class MethodHelper { shorty_ = NULL; } - const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetName() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ -444,15 +444,15 @@ class MethodHelper { } } - String* GetNameAsString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + String* GetNameAsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, GetDexCache()); } - const char* GetShorty() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* result = shorty_; if (result == NULL) { const DexFile& dex_file = GetDexFile(); @@ -463,14 +463,14 @@ class MethodHelper { return result; } - uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (shorty_ == NULL) { GetShorty(); } return shorty_len_; } - const std::string GetSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const std::string GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ -481,19 +481,19 @@ class MethodHelper { } const DexFile::ProtoId& GetPrototype() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetMethodPrototype(dex_file.GetMethodId(method_->GetDexMethodIndex())); } const DexFile::TypeList* 
GetParameterTypeList() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::ProtoId& proto = GetPrototype(); return GetDexFile().GetProtoParameters(proto); } ObjectArray* GetParameterTypes() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* params = GetParameterTypeList(); Class* array_class = GetClassLinker()->FindSystemClass("[Ljava/lang/Class;"); uint32_t num_params = params == NULL ? 0 : params->Size(); @@ -509,7 +509,7 @@ class MethodHelper { return result; } - Class* GetReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -518,7 +518,7 @@ class MethodHelper { } const char* GetReturnTypeDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -527,7 +527,7 @@ class MethodHelper { } int32_t GetLineNumFromDexPC(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (dex_pc == DexFile::kDexNoIndex) { return method_->IsNative() ? 
-2 : -1; } else { @@ -537,7 +537,7 @@ class MethodHelper { } const char* GetDeclaringClassDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = method_->GetDeclaringClass(); DCHECK(!klass->IsProxyClass()); uint16_t type_idx = klass->GetDexTypeIndex(); @@ -546,7 +546,7 @@ class MethodHelper { } const char* GetDeclaringClassSourceFile() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ -555,7 +555,7 @@ class MethodHelper { } uint32_t GetClassDefIndex() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); uint32_t index; @@ -564,20 +564,20 @@ class MethodHelper { } ClassLoader* GetClassLoader() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDeclaringClass()->GetClassLoader(); } bool IsStatic() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->IsStatic(); } - bool IsClassInitializer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return IsStatic() && StringPiece(GetName()) == ""; } - size_t NumArgs() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // "1 +" because the first in Args is the receiver. // "- 1" because we don't count the return type. return (IsStatic() ? 
0 : 1) + GetShortyLength() - 1; @@ -585,7 +585,7 @@ class MethodHelper { // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods bool IsParamALongOrDouble(size_t param) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -597,7 +597,7 @@ class MethodHelper { } // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods - bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -608,7 +608,7 @@ class MethodHelper { } bool HasSameNameAndSignature(MethodHelper* other) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (GetDexCache() == other->GetDexCache()) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& mid = dex_file.GetMethodId(method_->GetDexMethodIndex()); @@ -622,17 +622,17 @@ class MethodHelper { } const DexFile::CodeItem* GetCodeItem() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDexFile().GetCodeItem(method_->GetCodeItemOffset()); } bool IsResolvedTypeIdx(uint16_t type_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDexCacheResolvedTypes()->Get(type_idx) != NULL; } Class* GetClassFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* type = method_->GetDexCacheResolvedTypes()->Get(type_idx); if (type == NULL) { type = 
GetClassLinker()->ResolveType(type_idx, method_); @@ -642,17 +642,17 @@ class MethodHelper { } const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } Class* GetDexCacheResolvedType(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDexCache()->GetResolvedType(type_idx); } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -662,7 +662,7 @@ class MethodHelper { return *result; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { Class* klass = method_->GetDeclaringClass(); @@ -676,7 +676,7 @@ class MethodHelper { // Set the method_ field, for proxy methods looking up the interface method via the resolved // methods table. void SetMethod(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method != NULL) { Class* klass = method->GetDeclaringClass(); if (klass->IsProxyClass()) { diff --git a/src/reference_table.cc b/src/reference_table.cc index a2b54d6a14..1f6cab7446 100644 --- a/src/reference_table.cc +++ b/src/reference_table.cc @@ -63,7 +63,7 @@ struct ObjectComparator { bool operator()(const Object* obj1, const Object* obj2) // TODO: enable analysis when analysis can work with the STL. 
NO_THREAD_SAFETY_ANALYSIS { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); // Ensure null references and cleared jweaks appear at the end. if (obj1 == NULL) { return true; @@ -100,7 +100,7 @@ struct ObjectComparator { // or equivalent to the original. static void DumpSummaryLine(std::ostream& os, const Object* obj, size_t element_count, int identical, int equiv) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (obj == NULL) { os << " NULL reference (count=" << equiv << ")\n"; return; diff --git a/src/reference_table.h b/src/reference_table.h index de9d45d31e..f398eb2a44 100644 --- a/src/reference_table.h +++ b/src/reference_table.h @@ -43,14 +43,14 @@ class ReferenceTable { size_t Size() const; - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg); private: typedef std::vector Table; static void Dump(std::ostream& os, const Table& entries) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); friend class IndirectReferenceTable; // For Dump. 
std::string name_; diff --git a/src/reflection.cc b/src/reflection.cc index 542f1a2e44..7f1d4d1b30 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -235,7 +235,7 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { } if (kIsDebugBuild) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(Thread::Current()->GetState(), kRunnable); } ScopedObjectAccessUnchecked soa(Thread::Current()); @@ -244,7 +244,7 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { } static std::string UnboxingFailureKind(Method* m, int index, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m != NULL && index != -1) { ++index; // Humans count from 1. return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index); @@ -257,7 +257,7 @@ static std::string UnboxingFailureKind(Method* m, int index, Field* f) static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, int index, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!dst_class->IsPrimitive()) { if (o != NULL && !o->InstanceOf(dst_class)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", diff --git a/src/reflection.h b/src/reflection.h index 2979e5bdee..93219f8afc 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -30,24 +30,24 @@ class Object; class ScopedObjectAccess; void BoxPrimitive(Primitive::Type src_class, JValue& value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, size_t index) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForField(Object* o, Class* 
dst_class, JValue& unboxed_value, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue& dst) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool VerifyObjectInClass(Object* o, Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/runtime.cc b/src/runtime.cc index 89477b0484..f37788da98 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -175,7 +175,7 @@ struct AbortState { void Runtime::Abort() { // Ensure that we don't have multiple threads trying to abort at once, // which would result in significantly worse diagnostics. - MutexLock mu(*GlobalSynchronization::abort_lock_); + MutexLock mu(*Locks::abort_lock_); // Get any pending output out of the way. fflush(NULL); @@ -536,7 +536,7 @@ bool Runtime::Create(const Options& options, bool ignore_unrecognized) { if (Runtime::instance_ != NULL) { return false; } - GlobalSynchronization::Init(); + Locks::Init(); instance_ = new Runtime; if (!instance_->Init(options, ignore_unrecognized)) { delete instance_; @@ -642,7 +642,7 @@ void Runtime::StartDaemonThreads() { // Must be in the kNative state for calling native methods. 
{ - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kNative); } @@ -742,7 +742,7 @@ void Runtime::InitNativeMethods() { // Must be in the kNative state for calling native methods (JNI_OnLoad code). { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kNative); } @@ -831,7 +831,7 @@ void Runtime::DumpForSigQuit(std::ostream& os) { } void Runtime::DumpLockHolders(std::ostream& os) { - uint64_t mutator_lock_owner = GlobalSynchronization::mutator_lock_->GetExclusiveOwnerTid(); + uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid(); pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner(); pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner(); pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner(); diff --git a/src/runtime.h b/src/runtime.h index f61399163d..4dcefb50d1 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -97,7 +97,7 @@ class Runtime { // Creates and initializes a new runtime. static bool Create(const Options& options, bool ignore_unrecognized) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); bool IsCompiler() const { return is_compiler_; @@ -117,7 +117,7 @@ class Runtime { } // Starts a runtime, which may cause threads to be started and code to run. - void Start() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + void Start() UNLOCK_FUNCTION(Locks::mutator_lock_); bool IsShuttingDown() const { return shutting_down_; @@ -140,7 +140,7 @@ class Runtime { // This isn't marked ((noreturn)) because then gcc will merge multiple calls // in a single function together. This reduces code size slightly, but means // that the native stack trace we get may point at the wrong call site. 
- static void Abort() LOCKS_EXCLUDED(GlobalSynchronization::abort_lock_); + static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -154,10 +154,10 @@ class Runtime { void CallExitHook(jint status); // Detaches the current native thread from the runtime. - void DetachCurrentThread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpLockHolders(std::ostream& os); ~Runtime(); @@ -211,7 +211,7 @@ class Runtime { } void VisitRoots(Heap::RootVisitor* visitor, void* arg) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasJniDlsymLookupStub() const { return jni_stub_array_ != NULL; @@ -267,7 +267,7 @@ class Runtime { resolution_method_ = method; } - Method* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -289,13 +289,13 @@ class Runtime { void SetCalleeSaveMethod(Method* method, CalleeSaveType type); Method* CreateCalleeSaveMethod(InstructionSet instruction_set, CalleeSaveType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -341,8 +341,8 @@ class Runtime { void BlockSignals(); bool Init(const Options& options, bool ignore_unrecognized) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); - void InitNativeMethods() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); + void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_); void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); diff --git a/src/runtime_linux.cc b/src/runtime_linux.cc index 8365079cb6..e5033585c1 100644 --- a/src/runtime_linux.cc +++ b/src/runtime_linux.cc @@ -226,7 +226,7 @@ struct UContext { }; static void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) { - MutexLock mu(*GlobalSynchronization::unexpected_signal_lock_); + MutexLock mu(*Locks::unexpected_signal_lock_); bool has_address = (signal_number == SIGILL || signal_number == SIGBUS || signal_number == SIGFPE || signal_number == SIGSEGV); diff --git a/src/runtime_support.h b/src/runtime_support.h index 8f6b6558ea..16f0d2ec6e 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -50,7 +50,7 @@ class Object; // check. 
static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); Runtime* runtime = Runtime::Current(); if (UNLIKELY(klass == NULL)) { @@ -85,7 +85,7 @@ static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thr // check. static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); @@ -112,7 +112,7 @@ static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32 extern Array* CheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Type of find field operation for fast and slow case. enum FindFieldType { @@ -129,12 +129,12 @@ enum FindFieldType { // Slow field find that can initialize classes and may throw exceptions. extern Field* FindFieldFromCode(uint32_t field_idx, const Method* referrer, Thread* self, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. 
static inline Field* FindFieldFast(uint32_t field_idx, const Method* referrer, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { return NULL; @@ -183,7 +183,7 @@ static inline Field* FindFieldFast(uint32_t field_idx, const Method* referrer, // Fast path method resolution that can't throw exceptions. static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, const Method* referrer, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { bool is_direct = type == kStatic || type == kDirect; if (UNLIKELY(this_object == NULL && !is_direct)) { return NULL; @@ -223,20 +223,20 @@ static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, extern Method* FindMethodFromCode(uint32_t method_idx, Object* this_object, const Method* referrer, Thread* self, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern Class* ResolveVerifyAndClinit(uint32_t type_idx, const Method* referrer, Thread* self, bool can_run_clinit, bool verify_access) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static inline String* ResolveStringFromCode(const Method* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); } static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_) { // Save any pending exception over monitor exit call. Throwable* saved_exception = NULL; @@ -259,7 +259,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) } static inline void CheckReferenceResult(Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (o == NULL) { return; } diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h index 745e2d6803..ed3c384590 100644 --- a/src/scoped_thread_state_change.h +++ b/src/scoped_thread_state_change.h @@ -29,7 +29,7 @@ namespace art { class ScopedThreadStateChange { public: ScopedThreadStateChange(Thread* self, ThreadState new_thread_state) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) { if (self_ == NULL) { // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL. 
@@ -38,7 +38,7 @@ class ScopedThreadStateChange { } else { bool runnable_transition; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); old_thread_state_ = self->GetState(); runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable; if (!runnable_transition) { @@ -56,7 +56,7 @@ class ScopedThreadStateChange { } } - ~ScopedThreadStateChange() LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) { + ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { if (self_ == NULL) { if (!expected_has_no_thread_) { CHECK(Runtime::Current()->IsShuttingDown()); @@ -68,7 +68,7 @@ class ScopedThreadStateChange { } else if (thread_state_ == kRunnable) { self_->TransitionFromRunnableToSuspended(old_thread_state_); } else { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); self_->SetState(old_thread_state_); } } @@ -112,14 +112,14 @@ class ScopedThreadStateChange { class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { public: explicit ScopedObjectAccessUnchecked(JNIEnv* env) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : ScopedThreadStateChange(ThreadForEnv(env), kRunnable), env_(reinterpret_cast(env)), vm_(env_->vm) { self_->VerifyStack(); } explicit ScopedObjectAccessUnchecked(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : ScopedThreadStateChange(self, kRunnable), env_(reinterpret_cast(self->GetJniEnv())), vm_(env_ != NULL ? 
env_->vm : NULL) { @@ -158,7 +158,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { */ template T AddLocalReference(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. if (obj == NULL) { return NULL; @@ -195,8 +195,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { T Decode(jobject obj) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. return down_cast(Self()->DecodeJObject(obj)); } @@ -204,8 +204,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { Field* DecodeField(jfieldID fid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR // TODO: we should make these unique weak globals if Field instances can ever move. 
@@ -217,8 +217,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { jfieldID EncodeField(Field* field) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(WARNING); @@ -229,8 +229,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { Method* DecodeMethod(jmethodID mid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR // TODO: we should make these unique weak globals if Method instances can ever move. @@ -240,8 +240,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { } jmethodID EncodeMethod(Method* method) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. 
#ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(WARNING); @@ -281,20 +281,20 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { class ScopedObjectAccess : public ScopedObjectAccessUnchecked { public: explicit ScopedObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(env) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } explicit ScopedObjectAccess(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(self) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } - ~ScopedObjectAccess() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) { // Base class will release share of lock. Invoked after this destructor. } @@ -303,7 +303,7 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked { // routines operating with just a VM are sound, they are not, but when you have just a VM // you cannot call the unsound routines. 
explicit ScopedObjectAccess(JavaVM* vm) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(vm) {} friend class ScopedCheck; diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc index 156aec6a63..229edf6278 100644 --- a/src/signal_catcher.cc +++ b/src/signal_catcher.cc @@ -122,12 +122,12 @@ void SignalCatcher::HandleSigQuit() { // We should exclusively hold the mutator lock, set state to Runnable without a pending // suspension to avoid giving away or trying re-acquire the mutator lock. - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); Thread* self = Thread::Current(); ThreadState old_state; int suspend_count; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); suspend_count = self->GetSuspendCount(); if (suspend_count != 0) { CHECK_EQ(suspend_count, 1); @@ -155,7 +155,7 @@ void SignalCatcher::HandleSigQuit() { os << "----- end " << getpid() << " -----\n"; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); self->SetState(old_state); if (suspend_count != 0) { self->ModifySuspendCount(+1, false); diff --git a/src/signal_catcher.h b/src/signal_catcher.h index 11a2c09382..e8ac812a17 100644 --- a/src/signal_catcher.h +++ b/src/signal_catcher.h @@ -35,9 +35,9 @@ class SignalCatcher { explicit SignalCatcher(const std::string& stack_trace_file); ~SignalCatcher(); - void HandleSigQuit() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); private: diff --git a/src/space.h b/src/space.h index 79d5ad44e3..3132911c1f 100644 --- a/src/space.h +++ b/src/space.h @@ -53,7 
+53,7 @@ class Space { // create a Space from an image file. cannot be used for future allocation or collected. static ImageSpace* CreateImageSpace(const std::string& image) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); virtual ~Space() {} @@ -282,7 +282,7 @@ class ImageSpace : public Space { // Mark the objects defined in this space in the given live bitmap void RecordImageAllocations(SpaceBitmap* live_bitmap) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); virtual bool IsAllocSpace() const { return false; diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc index 439e637069..7a4c48dacf 100644 --- a/src/space_bitmap.cc +++ b/src/space_bitmap.cc @@ -174,7 +174,7 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callb // class. static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Visit fields of parent classes first. Class* super = klass->GetSuperClass(); if (super != NULL) { @@ -199,7 +199,7 @@ static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* call // For an unvisited object, visit it then all its children found via fields. 
static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (visited->Test(obj)) { return; } diff --git a/src/space_bitmap.h b/src/space_bitmap.h index db1a5eb326..02f003489f 100644 --- a/src/space_bitmap.h +++ b/src/space_bitmap.h @@ -109,7 +109,7 @@ class SpaceBitmap { template void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor, const FingerVisitor& finger_visitor) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { DCHECK_LT(visit_begin, visit_end); const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1). @@ -177,11 +177,11 @@ class SpaceBitmap { } void Walk(Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void InOrderWalk(Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, diff --git a/src/stack.h b/src/stack.h index 578c631141..60218b16a1 100644 --- a/src/stack.h +++ b/src/stack.h @@ -215,7 +215,7 @@ class StackVisitor { protected: StackVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : stack_start_(stack), trace_stack_(trace_stack), cur_shadow_frame_(NULL), cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0), context_(context) {} @@ -224,10 +224,10 @@ class StackVisitor { virtual ~StackVisitor() {} // Return 'true' if we 
should continue to visit more frames, 'false' to stop. - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) = 0; + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; void WalkStack(bool include_transitions = false) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* GetMethod() const { if (cur_shadow_frame_ != NULL) { @@ -255,19 +255,19 @@ class StackVisitor { return *reinterpret_cast(save_addr); } - uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the height of the stack in the managed stack frames, including transitions. - size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetNumFrames() - cur_depth_; } // Returns a frame ID for JDWP use, starting from 1. 
- size_t GetFrameId() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetFrameHeight() + 1; } - size_t GetNumFrames() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (num_frames_ == 0) { num_frames_ = ComputeNumFrames(); } @@ -275,10 +275,10 @@ class StackVisitor { } uint32_t GetVReg(Method* m, int vreg) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetVReg(Method* m, int vreg, uint32_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uintptr_t GetGPR(uint32_t reg) const; @@ -370,13 +370,13 @@ class StackVisitor { } private: - size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); TraceStackFrame GetTraceStackFrame(uint32_t depth) const { return trace_stack_->at(trace_stack_->size() - depth - 1); } - void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const ManagedStack* const stack_start_; const std::vector* const trace_stack_; diff --git a/src/thread.cc b/src/thread.cc index c2bb1a63c1..57f99055d0 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -127,7 +127,7 @@ void* Thread::CreateCallback(void* arg) { static void SetVmData(const ScopedObjectAccess& soa, Object* managed_thread, Thread* native_thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); f->SetInt(managed_thread, reinterpret_cast(native_thread)); } @@ -137,9 +137,9 @@ Thread* Thread::FromManagedThread(const 
ScopedObjectAccessUnchecked& soa, Object Thread* result = reinterpret_cast(static_cast(f->GetInt(thread_peer))); // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ // to stop it from going away. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (result != NULL && !result->IsSuspended()) { - GlobalSynchronization::thread_list_lock_->AssertHeld(); + Locks::thread_list_lock_->AssertHeld(); } return result; } @@ -285,7 +285,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g self->Init(); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); self->SetState(kNative); } @@ -444,13 +444,13 @@ void Thread::GetThreadName(std::string& name) const { // Attempt to rectify locks so that we dump thread list with required locks before exiting. static void UnsafeLogFatalForSuspendCount(Thread* self) NO_THREAD_SAFETY_ANALYSIS { - GlobalSynchronization::thread_suspend_count_lock_->Unlock(); - GlobalSynchronization::mutator_lock_->SharedTryLock(); - if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) { + Locks::thread_suspend_count_lock_->Unlock(); + Locks::mutator_lock_->SharedTryLock(); + if (!Locks::mutator_lock_->IsSharedHeld()) { LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; } - GlobalSynchronization::thread_list_lock_->TryLock(); - if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) { + Locks::thread_list_lock_->TryLock(); + if (!Locks::thread_list_lock_->IsExclusiveHeld()) { LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; } std::ostringstream ss; @@ -462,7 +462,7 @@ void Thread::ModifySuspendCount(int delta, bool for_debugger) { DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_) << delta << " " << debug_suspend_count_ << " " << this; 
DCHECK_GE(suspend_count_, debug_suspend_count_) << this; - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + Locks::thread_suspend_count_lock_->AssertHeld(); if (delta == -1 && suspend_count_ <= 0) { // This is expected if you attach a thread during a GC. @@ -494,7 +494,7 @@ void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { ThreadState old_state = SetStateUnsafe(new_state); CHECK_EQ(old_state, kRunnable); // Release share on mutator_lock_. - GlobalSynchronization::mutator_lock_->SharedUnlock(); + Locks::mutator_lock_->SharedUnlock(); } ThreadState Thread::TransitionFromSuspendedToRunnable() { @@ -506,24 +506,24 @@ ThreadState Thread::TransitionFromSuspendedToRunnable() { // may occur is covered by the second check after we acquire a share of the mutator_lock_. if (GetSuspendCountUnsafe() > 0) { // Wait while our suspend count is non-zero. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); // Otherwise we starve GC.. + MutexLock mu(*Locks::thread_suspend_count_lock_); + Locks::mutator_lock_->AssertNotHeld(); // Otherwise we starve GC.. while (GetSuspendCount() != 0) { // Re-check when Thread::resume_cond_ is notified. - Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_); + Thread::resume_cond_->Wait(*Locks::thread_suspend_count_lock_); } } // Re-acquire shared mutator_lock_ access. - GlobalSynchronization::mutator_lock_->SharedLock(); + Locks::mutator_lock_->SharedLock(); // Holding the mutator_lock_, synchronize with any thread trying to raise the suspend count // and change state to Runnable if no suspend is pending. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (GetSuspendCount() == 0) { SetState(kRunnable); done = true; } else { // Release shared mutator_lock_ access and try again. 
- GlobalSynchronization::mutator_lock_->SharedUnlock(); + Locks::mutator_lock_->SharedUnlock(); } } while (!done); return old_state; @@ -539,14 +539,14 @@ Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* Thread* thread; { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); thread = Thread::FromManagedThread(soa, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for suspend: " << peer; return NULL; } { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (request_suspension) { thread->ModifySuspendCount(+1, true /* for_debugger */); request_suspension = false; @@ -629,7 +629,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { if (is_daemon) { os << " daemon"; } - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); os << " prio=" << priority << " tid=" << thread->GetThinLockId() << " " << thread->GetState() << "\n"; @@ -640,7 +640,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { } if (thread != NULL) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); os << " | group=\"" << group_name << "\"" << " sCount=" << thread->suspend_count_ << " dsCount=" << thread->debug_suspend_count_ @@ -690,7 +690,7 @@ void Thread::DumpState(std::ostream& os) const { struct StackDumpVisitor : public StackVisitor { StackDumpVisitor(std::ostream& os, const Thread* thread, Context* context, bool can_allocate) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(thread->GetManagedStack(), thread->GetTraceStack(), context), os(os), thread(thread), can_allocate(can_allocate), last_method(NULL), last_line_number(0), 
repetition_count(0), frame_count(0) { @@ -702,7 +702,7 @@ struct StackDumpVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -762,7 +762,7 @@ void Thread::DumpStack(std::ostream& os) const { // If we're currently in native code, dump that stack before dumping the managed stack. ThreadState state; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); state = GetState(); } if (state == kNative) { @@ -787,7 +787,7 @@ void Thread::ThreadExitCallback(void* arg) { void Thread::Startup() { { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); // Keep GCC happy. + MutexLock mu(*Locks::thread_suspend_count_lock_); // Keep GCC happy. resume_cond_ = new ConditionVariable("Thread resumption condition variable"); } @@ -920,7 +920,7 @@ Thread::~Thread() { jni_env_ = NULL; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(GetState(), kRunnable); SetState(kTerminated); } @@ -1078,11 +1078,11 @@ class CountStackDepthVisitor : public StackVisitor { public: CountStackDepthVisitor(const ManagedStack* stack, const std::vector* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), skip_depth_(0), skipping_(true) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. 
// Note we also skip the frame if it doesn't have a method (namely the callee // save frame) @@ -1124,7 +1124,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {} bool Init(int depth, const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Allocate method trace with an extra slot that will hold the PC trace SirtRef > method_trace(Runtime::Current()->GetClassLinker()->AllocObjectArray(depth + 1)); @@ -1153,7 +1153,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method_trace_ == NULL || dex_pc_trace_ == NULL) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } @@ -1513,7 +1513,7 @@ static const bool kDebugExceptionDelivery = false; class CatchBlockStackVisitor : public StackVisitor { public: CatchBlockStackVisitor(Thread* self, Throwable* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(self->GetManagedStack(), self->GetTraceStack(), self->GetLongJumpContext()), self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL), throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL), @@ -1527,8 +1527,8 @@ class CatchBlockStackVisitor : public StackVisitor { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. 
} - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = GetMethod(); if (method == NULL) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. @@ -1570,7 +1570,7 @@ class CatchBlockStackVisitor : public StackVisitor { return true; // Continue stack walk. } - void DoLongJump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* catch_method = *handler_quick_frame_; Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_, catch_method, handler_dex_pc_, exception_); @@ -1650,10 +1650,10 @@ Method* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { struct CurrentMethodVisitor : public StackVisitor { CurrentMethodVisitor(const ManagedStack* stack, const std::vector* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {} - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. 
@@ -1691,10 +1691,10 @@ class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(const ManagedStack* stack, const std::vector* trace_stack, Context* context, Heap::RootVisitor* root_visitor, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), root_visitor_(root_visitor), arg_(arg) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); diff --git a/src/thread.h b/src/thread.h index 8dce4142cb..9355dca617 100644 --- a/src/thread.h +++ b/src/thread.h @@ -121,11 +121,11 @@ class PACKED Thread { } static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates 172 to pAllocArrayFromCode and so on. static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers); @@ -135,79 +135,79 @@ class PACKED Thread { // Dumps the detailed thread state and the thread stack (used for SIGQUIT). 
void Dump(std::ostream& os) const - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which // case we use 'tid' to identify the thread, and we'll include as much information as we can. static void DumpState(std::ostream& os, const Thread* thread, pid_t tid) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); ThreadState GetState() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return state_; } ThreadState SetState(ThreadState new_state) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); ThreadState old_state = state_; if (new_state == kRunnable) { // Sanity, should never become runnable with a pending suspension and should always hold // share of mutator_lock_. 
CHECK_EQ(GetSuspendCount(), 0); - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } state_ = new_state; return old_state; } int GetSuspendCount() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return suspend_count_; } int GetDebugSuspendCount() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return debug_suspend_count_; } bool IsSuspended() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { int suspend_count = GetSuspendCount(); return suspend_count != 0 && GetState() != kRunnable; } void ModifySuspendCount(int delta, bool for_debugger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero. void FullSuspendCheck() - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Transition from non-runnable to runnable state acquiring share on mutator_lock_. 
ThreadState TransitionFromSuspendedToRunnable() - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_); // Transition from runnable into a state where mutator privileges are denied. Releases share of // mutator lock. void TransitionFromRunnableToSuspended(ThreadState new_state) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + UNLOCK_FUNCTION(Locks::mutator_lock_); // Wait for a debugger suspension on the thread associated with the given peer. Returns the // thread on success, else NULL. If the thread should be suspended then request_suspension should // be true on entry. If the suspension times out then *timeout is set to true. static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); // Once called thread suspension will cause an assertion failure. #ifndef NDEBUG @@ -286,16 +286,16 @@ class PACKED Thread { // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer. String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. void GetThreadName(std::string& name) const; // Sets the thread's name. 
- void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* GetPeer() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return peer_; } @@ -304,7 +304,7 @@ class PACKED Thread { } Object* GetThreadGroup(const ScopedObjectAccessUnchecked& ts) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); RuntimeStats* GetStats() { return &stats_; @@ -316,7 +316,7 @@ class PACKED Thread { return exception_ != NULL; } - Throwable* GetException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(CanAccessDirectReferences()); return exception_; } @@ -324,7 +324,7 @@ class PACKED Thread { void AssertNoPendingException() const; void SetException(Throwable* new_exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(CanAccessDirectReferences()); CHECK(new_exception != NULL); // TODO: CHECK(exception_ == NULL); @@ -336,7 +336,7 @@ class PACKED Thread { } // Find catch block and perform long jump to appropriate exception handle - void DeliverException() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void DeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Context* GetLongJumpContext(); void ReleaseLongJumpContext(Context* context) { @@ -345,7 +345,7 @@ class PACKED Thread { } Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetTopOfStack(void* stack, uintptr_t pc) { Method** top_method = reinterpret_cast(stack); @@ -359,24 +359,24 @@ class PACKED Thread { 
// If 'msg' is NULL, no detail message is set. void ThrowNewException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be // used as the new exception's cause. void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) __attribute__((format(printf, 3, 4))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // OutOfMemoryError is special, because we need to pre-allocate an instance. // Only the GC should call this. void ThrowOutOfMemoryError(const char* msg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc); @@ -398,7 +398,7 @@ class PACKED Thread { Object* DecodeJObject(jobject obj) LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implements java.lang.Thread.interrupted. 
bool Interrupted() { @@ -429,7 +429,7 @@ class PACKED Thread { } ClassLoader* GetClassLoaderOverride() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(CanAccessDirectReferences()); return class_loader_override_; } @@ -441,7 +441,7 @@ class PACKED Thread { // Create the internal representation of a stack trace, that is more time // and space efficient to compute than the StackTraceElement[] jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many @@ -451,12 +451,12 @@ class PACKED Thread { jobjectArray output_array = NULL, int* stack_depth = NULL); void VisitRoots(Heap::RootVisitor* visitor, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); #if VERIFY_OBJECT_ENABLED - void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); #else - void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){} + void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){} #endif // @@ -493,7 +493,7 @@ class PACKED Thread { } // Set the stack end to that to be used during a stack overflow - void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set the stack end to that to be used during regular execution void ResetDefaultStackEnd() { @@ -608,8 +608,8 @@ class PACKED Thread { typedef uint32_t bool32_t; explicit Thread(bool daemon); - ~Thread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - 
GlobalSynchronization::thread_suspend_count_lock_); + ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_suspend_count_lock_); void Destroy(); friend class ThreadList; // For ~Thread and Destroy. @@ -640,20 +640,20 @@ class PACKED Thread { void DumpState(std::ostream& os) const; void DumpStack(std::ostream& os) const - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Out-of-line conveniences for debugging in gdb. static Thread* CurrentFromGdb(); // Like Thread::Current. // Like Thread::Dump(std::cerr). - void DumpFromGdb() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void* CreateCallback(void* arg); void HandleUncaughtExceptions(const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RemoveFromThreadGroup(const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Init(); void InitCardTable(); @@ -677,13 +677,13 @@ class PACKED Thread { // Used to notify threads that they should attempt to resume, they will suspend again if // their suspend count is > 0. static ConditionVariable* resume_cond_ - GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + GUARDED_BY(Locks::thread_suspend_count_lock_); // --- Frequently accessed fields first for short offsets --- // A non-zero value is used to tell the current thread to enter a safe point // at the next poll. 
- int suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_); // The biased card table, see CardTable for details byte* card_table_; @@ -706,7 +706,7 @@ class PACKED Thread { // is hard. This field can be read off of Thread::Current to give the address. Thread* self_; - volatile ThreadState state_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + volatile ThreadState state_ GUARDED_BY(Locks::thread_suspend_count_lock_); // Our managed peer (an instance of java.lang.Thread). Object* peer_; @@ -760,7 +760,7 @@ class PACKED Thread { // How much of 'suspend_count_' is by request of the debugger, used to set things right // when the debugger detaches. Must be <= suspend_count_. - int debug_suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_); // JDWP invoke-during-breakpoint support. DebugInvokeReq* debug_invoke_req_; diff --git a/src/thread_list.cc b/src/thread_list.cc index 6008e16003..c1db387e0f 100644 --- a/src/thread_list.cc +++ b/src/thread_list.cc @@ -61,12 +61,12 @@ bool ThreadList::Contains(pid_t tid) { } pid_t ThreadList::GetLockOwner() { - return GlobalSynchronization::thread_list_lock_->GetExclusiveOwnerTid(); + return Locks::thread_list_lock_->GetExclusiveOwnerTid(); } void ThreadList::DumpForSigQuit(std::ostream& os) { { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); DumpLocked(os); } DumpUnattachedThreads(os); @@ -97,7 +97,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) { if (!*end) { bool contains; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); contains = Contains(tid); } if (!contains) { @@ -109,7 +109,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) { } void ThreadList::DumpLocked(std::ostream& os) { - 
GlobalSynchronization::thread_list_lock_->AssertHeld(); + Locks::thread_list_lock_->AssertHeld(); os << "DALVIK THREADS (" << list_.size() << "):\n"; for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->Dump(os); @@ -118,8 +118,8 @@ void ThreadList::DumpLocked(std::ostream& os) { } void ThreadList::AssertThreadsAreSuspended() { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_list_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; CHECK_NE(thread->GetState(), kRunnable); @@ -134,12 +134,12 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS ss << "Thread suspend timeout\n"; runtime->DumpLockHolders(ss); ss << "\n"; - GlobalSynchronization::mutator_lock_->SharedTryLock(); - if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) { + Locks::mutator_lock_->SharedTryLock(); + if (!Locks::mutator_lock_->IsSharedHeld()) { LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; } - GlobalSynchronization::thread_list_lock_->TryLock(); - if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) { + Locks::thread_list_lock_->TryLock(); + if (!Locks::thread_list_lock_->IsExclusiveHeld()) { LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; } runtime->GetThreadList()->DumpLocked(ss); @@ -153,16 +153,16 @@ void ThreadList::SuspendAll() { VLOG(threads) << *self << " SuspendAll starting..."; if (kIsDebugBuild) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); - GlobalSynchronization::thread_list_lock_->AssertNotHeld(); - GlobalSynchronization::thread_suspend_count_lock_->AssertNotHeld(); - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + Locks::mutator_lock_->AssertNotHeld(); + Locks::thread_list_lock_->AssertNotHeld(); + 
Locks::thread_suspend_count_lock_->AssertNotHeld(); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); } { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); { - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); // Update global suspend all state for attaching threads. ++suspend_all_count_; // Increment everybody's suspend count (except our own). @@ -183,11 +183,11 @@ void ThreadList::SuspendAll() { timespec timeout; clock_gettime(CLOCK_REALTIME, &timeout); timeout.tv_sec += 30; - if (UNLIKELY(!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout))) { + if (UNLIKELY(!Locks::mutator_lock_->ExclusiveLockWithTimeout(timeout))) { UnsafeLogFatalForThreadSuspendAllTimeout(); } #else - GlobalSynchronization::mutator_lock_->ExclusiveLock(); + Locks::mutator_lock_->ExclusiveLock(); #endif // Debug check that all threads are suspended. @@ -201,8 +201,8 @@ void ThreadList::ResumeAll() { VLOG(threads) << *self << " ResumeAll starting"; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_list_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); // Update global suspend all state for attaching threads. --suspend_all_count_; // Decrement the suspend counts for all threads. @@ -219,7 +219,7 @@ void ThreadList::ResumeAll() { VLOG(threads) << *self << " ResumeAll waking others"; Thread::resume_cond_->Broadcast(); } - GlobalSynchronization::mutator_lock_->ExclusiveUnlock(); + Locks::mutator_lock_->ExclusiveUnlock(); VLOG(threads) << *self << " ResumeAll complete"; } @@ -229,9 +229,9 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) { { // To check Contains. - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); // To check IsSuspended. 
- MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); CHECK(thread->IsSuspended()); if (!Contains(thread)) { return; @@ -241,7 +241,7 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) { { VLOG(threads) << "Resume(" << *thread << ") waking others"; - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); Thread::resume_cond_->Broadcast(); } @@ -255,9 +255,9 @@ void ThreadList::SuspendAllForDebugger() { VLOG(threads) << *self << " SuspendAllForDebugger starting..."; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); // Update global suspend all state for attaching threads. ++suspend_all_count_; ++debug_suspend_all_count_; @@ -280,14 +280,14 @@ void ThreadList::SuspendAllForDebugger() { timespec timeout; clock_gettime(CLOCK_REALTIME, &timeout); timeout.tv_sec += 30; - if (!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout)) { + if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(timeout)) { UnsafeLogFatalForThreadSuspendAllTimeout(); } else { - GlobalSynchronization::mutator_lock_->ExclusiveUnlock(); + Locks::mutator_lock_->ExclusiveUnlock(); } #else - GlobalSynchronization::mutator_lock_->ExclusiveLock(); - GlobalSynchronization::mutator_lock_->ExclusiveUnlock(); + Locks::mutator_lock_->ExclusiveLock(); + Locks::mutator_lock_->ExclusiveUnlock(); #endif AssertThreadsAreSuspended(); @@ -305,7 +305,7 @@ void ThreadList::SuspendSelfForDebugger() { // Collisions with other suspends aren't really interesting. We want // to ensure that we're the only one fiddling with the suspend count // though. 
- MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); self->ModifySuspendCount(+1, true); // Suspend ourselves. @@ -319,7 +319,7 @@ void ThreadList::SuspendSelfForDebugger() { Dbg::ClearWaitForEventThread(); while (self->suspend_count_ != 0) { - Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_); + Thread::resume_cond_->Wait(*Locks::thread_suspend_count_lock_); if (self->suspend_count_ != 0) { // The condition was signaled but we're still suspended. This // can happen if the debugger lets go while a SIGQUIT thread @@ -340,8 +340,8 @@ void ThreadList::UndoDebuggerSuspensions() { VLOG(threads) << *self << " UndoDebuggerSuspensions starting"; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_list_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); // Update global suspend all state for attaching threads. suspend_all_count_ -= debug_suspend_all_count_; debug_suspend_all_count_ = 0; @@ -356,7 +356,7 @@ void ThreadList::UndoDebuggerSuspensions() { } { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); Thread::resume_cond_->Broadcast(); } @@ -364,8 +364,8 @@ void ThreadList::UndoDebuggerSuspensions() { } void ThreadList::WaitForOtherNonDaemonThreadsToExit() { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Locks::mutator_lock_->AssertNotHeld(); + MutexLock mu(*Locks::thread_list_lock_); bool all_threads_are_daemons; do { all_threads_are_daemons = true; @@ -380,15 +380,15 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() { } if (!all_threads_are_daemons) { // Wait for another thread to exit before re-checking. 
- thread_exit_cond_.Wait(*GlobalSynchronization::thread_list_lock_); + thread_exit_cond_.Wait(*Locks::thread_list_lock_); } } while(!all_threads_are_daemons); } void ThreadList::SuspendAllDaemonThreads() { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); { // Tell all the daemons it's time to suspend. - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; // This is only run after all non-daemon threads have exited, so the remainder should all be @@ -406,7 +406,7 @@ void ThreadList::SuspendAllDaemonThreads() { bool all_suspended = true; for (It it = list_.begin(), end = list_.end(); it != end; ++it) { Thread* thread = *it; - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); if (thread != Thread::Current() && thread->GetState() == kRunnable) { if (!have_complained) { LOG(WARNING) << "daemon thread not yet suspended: " << *thread; @@ -432,8 +432,8 @@ void ThreadList::Register(Thread* self) { // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing // SuspendAll requests. - MutexLock mu(*GlobalSynchronization::thread_list_lock_); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_list_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); self->suspend_count_ = suspend_all_count_; self->debug_suspend_count_ = debug_suspend_all_count_; CHECK(!Contains(self)); @@ -451,7 +451,7 @@ void ThreadList::Unregister(Thread* self) { { // Remove this thread from the list. 
- MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); CHECK(Contains(self)); list_.remove(self); } @@ -466,7 +466,7 @@ void ThreadList::Unregister(Thread* self) { CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self"); // Signal that a thread just detached. - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); thread_exit_cond_.Signal(); } @@ -477,7 +477,7 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) { } void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->VisitRoots(visitor, arg); } diff --git a/src/thread_list.h b/src/thread_list.h index e5b911489a..b80c1a5ecf 100644 --- a/src/thread_list.h +++ b/src/thread_list.h @@ -34,57 +34,57 @@ class ThreadList { ~ThreadList(); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpLocked(std::ostream& os) // For thread suspend timeout dumps. - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); pid_t GetLockOwner(); // For SignalCatcher. // Thread suspension support. 
void ResumeAll() - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + UNLOCK_FUNCTION(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); void Resume(Thread* thread, bool for_debugger = false) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); // Suspends all threads and gets exclusive access to the mutator_lock_. void SuspendAll() - EXCLUSIVE_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); // Suspends all threads void SuspendAllForDebugger() - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); void SuspendSelfForDebugger() - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); void UndoDebuggerSuspensions() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); // Iterates over all the threads. void ForEach(void (*callback)(Thread*, void*), void* context) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); // Add/remove current thread from list. 
void Register(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_); void Unregister(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Return a copy of the thread list. - std::list GetList() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_) { + std::list GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) { return list_; } @@ -94,35 +94,35 @@ class ThreadList { uint32_t AllocThreadId(); void ReleaseThreadId(uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_); - bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); - bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_); + bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); void DumpUnattachedThreads(std::ostream& os) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_); void SuspendAllDaemonThreads() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); void WaitForOtherNonDaemonThreadsToExit() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); void AssertThreadsAreSuspended() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - 
GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::bitset allocated_ids_ GUARDED_BY(allocated_ids_lock_); // The actual list of all threads. - std::list list_ GUARDED_BY(GlobalSynchronization::thread_list_lock_); + std::list list_ GUARDED_BY(Locks::thread_list_lock_); // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll. - int suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); - int debug_suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_); + int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_); + int debug_suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_); // Signaled when threads terminate. Used to determine when all non-daemons have terminated. - ConditionVariable thread_exit_cond_ GUARDED_BY(GlobalSynchronization::thread_list_lock_); + ConditionVariable thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_); friend class Thread; diff --git a/src/trace.cc b/src/trace.cc index 5ac7e3d6f8..e4bc83663c 100644 --- a/src/trace.cc +++ b/src/trace.cc @@ -159,7 +159,7 @@ static void Append8LE(uint8_t* buf, uint64_t val) { } static bool InstallStubsClassVisitor(Class* klass, void*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Trace* tracer = Runtime::Current()->GetTracer(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { Method* method = klass->GetDirectMethod(i); @@ -178,7 +178,7 @@ static bool InstallStubsClassVisitor(Class* klass, void*) } static bool UninstallStubsClassVisitor(Class* klass, void*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Trace* tracer = Runtime::Current()->GetTracer(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { Method* 
method = klass->GetDirectMethod(i); @@ -489,8 +489,8 @@ static void DumpThread(Thread* t, void* arg) { } void Trace::DumpThreadList(std::ostream& os) { - GlobalSynchronization::thread_list_lock_->AssertNotHeld(); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + Locks::thread_list_lock_->AssertNotHeld(); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os); } @@ -499,9 +499,9 @@ void Trace::InstallStubs() { } void Trace::UninstallStubs() { - GlobalSynchronization::thread_list_lock_->AssertNotHeld(); + Locks::thread_list_lock_->AssertNotHeld(); Runtime::Current()->GetClassLinker()->VisitClasses(UninstallStubsClassVisitor, NULL); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(TraceRestoreStack, NULL); } diff --git a/src/trace.h b/src/trace.h index c60ae1576b..9c64347b8b 100644 --- a/src/trace.h +++ b/src/trace.h @@ -83,18 +83,18 @@ class Trace { explicit Trace(File* trace_file, int buffer_size, int flags); void BeginTracing(); - void FinishTracing() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Replaces code of each method with a pointer to a stub for method tracing. void InstallStubs(); // Restores original code for each method and fixes the return values of each thread's stack. - void UninstallStubs() LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); + void UninstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_); // Methods to output traced methods and threads. 
void GetVisitedMethods(size_t end_offset); - void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); + void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); // Maps a method to its original code pointer. SafeMap saved_code_map_; diff --git a/src/utf.h b/src/utf.h index dd5791c5a4..1d4101f385 100644 --- a/src/utf.h +++ b/src/utf.h @@ -65,7 +65,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t * The java.lang.String hashCode() algorithm. */ int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, size_t char_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count); /* diff --git a/src/utils.h b/src/utils.h index 2846dadb6b..335a66951c 100644 --- a/src/utils.h +++ b/src/utils.h @@ -168,18 +168,18 @@ std::string PrettyDescriptor(const String* descriptor); std::string PrettyDescriptor(const std::string& descriptor); std::string PrettyDescriptor(Primitive::Type type); std::string PrettyDescriptor(const Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or // "int a.b.C.f" (depending on the value of 'with_type'). std::string PrettyField(const Field* f, bool with_type = true) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true); // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). 
std::string PrettyMethod(const Method* m, bool with_signature = true) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); // Returns a human-readable form of the name of the *class* of the given object. @@ -187,16 +187,16 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with // be "java.lang.String". Given an array of int, the output would be "int[]". // Given String.class, the output would be "java.lang.Class". std::string PrettyTypeOf(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable form of the name of the given class. // Given String.class, the output would be "java.lang.Class". std::string PrettyClass(const Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable form of the name of the given class with its class loader. std::string PrettyClassAndClassLoader(const Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable size string such as "1MB". std::string PrettySize(size_t size_in_bytes); @@ -230,10 +230,10 @@ bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. std::string JniShortName(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. 
std::string JniLongName(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc index 55d537c81c..813073d062 100644 --- a/src/verifier/method_verifier.cc +++ b/src/verifier/method_verifier.cc @@ -1029,7 +1029,7 @@ std::ostream& MethodVerifier::DumpFailures(std::ostream& os) { } extern "C" void MethodVerifierGdbDump(MethodVerifier* v) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { v->Dump(std::cerr); } diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h index 244f1f8720..51bed6ea5f 100644 --- a/src/verifier/method_verifier.h +++ b/src/verifier/method_verifier.h @@ -164,11 +164,11 @@ class MethodVerifier { /* Verify a class. Returns "kNoFailure" on success. */ static FailureKind VerifyClass(const Class* klass, std::string& error) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static FailureKind VerifyClass(const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, std::string& error) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint8_t EncodePcToReferenceMapData() const; @@ -194,7 +194,7 @@ class MethodVerifier { // Dump the state of the verifier, namely each instruction, what flags are set on it, register // information - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static const std::vector* GetGcMap(Compiler::MethodReference ref) LOCKS_EXCLUDED(gc_maps_lock_); @@ -203,7 +203,7 @@ class MethodVerifier { // to the locks held at 'dex_pc' in 'm'. 
static void FindLocksAtDexPc(Method* m, uint32_t dex_pc, std::vector& monitor_enter_dex_pcs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Init(); static void Shutdown(); @@ -220,7 +220,7 @@ class MethodVerifier { explicit MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, uint32_t method_idx, Method* method, uint32_t access_flags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -242,15 +242,15 @@ class MethodVerifier { static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, Method* method, uint32_t method_access_flags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void VerifyMethodAndDump(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Run verification on the method. Returns true if verification completes and false if the input // has an irrecoverable corruption. - bool Verify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Compute the width of the instruction at each address in the instruction stream, and store it in @@ -278,7 +278,7 @@ class MethodVerifier { * Returns "false" if something in the exception table looks fishy, but we're expecting the * exception table to be somewhat sane. 
*/ - bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Perform static verification on all instructions in a method. @@ -384,11 +384,11 @@ class MethodVerifier { bool* selfOkay); /* Perform detailed code-flow analysis on a single method. */ - bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set the register types for the first instruction in the method based on the method signature. // This has the side-effect of validating the signature. - bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Perform code flow on a method. @@ -436,7 +436,7 @@ class MethodVerifier { * reordering by specifying that you can't execute the new-instance instruction if a register * contains an uninitialized instance created by that same instruction. */ - bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Perform verification for a single instruction. @@ -448,45 +448,45 @@ class MethodVerifier { * addresses. Does not set or clear any other flags in "insn_flags_". */ bool CodeFlowVerifyInstruction(uint32_t* start_guess) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of a new array instruction void VerifyNewArray(const DecodedInstruction& dec_insn, bool is_filled, bool is_range) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of an aget instruction. 
The destination register's type will be set to // be that of component type of the array unless the array type is unknown, in which case a // bottom type inferred from the type of instruction is used. is_primitive is false for an // aget-object. void VerifyAGet(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of an aput instruction. void VerifyAPut(const DecodedInstruction& insn, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Lookup instance field and fail for resolution violations Field* GetInstanceField(const RegType& obj_type, int field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Lookup static field and fail for resolution violations - Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of an iget or sget instruction. void VerifyISGet(const DecodedInstruction& insn, const RegType& insn_type, bool is_primitive, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of an iput or sput instruction. void VerifyISPut(const DecodedInstruction& insn, const RegType& insn_type, bool is_primitive, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolves a class based on an index and performs access checks to ensure the referrer can // access the resolved class. 
const RegType& ResolveClassAndCheckAccess(uint32_t class_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler @@ -494,7 +494,7 @@ class MethodVerifier { * exception handler can be found or if the Join of exception types fails. */ const RegType& GetCaughtExceptionType() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Resolves a method based on an index and performs access checks to ensure @@ -502,7 +502,7 @@ class MethodVerifier { * Does not throw exceptions. */ Method* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify the arguments to a method. We're executing in "method", making @@ -528,7 +528,7 @@ class MethodVerifier { */ Method* VerifyInvocationArgs(const DecodedInstruction& dec_insn, MethodType method_type, bool is_range, bool is_super) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify that the target instruction is not "move-exception". It's important that the only way @@ -543,7 +543,7 @@ class MethodVerifier { * Returns "false" if an error is encountered. */ bool UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Is the method being verified a constructor? bool IsConstructor() const { @@ -556,10 +556,10 @@ class MethodVerifier { } // Return the register type for the method. 
- const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get a type representing the declaring class of the method. - const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); #if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER) /* @@ -626,13 +626,13 @@ class MethodVerifier { uint32_t method_idx_; // The method we're working on. // Its object representation if known. - Method* foo_method_ GUARDED_BY(GlobalSynchronization::mutator_lock_); + Method* foo_method_ GUARDED_BY(Locks::mutator_lock_); uint32_t method_access_flags_; // Method's access flags. const DexFile* dex_file_; // The dex file containing the method. // The dex_cache for the declaring class of the method. - DexCache* dex_cache_ GUARDED_BY(GlobalSynchronization::mutator_lock_); + DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_); // The class loader for the declaring class of the method. - ClassLoader* class_loader_ GUARDED_BY(GlobalSynchronization::mutator_lock_); + ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_); uint32_t class_def_idx_; // The class def index of the declaring class of the method. const DexFile::CodeItem* code_item_; // The code item containing the code for the method. UniquePtr insn_flags_; // Instruction widths and flags, one entry per code unit. 
diff --git a/src/verifier/method_verifier_test.cc b/src/verifier/method_verifier_test.cc index e52feb3258..9c9c745098 100644 --- a/src/verifier/method_verifier_test.cc +++ b/src/verifier/method_verifier_test.cc @@ -28,7 +28,7 @@ namespace verifier { class MethodVerifierTest : public CommonTest { protected: void VerifyClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); @@ -38,7 +38,7 @@ class MethodVerifierTest : public CommonTest { } void VerifyDexFile(const DexFile* dex) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(dex != NULL); // Verify all the classes defined in this file diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc index 281d96e6b4..f5552233d0 100644 --- a/src/verifier/reg_type.cc +++ b/src/verifier/reg_type.cc @@ -385,7 +385,7 @@ Class* RegType::ClassJoin(Class* s, Class* t) { } std::ostream& operator<<(std::ostream& os, const RegType& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << rhs.Dump(); return os; } diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h index a0e2ff8525..3064f30ae3 100644 --- a/src/verifier/reg_type.h +++ b/src/verifier/reg_type.h @@ -118,7 +118,7 @@ class RegType { // The high half that corresponds to this low half const RegType& HighHalf(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsConstant() const { return type_ == kRegTypeConst; } bool IsLongConstant() const { return type_ == kRegTypeConstLo; } @@ -209,7 +209,7 @@ class RegType { return IsReference() && GetClass()->IsObjectClass(); } - bool IsArrayTypes() const 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { return GetDescriptor()->CharAt(0) == '['; } else if (IsReference()) { @@ -219,7 +219,7 @@ class RegType { } } - bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { // Primitive arrays will always resolve DCHECK(GetDescriptor()->CharAt(1) == 'L' || GetDescriptor()->CharAt(1) == '['); @@ -295,27 +295,27 @@ class RegType { } const RegType& GetSuperClass(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string Dump(const RegTypeCache* reg_types = NULL) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can this type access other? bool CanAccess(const RegType& other) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can this type access a member with the given properties? bool CanAccessMember(Class* klass, uint32_t access_flags) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can this type be assigned by src? bool IsAssignableFrom(const RegType& src) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool Equals(const RegType& other) const { return GetId() == other.GetId(); } // Compute the merge of this register from one edge (path) with incoming_type from another. 
const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is @@ -334,7 +334,7 @@ class RegType { * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ static Class* ClassJoin(Class* s, Class* t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: friend class RegTypeCache; diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h index 1287388e07..5a2c49cc75 100644 --- a/src/verifier/reg_type_cache.h +++ b/src/verifier/reg_type_cache.h @@ -41,65 +41,65 @@ class RegTypeCache { } const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromClass(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromCat1Const(int32_t value); const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromType(RegType::Type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right); const RegType& FromUnresolvedSuperClass(const RegType& child); - const RegType& Boolean() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeBoolean); } - const RegType& Byte() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& 
Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeByte); } - const RegType& Char() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeChar); } - const RegType& Short() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeShort); } - const RegType& Integer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeInteger); } - const RegType& Float() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeFloat); } - const RegType& Long() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Long() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeLongLo); } - const RegType& Double() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Double() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeDoubleLo); } - const RegType& JavaLangClass() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Class;"); } - const RegType& JavaLangObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Object;"); } - const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return 
From(RegType::kRegTypeReference, NULL, "Ljava/lang/String;"); } - const RegType& JavaLangThrowable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Throwable;"); } - const RegType& Undefined() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeUndefined); } - const RegType& Conflict() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Conflict() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeConflict); } - const RegType& ConstLo() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& ConstLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeConstLo); } - const RegType& Zero() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromCat1Const(0); } @@ -116,7 +116,7 @@ class RegTypeCache { const RegType& IntConstant() { return FromCat1Const(std::numeric_limits::max()); } const RegType& GetComponentType(const RegType& array, ClassLoader* loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: // The allocated entries diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc index ec7891edce..4882740dcc 100644 --- a/src/verifier/register_line.cc +++ b/src/verifier/register_line.cc @@ -368,7 +368,7 @@ void RegisterLine::WriteReferenceBitMap(std::vector& data, size_t max_b } std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << rhs.Dump(); return os; } diff --git 
a/src/verifier/register_line.h b/src/verifier/register_line.h index 1d131ad8f4..9f0fcb05ae 100644 --- a/src/verifier/register_line.h +++ b/src/verifier/register_line.h @@ -62,22 +62,22 @@ class RegisterLine { // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst". void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This // copies both halves of the register. void CopyRegister2(uint32_t vdst, uint32_t vsrc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement "move-result". Copy the category-1 value from the result register to another // register, and reset the result register. void CopyResultRegister1(uint32_t vdst, bool is_reference) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement "move-result-wide". Copy the category-2 value from the result register to another // register, and reset the result register. void CopyResultRegister2(uint32_t vdst) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set the invisible result register to unknown void SetResultTypeToUnknown(); @@ -86,17 +86,17 @@ class RegisterLine { // part of a 64-bit value, register N+1 will be set to "newType+1". // The register index was validated during the static pass, so we don't need to check it here. bool SetRegisterType(uint32_t vdst, const RegType& new_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* Set the type of the "result" register. 
*/ void SetResultRegisterType(const RegType& new_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the type of register vsrc. const RegType& GetRegisterType(uint32_t vsrc) const; bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CopyFromLine(const RegisterLine* src) { DCHECK_EQ(num_regs_, src->num_regs_); @@ -105,7 +105,7 @@ class RegisterLine { reg_to_lock_depths_ = src->reg_to_lock_depths_; } - std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FillWithGarbage() { memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t)); @@ -122,7 +122,7 @@ class RegisterLine { * the new ones at the same time). */ void MarkUninitRefsAsInvalid(const RegType& uninit_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Update all registers holding "uninit_type" to instead hold the corresponding initialized @@ -161,7 +161,7 @@ class RegisterLine { * versions. We just need to make sure vA is >= 1 and then return vC. */ const RegType& GetInvocationThis(const DecodedInstruction& dec_insn) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify types for a simple two-register instruction (e.g. "neg-int"). @@ -169,7 +169,7 @@ class RegisterLine { */ void CheckUnaryOp(const DecodedInstruction& dec_insn, const RegType& dst_type, const RegType& src_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify types for a simple three-register instruction (e.g. "add-int"). 
@@ -179,7 +179,7 @@ class RegisterLine { void CheckBinaryOp(const DecodedInstruction& dec_insn, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify types for a binary "2addr" operation. "src_type1"/"src_type2" @@ -189,7 +189,7 @@ class RegisterLine { const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Verify types for A two-register instruction with a literal constant (e.g. "add-int/lit8"). @@ -199,7 +199,7 @@ class RegisterLine { */ void CheckLiteralOp(const DecodedInstruction& dec_insn, const RegType& dst_type, const RegType& src_type, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx. void PushMonitor(uint32_t reg_idx, int32_t insn_idx); @@ -217,7 +217,7 @@ class RegisterLine { bool VerifyMonitorStackEmpty(); bool MergeRegisters(const RegisterLine* incoming_line) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) { size_t i = static_cast(max_ref_reg) < 0 ? 
0 : max_ref_reg; diff --git a/src/well_known_classes.h b/src/well_known_classes.h index 15961e28ae..65ee6b49c2 100644 --- a/src/well_known_classes.h +++ b/src/well_known_classes.h @@ -33,7 +33,7 @@ struct WellKnownClasses { static void Init(JNIEnv* env); static Class* ToClass(jclass global_jclass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static jclass com_android_dex_Dex; static jclass dalvik_system_PathClassLoader; diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 0aefa97022..e6d652de66 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -44,11 +44,11 @@ namespace art { struct ReferenceMap2Visitor : public StackVisitor { explicit ReferenceMap2Visitor(const ManagedStack* stack, const std::vector* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) { return true; diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index 41e8d583ca..444eaed818 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -42,11 +42,11 @@ static int gJava_StackWalk_refmap_calls = 0; struct TestReferenceMapVisitor : public StackVisitor { explicit TestReferenceMapVisitor(const ManagedStack* stack, const std::vector* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); CHECK(m != NULL); LOG(INFO) << "At " << PrettyMethod(m, false); -- cgit v1.2.3-59-g8ed1b From 2dd0e2cea360bc9206eb88ecc40d259e796c239d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 24 Jan 2013 12:42:14 -0800 Subject: Directory restructuring of object.h Break object.h into constituent files. Reduce number of #includes in other GC header files. Introduce -inl.h files to avoid mirror files #include-ing each other. Check invariants of verifier RegTypes for all constructors. Change-Id: Iecf1171c02910ac152d52947330ef456df4043bc --- build/Android.common.mk | 20 +- src/barrier_test.cc | 1 + src/base/mutex.cc | 1 - src/check_jni.cc | 71 +- src/class_linker-inl.h | 146 ++ src/class_linker.cc | 946 +++---- src/class_linker.h | 363 ++- src/class_linker_test.cc | 19 +- src/class_loader.h | 41 - src/common_test.h | 34 +- src/common_throws.cc | 56 +- src/common_throws.h | 49 +- src/compiled_class.h | 8 +- src/compiler.cc | 148 +- src/compiler.h | 21 +- src/compiler/codegen/arm/call_arm.cc | 8 +- src/compiler/codegen/arm/int_arm.cc | 18 +- src/compiler/codegen/gen_common.cc | 55 +- src/compiler/codegen/gen_invoke.cc | 33 +- src/compiler/codegen/mips/int_mips.cc | 18 +- src/compiler/codegen/mir_to_gbc.cc | 2 +- src/compiler/codegen/mir_to_lir.cc | 2 +- src/compiler/codegen/x86/call_x86.cc | 6 +- src/compiler/codegen/x86/int_x86.cc | 18 +- src/compiler/compiler_internals.h | 3 +- src/compiler/frontend.cc | 2 +- src/compiler_llvm/compiler_llvm.h | 12 +- src/compiler_llvm/gbc_expander.cc | 2 +- src/compiler_llvm/jni_compiler.cc | 2 +- src/compiler_llvm/jni_compiler.h | 12 +- src/compiler_llvm/method_compiler.cc | 2 +- src/compiler_llvm/method_compiler.h | 15 +- src/compiler_llvm/runtime_support_builder.cc | 6 +- .../runtime_support_builder_thumb2.cc | 2 +- src/compiler_llvm/runtime_support_llvm.cc | 200 +- src/compiler_llvm/runtime_support_llvm.h | 2 +- src/compiler_llvm/stub_compiler.cc | 2 +- 
src/compiler_test.cc | 19 +- src/debugger.cc | 332 +-- src/debugger.h | 38 +- src/dex2oat.cc | 26 +- src/dex_cache.cc | 73 - src/dex_cache.h | 186 -- src/dex_cache_test.cc | 60 - src/dex_file.cc | 42 +- src/dex_file.h | 59 +- src/dex_file_verifier.cc | 3 +- src/dex_instruction.cc | 1 + src/exception_test.cc | 16 +- src/gc/atomic_stack.h | 6 +- src/gc/card_table-inl.h | 209 ++ src/gc/card_table.cc | 1 + src/gc/card_table.h | 178 +- src/gc/garbage_collector.cc | 1 + src/gc/garbage_collector.h | 14 +- src/gc/gc_type.h | 39 + src/gc/heap_bitmap-inl.h | 36 + src/gc/heap_bitmap.h | 133 +- src/gc/large_object_space.cc | 29 +- src/gc/large_object_space.h | 27 +- src/gc/mark_sweep-inl.h | 159 ++ src/gc/mark_sweep.cc | 20 +- src/gc/mark_sweep.h | 263 +- src/gc/mod_union_table-inl.h | 64 + src/gc/mod_union_table.cc | 12 +- src/gc/mod_union_table.h | 61 +- src/gc/partial_mark_sweep.cc | 42 +- src/gc/partial_mark_sweep.h | 19 +- src/gc/space.cc | 54 +- src/gc/space.h | 34 +- src/gc/space_bitmap-inl.h | 141 + src/gc/space_bitmap.cc | 49 +- src/gc/space_bitmap.h | 162 +- src/gc/space_bitmap_test.cc | 18 +- src/gc/space_test.cc | 30 +- src/gc/sticky_mark_sweep.cc | 66 +- src/gc/sticky_mark_sweep.h | 19 +- src/heap.cc | 178 +- src/heap.h | 79 +- src/heap_test.cc | 15 +- src/hprof/hprof.cc | 56 +- src/image.cc | 38 + src/image.h | 32 +- src/image_test.cc | 4 +- src/image_writer.cc | 32 +- src/image_writer.h | 55 +- src/indirect_reference_table.cc | 22 +- src/indirect_reference_table.h | 32 +- src/indirect_reference_table_test.cc | 10 +- src/instrumentation.cc | 35 +- src/instrumentation.h | 28 +- src/intern_table.cc | 48 +- src/intern_table.h | 37 +- src/intern_table_test.cc | 62 +- src/interpreter/interpreter.cc | 12 +- src/interpreter/interpreter.h | 10 +- src/invoke_arg_array_builder.h | 8 +- src/jdwp/jdwp.h | 3 +- src/jdwp/jdwp_event.cc | 1 + src/jdwp/jdwp_handler.cc | 3 + src/jni_compiler_test.cc | 22 +- src/jni_internal.cc | 20 +- src/jni_internal.h | 22 +- 
src/jni_internal_test.cc | 112 +- src/jobject_comparator.cc | 8 +- src/jvalue.h | 9 +- src/mirror/abstract_method-inl.h | 184 ++ src/mirror/abstract_method.cc | 376 +++ src/mirror/abstract_method.h | 524 ++++ src/mirror/array-inl.h | 39 + src/mirror/array.cc | 169 ++ src/mirror/array.h | 148 ++ src/mirror/class-inl.h | 256 ++ src/mirror/class.cc | 668 +++++ src/mirror/class.h | 866 ++++++ src/mirror/class_loader.h | 46 + src/mirror/dex_cache.cc | 94 + src/mirror/dex_cache.h | 182 ++ src/mirror/dex_cache_test.cc | 62 + src/mirror/field-inl.h | 54 + src/mirror/field.cc | 223 ++ src/mirror/field.h | 168 ++ src/mirror/iftable-inl.h | 35 + src/mirror/iftable.h | 79 + src/mirror/object-inl.h | 181 ++ src/mirror/object.cc | 215 ++ src/mirror/object.h | 261 ++ src/mirror/object_array-inl.h | 137 + src/mirror/object_array.h | 60 + src/mirror/object_test.cc | 640 +++++ src/mirror/proxy.h | 55 + src/mirror/stack_trace_element.cc | 59 + src/mirror/stack_trace_element.h | 83 + src/mirror/string.cc | 289 ++ src/mirror/string.h | 174 ++ src/mirror/throwable.cc | 94 + src/mirror/throwable.h | 75 + src/modifiers.h | 2 + src/monitor.cc | 49 +- src/monitor.h | 42 +- src/monitor_android.cc | 3 +- src/native/dalvik_system_DexFile.cc | 8 +- src/native/dalvik_system_VMDebug.cc | 5 +- src/native/dalvik_system_VMRuntime.cc | 11 +- src/native/dalvik_system_VMStack.cc | 23 +- src/native/java_lang_Class.cc | 25 +- src/native/java_lang_Object.cc | 12 +- src/native/java_lang_Runtime.cc | 5 +- src/native/java_lang_String.cc | 10 +- src/native/java_lang_System.cc | 37 +- src/native/java_lang_Thread.cc | 8 +- src/native/java_lang_VMClassLoader.cc | 6 +- src/native/java_lang_reflect_Array.cc | 17 +- src/native/java_lang_reflect_Constructor.cc | 11 +- src/native/java_lang_reflect_Field.cc | 39 +- src/native/java_lang_reflect_Method.cc | 17 +- src/native/java_lang_reflect_Proxy.cc | 24 +- src/native/sun_misc_Unsafe.cc | 55 +- src/nth_caller_visitor.h | 7 +- src/oat.cc | 1 + 
src/oat/jni/arm/jni_internal_arm.cc | 4 +- src/oat/jni/jni_compiler.cc | 4 +- src/oat/jni/mips/jni_internal_mips.cc | 4 +- src/oat/jni/x86/jni_internal_x86.cc | 4 +- src/oat/runtime/argument_visitor.h | 2 +- src/oat/runtime/arm/context_arm.cc | 6 +- src/oat/runtime/arm/oat_support_entrypoints_arm.cc | 21 +- src/oat/runtime/arm/stub_arm.cc | 16 +- src/oat/runtime/callee_save_frame.h | 5 +- src/oat/runtime/mips/context_mips.cc | 2 +- src/oat/runtime/mips/stub_mips.cc | 16 +- src/oat/runtime/oat_support_entrypoints.h | 20 +- src/oat/runtime/stub.h | 22 +- src/oat/runtime/support_alloc.cc | 42 +- src/oat/runtime/support_cast.cc | 18 +- src/oat/runtime/support_debug.cc | 4 +- src/oat/runtime/support_deoptimize.cc | 9 +- src/oat/runtime/support_dexcache.cc | 27 +- src/oat/runtime/support_field.cc | 110 +- src/oat/runtime/support_fillarray.cc | 7 +- src/oat/runtime/support_instrumentation.cc | 8 +- src/oat/runtime/support_interpreter.cc | 11 +- src/oat/runtime/support_invoke.cc | 55 +- src/oat/runtime/support_jni.cc | 31 +- src/oat/runtime/support_locks.cc | 8 +- src/oat/runtime/support_proxy.cc | 15 +- src/oat/runtime/support_stubs.cc | 37 +- src/oat/runtime/support_thread.cc | 2 +- src/oat/runtime/support_throw.cc | 24 +- src/oat/runtime/x86/context_x86.cc | 5 +- src/oat/runtime/x86/oat_support_entrypoints_x86.cc | 21 +- src/oat/runtime/x86/stub_x86.cc | 16 +- src/oat_compilation_unit.h | 5 +- src/oat_file.cc | 18 +- src/oat_file.h | 19 +- src/oat_test.cc | 7 +- src/oat_writer.cc | 30 +- src/oat_writer.h | 7 +- src/oatdump.cc | 81 +- src/oatexec.cc | 2 +- src/object.cc | 1828 ------------- src/object.h | 2748 -------------------- src/object_test.cc | 629 ----- src/object_utils.h | 120 +- src/primitive.h | 5 +- src/reference_table.cc | 30 +- src/reference_table.h | 14 +- src/reference_table_test.cc | 4 +- src/reflection.cc | 45 +- src/reflection.h | 20 +- src/root_visitor.h | 33 + src/runtime.cc | 59 +- src/runtime.h | 64 +- src/runtime_support.cc | 86 +- 
src/runtime_support.h | 84 +- src/scoped_thread_state_change.h | 14 +- src/stack.cc | 28 +- src/stack.h | 63 +- src/stack_indirect_reference_table.h | 14 +- src/thread.cc | 218 +- src/thread.h | 86 +- src/thread_list.cc | 5 +- src/thread_list.h | 12 +- src/thread_state.h | 46 + src/timing_logger.cc | 98 + src/timing_logger.h | 75 +- src/trace.cc | 14 +- src/trace.h | 6 +- src/utf.cc | 4 +- src/utf.h | 5 +- src/utils.cc | 29 +- src/utils.h | 23 +- src/utils_test.cc | 30 +- src/verifier/method_verifier.cc | 117 +- src/verifier/method_verifier.h | 52 +- src/verifier/method_verifier_test.cc | 2 +- src/verifier/reg_type.cc | 125 +- src/verifier/reg_type.h | 103 +- src/verifier/reg_type_cache.cc | 48 +- src/verifier/reg_type_cache.h | 48 +- src/verifier/register_line.h | 8 +- src/well_known_classes.cc | 5 +- src/well_known_classes.h | 5 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 6 +- test/StackWalk/stack_walk_jni.cc | 6 +- 245 files changed, 11425 insertions(+), 9377 deletions(-) create mode 100644 src/class_linker-inl.h delete mode 100644 src/class_loader.h delete mode 100644 src/dex_cache.cc delete mode 100644 src/dex_cache.h delete mode 100644 src/dex_cache_test.cc create mode 100644 src/gc/card_table-inl.h create mode 100644 src/gc/gc_type.h create mode 100644 src/gc/heap_bitmap-inl.h create mode 100644 src/gc/mark_sweep-inl.h create mode 100644 src/gc/mod_union_table-inl.h create mode 100644 src/gc/space_bitmap-inl.h create mode 100644 src/mirror/abstract_method-inl.h create mode 100644 src/mirror/abstract_method.cc create mode 100644 src/mirror/abstract_method.h create mode 100644 src/mirror/array-inl.h create mode 100644 src/mirror/array.cc create mode 100644 src/mirror/array.h create mode 100644 src/mirror/class-inl.h create mode 100644 src/mirror/class.cc create mode 100644 src/mirror/class.h create mode 100644 src/mirror/class_loader.h create mode 100644 src/mirror/dex_cache.cc create mode 100644 src/mirror/dex_cache.h create mode 100644 
src/mirror/dex_cache_test.cc create mode 100644 src/mirror/field-inl.h create mode 100644 src/mirror/field.cc create mode 100644 src/mirror/field.h create mode 100644 src/mirror/iftable-inl.h create mode 100644 src/mirror/iftable.h create mode 100644 src/mirror/object-inl.h create mode 100644 src/mirror/object.cc create mode 100644 src/mirror/object.h create mode 100644 src/mirror/object_array-inl.h create mode 100644 src/mirror/object_array.h create mode 100644 src/mirror/object_test.cc create mode 100644 src/mirror/proxy.h create mode 100644 src/mirror/stack_trace_element.cc create mode 100644 src/mirror/stack_trace_element.h create mode 100644 src/mirror/string.cc create mode 100644 src/mirror/string.h create mode 100644 src/mirror/throwable.cc create mode 100644 src/mirror/throwable.h delete mode 100644 src/object.cc delete mode 100644 src/object.h delete mode 100644 src/object_test.cc create mode 100644 src/root_visitor.h create mode 100644 src/thread_state.h create mode 100644 src/timing_logger.cc (limited to 'src/native/java_lang_System.cc') diff --git a/build/Android.common.mk b/build/Android.common.mk index 16462649af..21e829ce6e 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -154,7 +154,6 @@ LIBART_COMMON_SRC_FILES := \ src/compiled_method.cc \ src/compiler.cc \ src/debugger.cc \ - src/dex_cache.cc \ src/dex_file.cc \ src/dex_file_verifier.cc \ src/dex_instruction.cc \ @@ -193,6 +192,15 @@ LIBART_COMMON_SRC_FILES := \ src/locks.cc \ src/mem_map.cc \ src/memory_region.cc \ + src/mirror/abstract_method.cc \ + src/mirror/array.cc \ + src/mirror/class.cc \ + src/mirror/dex_cache.cc \ + src/mirror/field.cc \ + src/mirror/object.cc \ + src/mirror/stack_trace_element.cc \ + src/mirror/string.cc \ + src/mirror/throwable.cc \ src/monitor.cc \ src/native/dalvik_system_DexFile.cc \ src/native/dalvik_system_VMDebug.cc \ @@ -229,7 +237,6 @@ LIBART_COMMON_SRC_FILES := \ src/oat/utils/x86/managed_register_x86.cc \ src/oat_file.cc \ 
src/oat_writer.cc \ - src/object.cc \ src/offsets.cc \ src/os_linux.cc \ src/primitive.cc \ @@ -242,6 +249,7 @@ LIBART_COMMON_SRC_FILES := \ src/thread.cc \ src/thread_list.cc \ src/thread_pool.cc \ + src/timing_logger.cc \ src/trace.cc \ src/utf.cc \ src/utils.cc \ @@ -354,6 +362,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ src/compiler/compiler_enums.h \ src/dex_file.h \ src/dex_instruction.h \ + src/gc/gc_type.h \ src/gc/space.h \ src/heap.h \ src/indirect_reference_table.h \ @@ -362,8 +371,9 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ src/jdwp/jdwp.h \ src/jdwp/jdwp_constants.h \ src/locks.h \ - src/object.h \ + src/mirror/class.h \ src/thread.h \ + src/thread_state.h \ src/verifier/method_verifier.h LIBARTTEST_COMMON_SRC_FILES := \ @@ -380,7 +390,6 @@ TEST_COMMON_SRC_FILES := \ src/base/unix_file/string_file_test.cc \ src/class_linker_test.cc \ src/compiler_test.cc \ - src/dex_cache_test.cc \ src/dex_file_test.cc \ src/dex_instruction_visitor_test.cc \ src/elf_writer_test.cc \ @@ -395,10 +404,11 @@ TEST_COMMON_SRC_FILES := \ src/intern_table_test.cc \ src/jni_compiler_test.cc \ src/jni_internal_test.cc \ + src/mirror/dex_cache_test.cc \ + src/mirror/object_test.cc \ src/oat/utils/arm/managed_register_arm_test.cc \ src/oat/utils/x86/managed_register_x86_test.cc \ src/oat_test.cc \ - src/object_test.cc \ src/output_stream_test.cc \ src/reference_table_test.cc \ src/runtime_support_test.cc \ diff --git a/src/barrier_test.cc b/src/barrier_test.cc index 284be57a98..bb7bcb3b17 100644 --- a/src/barrier_test.cc +++ b/src/barrier_test.cc @@ -20,6 +20,7 @@ #include "atomic_integer.h" #include "common_test.h" +#include "mirror/object_array-inl.h" #include "thread_pool.h" #include "UniquePtr.h" diff --git a/src/base/mutex.cc b/src/base/mutex.cc index e2ab51f2f9..d09a6a27f6 100644 --- a/src/base/mutex.cc +++ b/src/base/mutex.cc @@ -21,7 +21,6 @@ #include "base/logging.h" #include "cutils/atomic.h" -#include "cutils/atomic-inline.h" #include "runtime.h" #include 
"scoped_thread_state_change.h" #include "thread.h" diff --git a/src/check_jni.cc b/src/check_jni.cc index 8f4e92126a..e53e1c49e6 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -21,9 +21,16 @@ #include "base/logging.h" #include "class_linker.h" +#include "class_linker-inl.h" +#include "gc/space.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" #include "object_utils.h" #include "scoped_thread_state_change.h" -#include "gc/space.h" #include "thread.h" #include "runtime.h" @@ -36,7 +43,7 @@ namespace art { static void JniAbort(const char* jni_function_name, const char* msg) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); - AbstractMethod* current_method = self->GetCurrentMethod(); + mirror::AbstractMethod* current_method = self->GetCurrentMethod(); std::ostringstream os; os << "JNI DETECTED ERROR IN APPLICATION: " << msg; @@ -123,7 +130,7 @@ static const char* gBuiltInPrefixes[] = { NULL }; -static bool ShouldTrace(JavaVMExt* vm, const AbstractMethod* method) +static bool ShouldTrace(JavaVMExt* vm, const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages // when a native method that matches the -Xjnitrace argument calls a JNI function @@ -196,14 +203,14 @@ class ScopedCheck { */ void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* f = CheckFieldID(fid); + mirror::Field* f = CheckFieldID(fid); if (f == NULL) { return; } - Class* field_type = FieldHelper(f).GetType(); + mirror::Class* field_type = FieldHelper(f).GetType(); if (!field_type->IsPrimitive()) { if (java_object != NULL) { - Object* obj = soa_.Decode(java_object); + mirror::Object* obj = soa_.Decode(java_object); // If java_object is a weak 
global ref whose referent has been cleared, // obj will be NULL. Otherwise, obj should always be non-NULL // and valid. @@ -243,7 +250,7 @@ class ScopedCheck { */ void CheckInstanceFieldID(jobject java_object, jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = soa_.Decode(java_object); + mirror::Object* o = soa_.Decode(java_object); if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) { Runtime::Current()->GetHeap()->DumpSpaces(); JniAbortF(function_name_, "field operation on invalid %s: %p", @@ -251,11 +258,11 @@ class ScopedCheck { return; } - Field* f = CheckFieldID(fid); + mirror::Field* f = CheckFieldID(fid); if (f == NULL) { return; } - Class* c = o->GetClass(); + mirror::Class* c = o->GetClass(); FieldHelper fh(f); if (c->FindInstanceField(fh.GetName(), fh.GetTypeDescriptor()) == NULL) { JniAbortF(function_name_, "jfieldID %s not valid for an object of class %s", @@ -278,7 +285,7 @@ class ScopedCheck { */ void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = CheckMethodID(mid); + mirror::AbstractMethod* m = CheckMethodID(mid); if (m == NULL) { return; } @@ -304,8 +311,8 @@ class ScopedCheck { */ void CheckStaticFieldID(jclass java_class, jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* c = soa_.Decode(java_class); - const Field* f = CheckFieldID(fid); + mirror::Class* c = soa_.Decode(java_class); + const mirror::Field* f = CheckFieldID(fid); if (f == NULL) { return; } @@ -326,11 +333,11 @@ class ScopedCheck { */ void CheckStaticMethod(jclass java_class, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const AbstractMethod* m = CheckMethodID(mid); + const mirror::AbstractMethod* m = CheckMethodID(mid); if (m == NULL) { return; } - Class* c = soa_.Decode(java_class); + mirror::Class* c = soa_.Decode(java_class); if (!c->IsAssignableFrom(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call static 
%s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str()); @@ -346,11 +353,11 @@ class ScopedCheck { */ void CheckVirtualMethod(jobject java_object, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const AbstractMethod* m = CheckMethodID(mid); + const mirror::AbstractMethod* m = CheckMethodID(mid); if (m == NULL) { return; } - Object* o = soa_.Decode(java_object); + mirror::Object* o = soa_.Decode(java_object); if (!o->InstanceOf(m->GetDeclaringClass())) { JniAbortF(function_name_, "can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str()); @@ -397,7 +404,7 @@ class ScopedCheck { SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { va_list ap; - const AbstractMethod* traceMethod = NULL; + const mirror::AbstractMethod* traceMethod = NULL; if ((!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) { // We need to guard some of the invocation interface's calls: a bad caller might // use DetachCurrentThread or GetEnv on a thread that's not yet attached. @@ -455,7 +462,7 @@ class ScopedCheck { msg += (b ? 
"JNI_TRUE" : "JNI_FALSE"); } else if (ch == 'c') { // jclass jclass jc = va_arg(ap, jclass); - Class* c = reinterpret_cast(Thread::Current()->DecodeJObject(jc)); + mirror::Class* c = reinterpret_cast(Thread::Current()->DecodeJObject(jc)); if (c == NULL) { msg += "NULL"; } else if (c == kInvalidIndirectRefObject || !Runtime::Current()->GetHeap()->IsHeapAddress(c)) { @@ -470,7 +477,7 @@ class ScopedCheck { } } else if (ch == 'f') { // jfieldID jfieldID fid = va_arg(ap, jfieldID); - Field* f = reinterpret_cast(fid); + mirror::Field* f = reinterpret_cast(fid); msg += PrettyField(f); if (!entry) { StringAppendF(&msg, " (%p)", fid); @@ -483,7 +490,7 @@ class ScopedCheck { StringAppendF(&msg, "%d", i); } else if (ch == 'm') { // jmethodID jmethodID mid = va_arg(ap, jmethodID); - AbstractMethod* m = reinterpret_cast(mid); + mirror::AbstractMethod* m = reinterpret_cast(mid); msg += PrettyMethod(m); if (!entry) { StringAppendF(&msg, " (%p)", mid); @@ -623,7 +630,7 @@ class ScopedCheck { return false; } - Object* obj = soa_.Decode(java_object); + mirror::Object* obj = soa_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) { Runtime::Current()->GetHeap()->DumpSpaces(); JniAbortF(function_name_, "%s is an invalid %s: %p (%p)", @@ -677,7 +684,7 @@ class ScopedCheck { return; } - Array* a = soa_.Decode(java_array); + mirror::Array* a = soa_.Decode(java_array); if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) { Runtime::Current()->GetHeap()->DumpSpaces(); JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)", @@ -693,12 +700,12 @@ class ScopedCheck { } } - Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (fid == NULL) { JniAbortF(function_name_, "jfieldID was NULL"); return NULL; } - Field* f = soa_.DecodeField(fid); + mirror::Field* f = soa_.DecodeField(fid); if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || 
!f->IsField()) { Runtime::Current()->GetHeap()->DumpSpaces(); JniAbortF(function_name_, "invalid jfieldID: %p", fid); @@ -707,12 +714,12 @@ class ScopedCheck { return f; } - AbstractMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mid == NULL) { JniAbortF(function_name_, "jmethodID was NULL"); return NULL; } - AbstractMethod* m = soa_.DecodeMethod(mid); + mirror::AbstractMethod* m = soa_.DecodeMethod(mid); if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsMethod()) { Runtime::Current()->GetHeap()->DumpSpaces(); JniAbortF(function_name_, "invalid jmethodID: %p", mid); @@ -733,7 +740,7 @@ class ScopedCheck { return; } - Object* o = soa_.Decode(java_object); + mirror::Object* o = soa_.Decode(java_object); if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) { Runtime::Current()->GetHeap()->DumpSpaces(); // TODO: when we remove work_around_app_jni_bugs, this should be impossible. 
@@ -1084,7 +1091,7 @@ struct GuardedCopy { static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) { ScopedObjectAccess soa(env); - Array* a = soa.Decode(java_array); + mirror::Array* a = soa.Decode(java_array); size_t component_size = a->GetClass()->GetComponentSize(); size_t byte_count = a->GetLength() * component_size; void* result = GuardedCopy::Create(a->GetRawData(component_size), byte_count, true); @@ -1104,7 +1111,7 @@ static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, } ScopedObjectAccess soa(env); - Array* a = soa.Decode(java_array); + mirror::Array* a = soa.Decode(java_array); GuardedCopy::Check(__FUNCTION__, dataBuf, true); @@ -1467,7 +1474,7 @@ FIELD_ACCESSORS(jdouble, Double, "D"); #define NON_VOID_RETURN(_retsig, _ctype) return CHECK_JNI_EXIT(_retsig, (_ctype) result) #define VOID_RETURN CHECK_JNI_EXIT_VOID() -CALL(jobject, Object, Object* result, result = reinterpret_cast, NON_VOID_RETURN("L", jobject), "L"); +CALL(jobject, Object, mirror::Object* result, result = reinterpret_cast, NON_VOID_RETURN("L", jobject), "L"); CALL(jboolean, Boolean, jboolean result, result =, NON_VOID_RETURN("Z", jboolean), "Z"); CALL(jbyte, Byte, jbyte result, result =, NON_VOID_RETURN("B", jbyte), "B"); CALL(jchar, Char, jchar result, result =, NON_VOID_RETURN("C", jchar), "C"); @@ -1492,7 +1499,7 @@ CALL(void, Void, , , VOID_RETURN, "V"); CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy); const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { - String* s = sc.soa().Decode(java_string); + mirror::String* s = sc.soa().Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { @@ -1719,7 +1726,7 @@ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D'); CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy); const jchar* result = 
baseEnv(env)->GetStringCritical(env, java_string, isCopy); if (sc.ForceCopy() && result != NULL) { - String* s = sc.soa().Decode(java_string); + mirror::String* s = sc.soa().Decode(java_string); int byteCount = s->GetLength() * 2; result = (const jchar*) GuardedCopy::Create(result, byteCount, false); if (isCopy != NULL) { diff --git a/src/class_linker-inl.h b/src/class_linker-inl.h new file mode 100644 index 0000000000..6cf49912a2 --- /dev/null +++ b/src/class_linker-inl.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_CLASS_LINKER_INL_H_ +#define ART_SRC_CLASS_LINKER_INL_H_ + +#include "class_linker.h" + +#include "mirror/dex_cache.h" +#include "mirror/field.h" +#include "mirror/iftable.h" +#include "mirror/object_array.h" + +namespace art { + +inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, + const mirror::AbstractMethod* referrer) { + mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx); + if (UNLIKELY(resolved_string == NULL)) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_string = ResolveString(dex_file, string_idx, dex_cache); + } + return resolved_string; +} + +inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, + const mirror::AbstractMethod* referrer) { + mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(resolved_type == NULL)) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + mirror::ClassLoader* class_loader = declaring_class->GetClassLoader(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); + } + return resolved_type; +} + +inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, const mirror::Field* referrer) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + mirror::Class* resolved_type = dex_cache->GetResolvedType(type_idx); + if (UNLIKELY(resolved_type == NULL)) { + mirror::ClassLoader* class_loader = declaring_class->GetClassLoader(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); + } + return resolved_type; +} + +inline mirror::AbstractMethod* 
ClassLinker::ResolveMethod(uint32_t method_idx, + const mirror::AbstractMethod* referrer, + InvokeType type) { + mirror::AbstractMethod* resolved_method = + referrer->GetDexCacheResolvedMethods()->Get(method_idx); + if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + mirror::ClassLoader* class_loader = declaring_class->GetClassLoader(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type); + } + return resolved_method; +} + +inline mirror::Field* ClassLinker::ResolveField(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + bool is_static) { + mirror::Field* resolved_field = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); + if (UNLIKELY(resolved_field == NULL)) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + mirror::ClassLoader* class_loader = declaring_class->GetClassLoader(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static); + } + return resolved_field; +} + +template +inline mirror::ObjectArray* ClassLinker::AllocObjectArray(Thread* self, size_t length) { + return mirror::ObjectArray::Alloc(self, GetClassRoot(kObjectArrayClass), length); +} + +inline mirror::ObjectArray* ClassLinker::AllocClassArray(Thread* self, + size_t length) { + return mirror::ObjectArray::Alloc(self, GetClassRoot(kClassArrayClass), length); +} + +inline mirror::ObjectArray* ClassLinker::AllocStringArray(Thread* self, + size_t length) { + return mirror::ObjectArray::Alloc(self, GetClassRoot(kJavaLangStringArrayClass), + length); +} + +inline mirror::ObjectArray* ClassLinker::AllocAbstractMethodArray(Thread* self, + size_t 
length) { + return mirror::ObjectArray::Alloc(self, + GetClassRoot(kJavaLangReflectAbstractMethodArrayClass), length); +} + +inline mirror::ObjectArray* ClassLinker::AllocMethodArray(Thread* self, + size_t length) { + return mirror::ObjectArray::Alloc(self, + GetClassRoot(kJavaLangReflectMethodArrayClass), length); +} + +inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) { + return down_cast( + mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * mirror::IfTable::kMax)); +} + +inline mirror::ObjectArray* ClassLinker::AllocFieldArray(Thread* self, + size_t length) { + return mirror::ObjectArray::Alloc(self, + GetClassRoot(kJavaLangReflectFieldArrayClass), + length); +} + +inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(class_roots_ != NULL); + mirror::Class* klass = class_roots_->Get(class_root); + DCHECK(klass != NULL); + return klass; +} + +} // namespace art + +#endif // ART_SRC_CLASS_LINKER_INL_H_ diff --git a/src/class_linker.cc b/src/class_linker.cc index c43c39778e..9aa4dda6f6 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -31,17 +31,28 @@ #include "base/logging.h" #include "base/stl_util.h" #include "base/unix_file/fd_file.h" -#include "class_loader.h" +#include "class_linker-inl.h" #include "debugger.h" -#include "dex_cache.h" #include "dex_file.h" +#include "gc/card_table-inl.h" #include "heap.h" #include "intern_table.h" #include "interpreter/interpreter.h" #include "leb128.h" #include "oat.h" #include "oat_file.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache.h" +#include "mirror/field-inl.h" +#include "mirror/iftable-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" +#include 
"mirror/stack_trace_element.h" #include "object_utils.h" #include "os.h" #include "runtime.h" @@ -58,6 +69,7 @@ #include "thread.h" #include "UniquePtr.h" #include "utils.h" +#include "verifier/method_verifier.h" #include "well_known_classes.h" namespace art { @@ -92,7 +104,7 @@ static void ThrowLinkageError(const char* fmt, ...) { va_end(args); } -static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const StringPiece& type, +static void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type, const StringPiece& name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(c); @@ -116,7 +128,7 @@ static void ThrowNullPointerException(const char* fmt, ...) { va_end(args); } -static void ThrowEarlierClassFailure(Class* c) +static void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // The class failed to initialize on a previous attempt, so we want to throw // a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we @@ -239,45 +251,47 @@ void ClassLinker::InitFromCompiler(const std::vector& boot_class // java_lang_Class comes first, it's needed for AllocClass Thread* self = Thread::Current(); Heap* heap = Runtime::Current()->GetHeap(); - SirtRef - java_lang_Class(self, down_cast(heap->AllocObject(self, NULL, sizeof(ClassClass)))); + SirtRef + java_lang_Class(self, + down_cast(heap->AllocObject(self, NULL, + sizeof(mirror::ClassClass)))); CHECK(java_lang_Class.get() != NULL); - Class::SetClassClass(java_lang_Class.get()); + mirror::Class::SetClassClass(java_lang_Class.get()); java_lang_Class->SetClass(java_lang_Class.get()); - java_lang_Class->SetClassSize(sizeof(ClassClass)); - // AllocClass(Class*) can now be used + java_lang_Class->SetClassSize(sizeof(mirror::ClassClass)); + // AllocClass(mirror::Class*) can now be used // Class[] is used for reflection support. 
- SirtRef class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); class_array_class->SetComponentType(java_lang_Class.get()); // java_lang_Object comes next so that object_array_class can be created. - SirtRef java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); CHECK(java_lang_Object.get() != NULL); // backfill Object as the super class of Class. java_lang_Class->SetSuperClass(java_lang_Object.get()); - java_lang_Object->SetStatus(Class::kStatusLoaded); + java_lang_Object->SetStatus(mirror::Class::kStatusLoaded); // Object[] next to hold class roots. - SirtRef object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); object_array_class->SetComponentType(java_lang_Object.get()); // Setup the char class to be used for char[]. - SirtRef char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); // Setup the char[] class to be used for String. - SirtRef char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); char_array_class->SetComponentType(char_class.get()); - CharArray::SetArrayClass(char_array_class.get()); + mirror::CharArray::SetArrayClass(char_array_class.get()); // Setup String. 
- SirtRef java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(StringClass))); - String::SetClass(java_lang_String.get()); - java_lang_String->SetObjectSize(sizeof(String)); - java_lang_String->SetStatus(Class::kStatusResolved); + SirtRef java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::StringClass))); + mirror::String::SetClass(java_lang_String.get()); + java_lang_String->SetObjectSize(sizeof(mirror::String)); + java_lang_String->SetStatus(mirror::Class::kStatusResolved); // Create storage for root classes, save away our work so far (requires descriptors). - class_roots_ = ObjectArray::Alloc(self, object_array_class.get(), kClassRootsMax); + class_roots_ = mirror::ObjectArray::Alloc(self, object_array_class.get(), kClassRootsMax); CHECK(class_roots_ != NULL); SetClassRoot(kJavaLangClass, java_lang_Class.get()); SetClassRoot(kJavaLangObject, java_lang_Object.get()); @@ -300,68 +314,69 @@ void ClassLinker::InitFromCompiler(const std::vector& boot_class array_iftable_ = AllocIfTable(self, 2); // Create int array type for AllocDexCache (done in AppendToBootClassPath). - SirtRef int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt)); - IntArray::SetArrayClass(int_array_class.get()); + mirror::IntArray::SetArrayClass(int_array_class.get()); SetClassRoot(kIntArrayClass, int_array_class.get()); // now that these are registered, we can use AllocClass() and AllocObjectArray // Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache. 
- SirtRef - java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(DexCacheClass))); + SirtRef + java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::DexCacheClass))); SetClassRoot(kJavaLangDexCache, java_lang_DexCache.get()); - java_lang_DexCache->SetObjectSize(sizeof(DexCacheClass)); - java_lang_DexCache->SetStatus(Class::kStatusResolved); + java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCacheClass)); + java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved); // Constructor, Field, Method, and AbstractMethod are necessary so that FindClass can link members. - SirtRef java_lang_reflect_Field(self, AllocClass(self, java_lang_Class.get(), - sizeof(FieldClass))); + SirtRef java_lang_reflect_Field(self, AllocClass(self, java_lang_Class.get(), + sizeof(mirror::FieldClass))); CHECK(java_lang_reflect_Field.get() != NULL); - java_lang_reflect_Field->SetObjectSize(sizeof(Field)); + java_lang_reflect_Field->SetObjectSize(sizeof(mirror::Field)); SetClassRoot(kJavaLangReflectField, java_lang_reflect_Field.get()); - java_lang_reflect_Field->SetStatus(Class::kStatusResolved); - Field::SetClass(java_lang_reflect_Field.get()); + java_lang_reflect_Field->SetStatus(mirror::Class::kStatusResolved); + mirror::Field::SetClass(java_lang_reflect_Field.get()); - SirtRef java_lang_reflect_AbstractMethod(self, AllocClass(self, java_lang_Class.get(), - sizeof(MethodClass))); + SirtRef java_lang_reflect_AbstractMethod(self, AllocClass(self, java_lang_Class.get(), + sizeof(mirror::AbstractMethodClass))); CHECK(java_lang_reflect_AbstractMethod.get() != NULL); - java_lang_reflect_AbstractMethod->SetObjectSize(sizeof(AbstractMethod)); + java_lang_reflect_AbstractMethod->SetObjectSize(sizeof(mirror::AbstractMethod)); SetClassRoot(kJavaLangReflectAbstractMethod, java_lang_reflect_AbstractMethod.get()); - java_lang_reflect_AbstractMethod->SetStatus(Class::kStatusResolved); + 
java_lang_reflect_AbstractMethod->SetStatus(mirror::Class::kStatusResolved); - SirtRef java_lang_reflect_Constructor(self, AllocClass(self, java_lang_Class.get(), - sizeof(MethodClass))); + SirtRef java_lang_reflect_Constructor(self, AllocClass(self, java_lang_Class.get(), + sizeof(mirror::AbstractMethodClass))); CHECK(java_lang_reflect_Constructor.get() != NULL); - java_lang_reflect_Constructor->SetObjectSize(sizeof(Constructor)); + java_lang_reflect_Constructor->SetObjectSize(sizeof(mirror::Constructor)); java_lang_reflect_Constructor->SetSuperClass(java_lang_reflect_AbstractMethod.get()); SetClassRoot(kJavaLangReflectConstructor, java_lang_reflect_Constructor.get()); - java_lang_reflect_Constructor->SetStatus(Class::kStatusResolved); + java_lang_reflect_Constructor->SetStatus(mirror::Class::kStatusResolved); - SirtRef java_lang_reflect_Method(self, AllocClass(self, java_lang_Class.get(), - sizeof(MethodClass))); + SirtRef java_lang_reflect_Method(self, AllocClass(self, java_lang_Class.get(), + sizeof(mirror::AbstractMethodClass))); CHECK(java_lang_reflect_Method.get() != NULL); - java_lang_reflect_Method->SetObjectSize(sizeof(Method)); + java_lang_reflect_Method->SetObjectSize(sizeof(mirror::Method)); java_lang_reflect_Method->SetSuperClass(java_lang_reflect_AbstractMethod.get()); SetClassRoot(kJavaLangReflectMethod, java_lang_reflect_Method.get()); - java_lang_reflect_Method->SetStatus(Class::kStatusResolved); + java_lang_reflect_Method->SetStatus(mirror::Class::kStatusResolved); - AbstractMethod::SetClasses(java_lang_reflect_Constructor.get(), java_lang_reflect_Method.get()); + mirror::AbstractMethod::SetClasses(java_lang_reflect_Constructor.get(), + java_lang_reflect_Method.get()); // Set up array classes for string, field, method - SirtRef object_array_string(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef object_array_string(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); 
object_array_string->SetComponentType(java_lang_String.get()); SetClassRoot(kJavaLangStringArrayClass, object_array_string.get()); - SirtRef object_array_abstract_method(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef object_array_abstract_method(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); object_array_abstract_method->SetComponentType(java_lang_reflect_AbstractMethod.get()); SetClassRoot(kJavaLangReflectAbstractMethodArrayClass, object_array_abstract_method.get()); - SirtRef object_array_field(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef object_array_field(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); object_array_field->SetComponentType(java_lang_reflect_Field.get()); SetClassRoot(kJavaLangReflectFieldArrayClass, object_array_field.get()); - SirtRef object_array_method(self, AllocClass(self, java_lang_Class.get(), sizeof(Class))); + SirtRef object_array_method(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class))); object_array_method->SetComponentType(java_lang_reflect_Method.get()); SetClassRoot(kJavaLangReflectMethodArrayClass, object_array_method.get()); @@ -382,55 +397,55 @@ void ClassLinker::InitFromCompiler(const std::vector& boot_class SetClassRoot(kPrimitiveChar, char_class.get()); // needs descriptor // Object, String and DexCache need to be rerun through FindSystemClass to finish init - java_lang_Object->SetStatus(Class::kStatusNotReady); - Class* Object_class = FindSystemClass("Ljava/lang/Object;"); + java_lang_Object->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* Object_class = FindSystemClass("Ljava/lang/Object;"); CHECK_EQ(java_lang_Object.get(), Object_class); - CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(Object)); - java_lang_String->SetStatus(Class::kStatusNotReady); - Class* String_class = FindSystemClass("Ljava/lang/String;"); + CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(mirror::Object)); + 
java_lang_String->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* String_class = FindSystemClass("Ljava/lang/String;"); CHECK_EQ(java_lang_String.get(), String_class); - CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(String)); - java_lang_DexCache->SetStatus(Class::kStatusNotReady); - Class* DexCache_class = FindSystemClass("Ljava/lang/DexCache;"); + CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(mirror::String)); + java_lang_DexCache->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* DexCache_class = FindSystemClass("Ljava/lang/DexCache;"); CHECK_EQ(java_lang_String.get(), String_class); CHECK_EQ(java_lang_DexCache.get(), DexCache_class); - CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(DexCache)); + CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(mirror::DexCache)); // Setup the primitive array type classes - can't be done until Object has a vtable. SetClassRoot(kBooleanArrayClass, FindSystemClass("[Z")); - BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass)); + mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass)); SetClassRoot(kByteArrayClass, FindSystemClass("[B")); - ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass)); + mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass)); - Class* found_char_array_class = FindSystemClass("[C"); + mirror::Class* found_char_array_class = FindSystemClass("[C"); CHECK_EQ(char_array_class.get(), found_char_array_class); SetClassRoot(kShortArrayClass, FindSystemClass("[S")); - ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass)); + mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass)); - Class* found_int_array_class = FindSystemClass("[I"); + mirror::Class* found_int_array_class = FindSystemClass("[I"); CHECK_EQ(int_array_class.get(), found_int_array_class); SetClassRoot(kLongArrayClass, FindSystemClass("[J")); - LongArray::SetArrayClass(GetClassRoot(kLongArrayClass)); + 
mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass)); SetClassRoot(kFloatArrayClass, FindSystemClass("[F")); - FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass)); + mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass)); SetClassRoot(kDoubleArrayClass, FindSystemClass("[D")); - DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass)); + mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass)); - Class* found_class_array_class = FindSystemClass("[Ljava/lang/Class;"); + mirror::Class* found_class_array_class = FindSystemClass("[Ljava/lang/Class;"); CHECK_EQ(class_array_class.get(), found_class_array_class); - Class* found_object_array_class = FindSystemClass("[Ljava/lang/Object;"); + mirror::Class* found_object_array_class = FindSystemClass("[Ljava/lang/Object;"); CHECK_EQ(object_array_class.get(), found_object_array_class); // Setup the single, global copy of "iftable". - Class* java_lang_Cloneable = FindSystemClass("Ljava/lang/Cloneable;"); + mirror::Class* java_lang_Cloneable = FindSystemClass("Ljava/lang/Cloneable;"); CHECK(java_lang_Cloneable != NULL); - Class* java_io_Serializable = FindSystemClass("Ljava/io/Serializable;"); + mirror::Class* java_io_Serializable = FindSystemClass("Ljava/io/Serializable;"); CHECK(java_io_Serializable != NULL); // We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to // crawl up and explicitly list all of the supers as well. @@ -446,79 +461,79 @@ void ClassLinker::InitFromCompiler(const std::vector& boot_class CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1)); // Run Class, Constructor, Field, and Method through FindSystemClass. This initializes their // dex_cache_ fields and register them in classes_. 
- Class* Class_class = FindSystemClass("Ljava/lang/Class;"); + mirror::Class* Class_class = FindSystemClass("Ljava/lang/Class;"); CHECK_EQ(java_lang_Class.get(), Class_class); - java_lang_reflect_AbstractMethod->SetStatus(Class::kStatusNotReady); - Class* Abstract_method_class = FindSystemClass("Ljava/lang/reflect/AbstractMethod;"); + java_lang_reflect_AbstractMethod->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* Abstract_method_class = FindSystemClass("Ljava/lang/reflect/AbstractMethod;"); CHECK_EQ(java_lang_reflect_AbstractMethod.get(), Abstract_method_class); // Method extends AbstractMethod so must reset after. - java_lang_reflect_Method->SetStatus(Class::kStatusNotReady); - Class* Method_class = FindSystemClass("Ljava/lang/reflect/Method;"); + java_lang_reflect_Method->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* Method_class = FindSystemClass("Ljava/lang/reflect/Method;"); CHECK_EQ(java_lang_reflect_Method.get(), Method_class); // Constructor extends AbstractMethod so must reset after. 
- java_lang_reflect_Constructor->SetStatus(Class::kStatusNotReady); - Class* Constructor_class = FindSystemClass("Ljava/lang/reflect/Constructor;"); + java_lang_reflect_Constructor->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* Constructor_class = FindSystemClass("Ljava/lang/reflect/Constructor;"); CHECK_EQ(java_lang_reflect_Constructor.get(), Constructor_class); - java_lang_reflect_Field->SetStatus(Class::kStatusNotReady); - Class* Field_class = FindSystemClass("Ljava/lang/reflect/Field;"); + java_lang_reflect_Field->SetStatus(mirror::Class::kStatusNotReady); + mirror::Class* Field_class = FindSystemClass("Ljava/lang/reflect/Field;"); CHECK_EQ(java_lang_reflect_Field.get(), Field_class); - Class* String_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangStringArrayClass]); + mirror::Class* String_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangStringArrayClass]); CHECK_EQ(object_array_string.get(), String_array_class); - Class* Abstract_method_array_class = + mirror::Class* Abstract_method_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectAbstractMethodArrayClass]); CHECK_EQ(object_array_abstract_method.get(), Abstract_method_array_class); - Class* Field_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectFieldArrayClass]); + mirror::Class* Field_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectFieldArrayClass]); CHECK_EQ(object_array_field.get(), Field_array_class); - Class* Method_array_class = + mirror::Class* Method_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectMethodArrayClass]); CHECK_EQ(object_array_method.get(), Method_array_class); // End of special init trickery, subsequent classes may be loaded via FindSystemClass. // Create java.lang.reflect.Proxy root. 
- Class* java_lang_reflect_Proxy = FindSystemClass("Ljava/lang/reflect/Proxy;"); + mirror::Class* java_lang_reflect_Proxy = FindSystemClass("Ljava/lang/reflect/Proxy;"); SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy); // java.lang.ref classes need to be specially flagged, but otherwise are normal classes - Class* java_lang_ref_Reference = FindSystemClass("Ljava/lang/ref/Reference;"); + mirror::Class* java_lang_ref_Reference = FindSystemClass("Ljava/lang/ref/Reference;"); SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference); - Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;"); + mirror::Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;"); java_lang_ref_FinalizerReference->SetAccessFlags( java_lang_ref_FinalizerReference->GetAccessFlags() | kAccClassIsReference | kAccClassIsFinalizerReference); - Class* java_lang_ref_PhantomReference = FindSystemClass("Ljava/lang/ref/PhantomReference;"); + mirror::Class* java_lang_ref_PhantomReference = FindSystemClass("Ljava/lang/ref/PhantomReference;"); java_lang_ref_PhantomReference->SetAccessFlags( java_lang_ref_PhantomReference->GetAccessFlags() | kAccClassIsReference | kAccClassIsPhantomReference); - Class* java_lang_ref_SoftReference = FindSystemClass("Ljava/lang/ref/SoftReference;"); + mirror::Class* java_lang_ref_SoftReference = FindSystemClass("Ljava/lang/ref/SoftReference;"); java_lang_ref_SoftReference->SetAccessFlags( java_lang_ref_SoftReference->GetAccessFlags() | kAccClassIsReference); - Class* java_lang_ref_WeakReference = FindSystemClass("Ljava/lang/ref/WeakReference;"); + mirror::Class* java_lang_ref_WeakReference = FindSystemClass("Ljava/lang/ref/WeakReference;"); java_lang_ref_WeakReference->SetAccessFlags( java_lang_ref_WeakReference->GetAccessFlags() | kAccClassIsReference | kAccClassIsWeakReference); // Setup the ClassLoader, verifying the object_size_. 
- Class* java_lang_ClassLoader = FindSystemClass("Ljava/lang/ClassLoader;"); - CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), sizeof(ClassLoader)); + mirror::Class* java_lang_ClassLoader = FindSystemClass("Ljava/lang/ClassLoader;"); + CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), sizeof(mirror::ClassLoader)); SetClassRoot(kJavaLangClassLoader, java_lang_ClassLoader); // Set up java.lang.Throwable, java.lang.ClassNotFoundException, and // java.lang.StackTraceElement as a convenience. SetClassRoot(kJavaLangThrowable, FindSystemClass("Ljava/lang/Throwable;")); - Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); + mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); SetClassRoot(kJavaLangClassNotFoundException, FindSystemClass("Ljava/lang/ClassNotFoundException;")); SetClassRoot(kJavaLangStackTraceElement, FindSystemClass("Ljava/lang/StackTraceElement;")); SetClassRoot(kJavaLangStackTraceElementArrayClass, FindSystemClass("[Ljava/lang/StackTraceElement;")); - StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); + mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); FinishInit(); @@ -532,37 +547,37 @@ void ClassLinker::FinishInit() { // Note: we hard code the field indexes here rather than using FindInstanceField // as the types of the field can't be resolved prior to the runtime being // fully initialized - Class* java_lang_ref_Reference = GetClassRoot(kJavaLangRefReference); - Class* java_lang_ref_ReferenceQueue = FindSystemClass("Ljava/lang/ref/ReferenceQueue;"); - Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;"); + mirror::Class* java_lang_ref_Reference = GetClassRoot(kJavaLangRefReference); + mirror::Class* java_lang_ref_ReferenceQueue = FindSystemClass("Ljava/lang/ref/ReferenceQueue;"); + mirror::Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;"); const DexFile& java_lang_dex = 
*java_lang_ref_Reference->GetDexCache()->GetDexFile(); - Field* pendingNext = java_lang_ref_Reference->GetInstanceField(0); + mirror::Field* pendingNext = java_lang_ref_Reference->GetInstanceField(0); FieldHelper fh(pendingNext, this); CHECK_STREQ(fh.GetName(), "pendingNext"); CHECK_EQ(java_lang_dex.GetFieldId(pendingNext->GetDexFieldIndex()).type_idx_, java_lang_ref_Reference->GetDexTypeIndex()); - Field* queue = java_lang_ref_Reference->GetInstanceField(1); + mirror::Field* queue = java_lang_ref_Reference->GetInstanceField(1); fh.ChangeField(queue); CHECK_STREQ(fh.GetName(), "queue"); CHECK_EQ(java_lang_dex.GetFieldId(queue->GetDexFieldIndex()).type_idx_, java_lang_ref_ReferenceQueue->GetDexTypeIndex()); - Field* queueNext = java_lang_ref_Reference->GetInstanceField(2); + mirror::Field* queueNext = java_lang_ref_Reference->GetInstanceField(2); fh.ChangeField(queueNext); CHECK_STREQ(fh.GetName(), "queueNext"); CHECK_EQ(java_lang_dex.GetFieldId(queueNext->GetDexFieldIndex()).type_idx_, java_lang_ref_Reference->GetDexTypeIndex()); - Field* referent = java_lang_ref_Reference->GetInstanceField(3); + mirror::Field* referent = java_lang_ref_Reference->GetInstanceField(3); fh.ChangeField(referent); CHECK_STREQ(fh.GetName(), "referent"); CHECK_EQ(java_lang_dex.GetFieldId(referent->GetDexFieldIndex()).type_idx_, GetClassRoot(kJavaLangObject)->GetDexTypeIndex()); - Field* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2); + mirror::Field* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2); fh.ChangeField(zombie); CHECK_STREQ(fh.GetName(), "zombie"); CHECK_EQ(java_lang_dex.GetFieldId(zombie->GetDexFieldIndex()).type_idx_, @@ -578,7 +593,7 @@ void ClassLinker::FinishInit() { // ensure all class_roots_ are initialized for (size_t i = 0; i < kClassRootsMax; i++) { ClassRoot class_root = static_cast(i); - Class* klass = GetClassRoot(class_root); + mirror::Class* klass = GetClassRoot(class_root); CHECK(klass != NULL); DCHECK(klass->IsArrayClass() || 
klass->IsPrimitive() || klass->GetDexCache() != NULL); // note SetClassRoot does additional validation. @@ -597,7 +612,7 @@ void ClassLinker::FinishInit() { void ClassLinker::RunRootClinits() { Thread* self = Thread::Current(); for (size_t i = 0; i < ClassLinker::kClassRootsMax; ++i) { - Class* c = GetClassRoot(ClassRoot(i)); + mirror::Class* c = GetClassRoot(ClassRoot(i)); if (!c->IsArrayClass() && !c->IsPrimitive()) { EnsureInitialized(GetClassRoot(ClassRoot(i)), true, true); self->AssertNoPendingException(); @@ -699,7 +714,8 @@ OatFile* ClassLinker::OpenOat(const ImageSpace* space) { const ImageHeader& image_header = space->GetImageHeader(); // Grab location but don't use Object::AsString as we haven't yet initialized the roots to // check the down cast - String* oat_location = down_cast(image_header.GetImageRoot(ImageHeader::kOatLocation)); + mirror::String* oat_location = + down_cast(image_header.GetImageRoot(ImageHeader::kOatLocation)); std::string oat_filename; oat_filename += runtime->GetHostPrefix(); oat_filename += oat_location->ToModifiedUtf8(); @@ -971,21 +987,22 @@ void ClassLinker::InitFromImage() { CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U); CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U); CHECK(oat_file->GetOatHeader().GetImageFileLocation().empty()); - Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches); - ObjectArray* dex_caches = dex_caches_object->AsObjectArray(); + mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches); + mirror::ObjectArray* dex_caches = + dex_caches_object->AsObjectArray(); - ObjectArray* class_roots = - space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray(); + mirror::ObjectArray* class_roots = + space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray(); // Special case of setting up the String class early so that we can test arbitrary objects 
// as being Strings or not - String::SetClass(class_roots->Get(kJavaLangString)); + mirror::String::SetClass(class_roots->Get(kJavaLangString)); CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(), static_cast(dex_caches->GetLength())); Thread* self = Thread::Current(); for (int i = 0; i < dex_caches->GetLength(); i++) { - SirtRef dex_cache(self, dex_caches->Get(i)); + SirtRef dex_cache(self, dex_caches->Get(i)); const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8()); const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location); CHECK(oat_dex_file != NULL) << oat_file->GetLocation() << " " << dex_file_location; @@ -1008,33 +1025,33 @@ void ClassLinker::InitFromImage() { } // reinit class_roots_ - Class::SetClassClass(class_roots->Get(kJavaLangClass)); + mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass)); class_roots_ = class_roots; // reinit array_iftable_ from any array class instance, they should be == array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable(); DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable()); // String class root was set above - Field::SetClass(GetClassRoot(kJavaLangReflectField)); - AbstractMethod::SetClasses(GetClassRoot(kJavaLangReflectConstructor), + mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField)); + mirror::AbstractMethod::SetClasses(GetClassRoot(kJavaLangReflectConstructor), GetClassRoot(kJavaLangReflectMethod)); - BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass)); - ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass)); - CharArray::SetArrayClass(GetClassRoot(kCharArrayClass)); - DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass)); - FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass)); - IntArray::SetArrayClass(GetClassRoot(kIntArrayClass)); - LongArray::SetArrayClass(GetClassRoot(kLongArrayClass)); - ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass)); - 
Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); - StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); + mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass)); + mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass)); + mirror::CharArray::SetArrayClass(GetClassRoot(kCharArrayClass)); + mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass)); + mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass)); + mirror::IntArray::SetArrayClass(GetClassRoot(kIntArrayClass)); + mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass)); + mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass)); + mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); + mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); FinishInit(); VLOG(startup) << "ClassLinker::InitFromImage exiting"; } -void ClassLinker::InitFromImageCallback(Object* obj, void* arg) { +void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) { DCHECK(obj != NULL); DCHECK(arg != NULL); ClassLinker* class_linker = reinterpret_cast(arg); @@ -1045,9 +1062,9 @@ void ClassLinker::InitFromImageCallback(Object* obj, void* arg) { } if (obj->IsClass()) { // restore class to ClassLinker::classes_ table - Class* klass = obj->AsClass(); + mirror::Class* klass = obj->AsClass(); ClassHelper kh(klass, class_linker); - Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true); + mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true); DCHECK(existing == NULL) << kh.GetDescriptor(); return; } @@ -1056,7 +1073,7 @@ void ClassLinker::InitFromImageCallback(Object* obj, void* arg) { // Keep in sync with InitCallback. Anything we visit, we need to // reinit references to when reinitializing a ClassLinker from a // mapped image. 
-void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg) { visitor(class_roots_, arg); Thread* self = Thread::Current(); { @@ -1096,16 +1113,16 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const { } } -static bool GetClassesVisitor(Class* c, void* arg) { - std::set* classes = reinterpret_cast*>(arg); +static bool GetClassesVisitor(mirror::Class* c, void* arg) { + std::set* classes = reinterpret_cast*>(arg); classes->insert(c); return true; } void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const { - std::set classes; + std::set classes; VisitClasses(GetClassesVisitor, &classes); - typedef std::set::const_iterator It; // TODO: C++0x auto + typedef std::set::const_iterator It; // TODO: C++0x auto for (It it = classes.begin(), end = classes.end(); it != end; ++it) { if (!visitor(*it, arg)) { return; @@ -1115,57 +1132,61 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* ar ClassLinker::~ClassLinker() { - Class::ResetClass(); - String::ResetClass(); - Field::ResetClass(); - AbstractMethod::ResetClasses(); - BooleanArray::ResetArrayClass(); - ByteArray::ResetArrayClass(); - CharArray::ResetArrayClass(); - DoubleArray::ResetArrayClass(); - FloatArray::ResetArrayClass(); - IntArray::ResetArrayClass(); - LongArray::ResetArrayClass(); - ShortArray::ResetArrayClass(); - Throwable::ResetClass(); - StackTraceElement::ResetClass(); + mirror::Class::ResetClass(); + mirror::String::ResetClass(); + mirror::Field::ResetClass(); + mirror::AbstractMethod::ResetClasses(); + mirror::BooleanArray::ResetArrayClass(); + mirror::ByteArray::ResetArrayClass(); + mirror::CharArray::ResetArrayClass(); + mirror::DoubleArray::ResetArrayClass(); + mirror::FloatArray::ResetArrayClass(); + mirror::IntArray::ResetArrayClass(); + mirror::LongArray::ResetArrayClass(); + mirror::ShortArray::ResetArrayClass(); + 
mirror::Throwable::ResetClass(); + mirror::StackTraceElement::ResetClass(); STLDeleteElements(&boot_class_path_); STLDeleteElements(&oat_files_); } -DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) { +mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) { Heap* heap = Runtime::Current()->GetHeap(); - Class* dex_cache_class = GetClassRoot(kJavaLangDexCache); - SirtRef dex_cache(self, - down_cast(heap->AllocObject(self, dex_cache_class, - dex_cache_class->GetObjectSize()))); + mirror::Class* dex_cache_class = GetClassRoot(kJavaLangDexCache); + SirtRef dex_cache(self, + down_cast(heap->AllocObject(self, dex_cache_class, + dex_cache_class->GetObjectSize()))); if (dex_cache.get() == NULL) { return NULL; } - SirtRef location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str())); + SirtRef + location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str())); if (location.get() == NULL) { return NULL; } - SirtRef > strings(self, AllocStringArray(self, dex_file.NumStringIds())); + SirtRef > + strings(self, AllocStringArray(self, dex_file.NumStringIds())); if (strings.get() == NULL) { return NULL; } - SirtRef > types(self, AllocClassArray(self, dex_file.NumTypeIds())); + SirtRef > + types(self, AllocClassArray(self, dex_file.NumTypeIds())); if (types.get() == NULL) { return NULL; } - SirtRef > + SirtRef > methods(self, AllocAbstractMethodArray(self, dex_file.NumMethodIds())); if (methods.get() == NULL) { return NULL; } - SirtRef > fields(self, AllocFieldArray(self, dex_file.NumFieldIds())); + SirtRef > + fields(self, AllocFieldArray(self, dex_file.NumFieldIds())); if (fields.get() == NULL) { return NULL; } - SirtRef > + SirtRef > initialized_static_storage(self, - AllocObjectArray(self, dex_file.NumTypeIds())); + AllocObjectArray(self, dex_file.NumTypeIds())); if (initialized_static_storage.get() == NULL) { return NULL; } @@ -1180,40 +1201,41 @@ DexCache* ClassLinker::AllocDexCache(Thread* self, 
const DexFile& dex_file) { return dex_cache.get(); } -Class* ClassLinker::AllocClass(Thread* self, Class* java_lang_Class, size_t class_size) { - DCHECK_GE(class_size, sizeof(Class)); +mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class, + size_t class_size) { + DCHECK_GE(class_size, sizeof(mirror::Class)); Heap* heap = Runtime::Current()->GetHeap(); - SirtRef klass(self, + SirtRef klass(self, heap->AllocObject(self, java_lang_Class, class_size)->AsClass()); klass->SetPrimitiveType(Primitive::kPrimNot); // default to not being primitive klass->SetClassSize(class_size); return klass.get(); } -Class* ClassLinker::AllocClass(Thread* self, size_t class_size) { +mirror::Class* ClassLinker::AllocClass(Thread* self, size_t class_size) { return AllocClass(self, GetClassRoot(kJavaLangClass), class_size); } -Field* ClassLinker::AllocField(Thread* self) { - return down_cast(GetClassRoot(kJavaLangReflectField)->AllocObject(self)); +mirror::Field* ClassLinker::AllocField(Thread* self) { + return down_cast(GetClassRoot(kJavaLangReflectField)->AllocObject(self)); } -Method* ClassLinker::AllocMethod(Thread* self) { - return down_cast(GetClassRoot(kJavaLangReflectMethod)->AllocObject(self)); +mirror::Method* ClassLinker::AllocMethod(Thread* self) { + return down_cast(GetClassRoot(kJavaLangReflectMethod)->AllocObject(self)); } -Constructor* ClassLinker::AllocConstructor(Thread* self) { - return down_cast(GetClassRoot(kJavaLangReflectConstructor)->AllocObject(self)); +mirror::Constructor* ClassLinker::AllocConstructor(Thread* self) { + return down_cast(GetClassRoot(kJavaLangReflectConstructor)->AllocObject(self)); } -ObjectArray* ClassLinker::AllocStackTraceElementArray(Thread* self, - size_t length) { - return ObjectArray::Alloc(self, - GetClassRoot(kJavaLangStackTraceElementArrayClass), - length); +mirror::ObjectArray* ClassLinker::AllocStackTraceElementArray(Thread* self, + size_t length) { + return mirror::ObjectArray::Alloc(self, + 
GetClassRoot(kJavaLangStackTraceElementArrayClass), + length); } -static Class* EnsureResolved(Thread* self, Class* klass) +static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); // Wait for the class if it has not already been linked. @@ -1223,7 +1245,7 @@ static Class* EnsureResolved(Thread* self, Class* klass) if (!klass->IsResolved() && klass->GetClinitThreadId() == self->GetTid()) { self->ThrowNewException("Ljava/lang/ClassCircularityError;", PrettyDescriptor(klass).c_str()); - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return NULL; } // Wait for the pending initialization to complete. @@ -1243,11 +1265,11 @@ static Class* EnsureResolved(Thread* self, Class* klass) return klass; } -Class* ClassLinker::FindSystemClass(const char* descriptor) { +mirror::Class* ClassLinker::FindSystemClass(const char* descriptor) { return FindClass(descriptor, NULL); } -Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) { +mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoader* class_loader) { DCHECK_NE(*descriptor, '\0') << "descriptor is empty string"; Thread* self = Thread::Current(); DCHECK(self != NULL); @@ -1258,7 +1280,7 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) return FindPrimitiveClass(descriptor[0]); } // Find the class in the loaded classes table. 
- Class* klass = LookupClass(descriptor, class_loader); + mirror::Class* klass = LookupClass(descriptor, class_loader); if (klass != NULL) { return EnsureResolved(self, klass); } @@ -1274,7 +1296,7 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) } else if (Runtime::Current()->UseCompileTimeClassPath()) { // first try the boot class path - Class* system_class = FindSystemClass(descriptor); + mirror::Class* system_class = FindSystemClass(descriptor); if (system_class != NULL) { return system_class; } @@ -1321,8 +1343,8 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) class_name_string.c_str()); return NULL; } else { - // success, return Class* - return soa.Decode(result.get()); + // success, return mirror::Class* + return soa.Decode(result.get()); } } @@ -1330,12 +1352,12 @@ Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) return NULL; } -Class* ClassLinker::DefineClass(const StringPiece& descriptor, - ClassLoader* class_loader, - const DexFile& dex_file, - const DexFile::ClassDef& dex_class_def) { +mirror::Class* ClassLinker::DefineClass(const StringPiece& descriptor, + mirror::ClassLoader* class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def) { Thread* self = Thread::Current(); - SirtRef klass(self, NULL); + SirtRef klass(self, NULL); // Load the class from the dex file. if (!init_done_) { // finish up init of hand crafted class_roots_ @@ -1365,13 +1387,13 @@ Class* ClassLinker::DefineClass(const StringPiece& descriptor, LoadClass(dex_file, dex_class_def, klass, class_loader); // Check for a pending exception during load if (self->IsExceptionPending()) { - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return NULL; } ObjectLock lock(self, klass.get()); klass->SetClinitThreadId(self->GetTid()); // Add the newly loaded class to the loaded classes table. 
- SirtRef existing(self, InsertClass(descriptor, klass.get(), false)); + SirtRef existing(self, InsertClass(descriptor, klass.get(), false)); if (existing.get() != NULL) { // We failed to insert because we raced with another thread. return EnsureResolved(self, existing.get()); @@ -1380,7 +1402,7 @@ Class* ClassLinker::DefineClass(const StringPiece& descriptor, CHECK(!klass->IsLoaded()); if (!LoadSuperAndInterfaces(klass, dex_file)) { // Loading failed. - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); lock.NotifyAll(); return NULL; } @@ -1389,7 +1411,7 @@ Class* ClassLinker::DefineClass(const StringPiece& descriptor, CHECK(!klass->IsResolved()); if (!LinkClass(klass, NULL)) { // Linking failed. - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); lock.NotifyAll(); return NULL; } @@ -1433,7 +1455,7 @@ size_t ClassLinker::SizeOfClass(const DexFile& dex_file, } } // start with generic class data - size_t size = sizeof(Class); + size_t size = sizeof(mirror::Class); // follow with reference fields which must be contiguous at start size += (num_ref * sizeof(uint32_t)); // if there are 64-bit fields to add, make sure they are aligned @@ -1502,10 +1524,10 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint32 return 0; } -const OatFile::OatMethod ClassLinker::GetOatMethodFor(const AbstractMethod* method) { +const OatFile::OatMethod ClassLinker::GetOatMethodFor(const mirror::AbstractMethod* method) { // Although we overwrite the trampoline of non-static methods, we may get here via the resolution // method for direct methods (or virtual methods made direct). - Class* declaring_class = method->GetDeclaringClass(); + mirror::Class* declaring_class = method->GetDeclaringClass(); size_t oat_method_index; if (method->IsStatic() || method->IsDirect()) { // Simple case where the oat method index was stashed at load time. 
@@ -1536,7 +1558,7 @@ const OatFile::OatMethod ClassLinker::GetOatMethodFor(const AbstractMethod* meth } // Special case to get oat code without overwriting a trampoline. -const void* ClassLinker::GetOatCodeFor(const AbstractMethod* method) { +const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) { CHECK(Runtime::Current()->IsCompiler() || method->GetDeclaringClass()->IsInitializing()); return GetOatMethodFor(method).GetCode(); } @@ -1550,7 +1572,7 @@ const void* ClassLinker::GetOatCodeFor(const DexFile& dex_file, uint32_t method_ return oat_class->GetOatMethod(oat_method_idx).GetCode(); } -void ClassLinker::FixupStaticTrampolines(Class* klass) { +void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { ClassHelper kh(klass); const DexFile::ClassDef* dex_class_def = kh.GetClassDef(); CHECK(dex_class_def != NULL); @@ -1577,7 +1599,7 @@ void ClassLinker::FixupStaticTrampolines(Class* klass) { // Link the code of methods skipped by LinkCode const void* trampoline = Runtime::Current()->GetResolutionStubArray(Runtime::kStaticMethod)->GetData(); for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { - AbstractMethod* method = klass->GetDirectMethod(i); + mirror::AbstractMethod* method = klass->GetDirectMethod(i); if (Runtime::Current()->IsMethodTracingActive()) { Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (instrumentation->GetSavedCodeFromMap(method) == trampoline) { @@ -1596,7 +1618,7 @@ void ClassLinker::FixupStaticTrampolines(Class* klass) { } } -static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, +static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, uint32_t method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Every kind of method should at least get an invoke stub from the oat_method. 
@@ -1627,11 +1649,11 @@ static void LinkCode(SirtRef& method, const OatFile::OatClass* o void ClassLinker::LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - SirtRef& klass, - ClassLoader* class_loader) { + SirtRef& klass, + mirror::ClassLoader* class_loader) { CHECK(klass.get() != NULL); CHECK(klass->GetDexCache() != NULL); - CHECK_EQ(Class::kStatusNotReady, klass->GetStatus()); + CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus()); const char* descriptor = dex_file.GetClassDescriptor(dex_class_def); CHECK(descriptor != NULL); @@ -1643,7 +1665,7 @@ void ClassLinker::LoadClass(const DexFile& dex_file, klass->SetAccessFlags(access_flags); klass->SetClassLoader(class_loader); DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot); - klass->SetStatus(Class::kStatusIdx); + klass->SetStatus(mirror::Class::kStatusIdx); klass->SetDexTypeIndex(dex_class_def.class_idx_); @@ -1661,12 +1683,12 @@ void ClassLinker::LoadClass(const DexFile& dex_file, klass->SetIFields(AllocFieldArray(self, it.NumInstanceFields())); } for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) { - SirtRef sfield(self, AllocField(self)); + SirtRef sfield(self, AllocField(self)); klass->SetStaticField(i, sfield.get()); LoadField(dex_file, it, klass, sfield); } for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) { - SirtRef ifield(self, AllocField(self)); + SirtRef ifield(self, AllocField(self)); klass->SetInstanceField(i, ifield.get()); LoadField(dex_file, it, klass, ifield); } @@ -1687,7 +1709,7 @@ void ClassLinker::LoadClass(const DexFile& dex_file, } size_t class_def_method_index = 0; for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { - SirtRef method(self, LoadMethod(self, dex_file, it, klass)); + SirtRef method(self, LoadMethod(self, dex_file, it, klass)); klass->SetDirectMethod(i, method.get()); if (oat_class.get() != NULL) { LinkCode(method, oat_class.get(), class_def_method_index); @@ -1696,7 +1718,7 @@ void 
ClassLinker::LoadClass(const DexFile& dex_file, class_def_method_index++; } for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) { - SirtRef method(self, LoadMethod(self, dex_file, it, klass)); + SirtRef method(self, LoadMethod(self, dex_file, it, klass)); klass->SetVirtualMethod(i, method.get()); DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i); if (oat_class.get() != NULL) { @@ -1708,21 +1730,21 @@ void ClassLinker::LoadClass(const DexFile& dex_file, } void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it, - SirtRef& klass, SirtRef& dst) { + SirtRef& klass, SirtRef& dst) { uint32_t field_idx = it.GetMemberIndex(); dst->SetDexFieldIndex(field_idx); dst->SetDeclaringClass(klass.get()); dst->SetAccessFlags(it.GetMemberAccessFlags()); } -AbstractMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, - const ClassDataItemIterator& it, - SirtRef& klass) { +mirror::AbstractMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, + const ClassDataItemIterator& it, + SirtRef& klass) { uint32_t dex_method_idx = it.GetMemberIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); StringPiece method_name(dex_file.GetMethodName(method_id)); - AbstractMethod* dst = NULL; + mirror::AbstractMethod* dst = NULL; if (method_name == "") { dst = AllocConstructor(self); } else { @@ -1780,11 +1802,11 @@ AbstractMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) { Thread* self = Thread::Current(); - SirtRef dex_cache(self, AllocDexCache(self, dex_file)); + SirtRef dex_cache(self, AllocDexCache(self, dex_file)); AppendToBootClassPath(dex_file, dex_cache); } -void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) { +void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) { CHECK(dex_cache.get() != NULL) << dex_file.GetLocation(); 
boot_class_path_.push_back(&dex_file); RegisterDexFile(dex_file, dex_cache); @@ -1805,7 +1827,7 @@ bool ClassLinker::IsDexFileRegistered(const DexFile& dex_file) const { return IsDexFileRegisteredLocked(dex_file); } -void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) { +void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) { dex_lock_.AssertHeld(Thread::Current()); CHECK(dex_cache.get() != NULL) << dex_file.GetLocation(); CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation())); @@ -1825,7 +1847,7 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) { // Don't alloc while holding the lock, since allocation may need to // suspend all threads and another thread may need the dex_lock_ to // get to a suspend point. - SirtRef dex_cache(self, AllocDexCache(self, dex_file)); + SirtRef dex_cache(self, AllocDexCache(self, dex_file)); { MutexLock mu(self, dex_lock_); if (IsDexFileRegisteredLocked(dex_file)) { @@ -1835,16 +1857,16 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) { } } -void ClassLinker::RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) { +void ClassLinker::RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) { MutexLock mu(Thread::Current(), dex_lock_); RegisterDexFileLocked(dex_file, dex_cache); } -DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const { +mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const { MutexLock mu(Thread::Current(), dex_lock_); // Search assuming unique-ness of dex file. for (size_t i = 0; i != dex_caches_.size(); ++i) { - DexCache* dex_cache = dex_caches_[i]; + mirror::DexCache* dex_cache = dex_caches_[i]; if (dex_cache->GetDexFile() == &dex_file) { return dex_cache; } @@ -1852,35 +1874,39 @@ DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const { // Search matching by location name. 
std::string location(dex_file.GetLocation()); for (size_t i = 0; i != dex_caches_.size(); ++i) { - DexCache* dex_cache = dex_caches_[i]; + mirror::DexCache* dex_cache = dex_caches_[i]; if (dex_cache->GetDexFile()->GetLocation() == location) { return dex_cache; } } // Failure, dump diagnostic and abort. for (size_t i = 0; i != dex_caches_.size(); ++i) { - DexCache* dex_cache = dex_caches_[i]; + mirror::DexCache* dex_cache = dex_caches_[i]; LOG(ERROR) << "Registered dex file " << i << " = " << dex_cache->GetDexFile()->GetLocation(); } LOG(FATAL) << "Failed to find DexCache for DexFile " << location; return NULL; } -void ClassLinker::FixupDexCaches(AbstractMethod* resolution_method) const { +void ClassLinker::FixupDexCaches(mirror::AbstractMethod* resolution_method) const { MutexLock mu(Thread::Current(), dex_lock_); for (size_t i = 0; i != dex_caches_.size(); ++i) { dex_caches_[i]->Fixup(resolution_method); } } -Class* ClassLinker::InitializePrimitiveClass(Class* primitive_class, Primitive::Type type) { +mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) { + return InitializePrimitiveClass(AllocClass(self, sizeof(mirror::Class)), type); +} + +mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) { CHECK(primitive_class != NULL); // Must hold lock on object when initializing. 
ObjectLock lock(Thread::Current(), primitive_class); primitive_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract); primitive_class->SetPrimitiveType(type); - primitive_class->SetStatus(Class::kStatusInitialized); - Class* existing = InsertClass(Primitive::Descriptor(type), primitive_class, false); + primitive_class->SetStatus(mirror::Class::kStatusInitialized); + mirror::Class* existing = InsertClass(Primitive::Descriptor(type), primitive_class, false); CHECK(existing == NULL) << "InitPrimitiveClass(" << type << ") failed"; return primitive_class; } @@ -1898,11 +1924,12 @@ Class* ClassLinker::InitializePrimitiveClass(Class* primitive_class, Primitive:: // array class; that always comes from the base element class. // // Returns NULL with an exception raised on failure. -Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) { +mirror::Class* ClassLinker::CreateArrayClass(const std::string& descriptor, + mirror::ClassLoader* class_loader) { CHECK_EQ('[', descriptor[0]); // Identify the underlying component type - Class* component_type = FindClass(descriptor.substr(1).c_str(), class_loader); + mirror::Class* component_type = FindClass(descriptor.substr(1).c_str(), class_loader); if (component_type == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return NULL; @@ -1926,7 +1953,7 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* // class to the hash table --- necessary because of possible races with // other threads.) 
if (class_loader != component_type->GetClassLoader()) { - Class* new_class = LookupClass(descriptor.c_str(), component_type->GetClassLoader()); + mirror::Class* new_class = LookupClass(descriptor.c_str(), component_type->GetClassLoader()); if (new_class != NULL) { return new_class; } @@ -1941,7 +1968,7 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* // Array classes are simple enough that we don't need to do a full // link step. Thread* self = Thread::Current(); - SirtRef new_class(self, NULL); + SirtRef new_class(self, NULL); if (!init_done_) { // Classes that were hand created, ie not by FindSystemClass if (descriptor == "[Ljava/lang/Class;") { @@ -1963,7 +1990,7 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* } } if (new_class.get() == NULL) { - new_class.reset(AllocClass(self, sizeof(Class))); + new_class.reset(AllocClass(self, sizeof(mirror::Class))); if (new_class.get() == NULL) { return NULL; } @@ -1971,12 +1998,12 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* } ObjectLock lock(self, new_class.get()); // Must hold lock on object when initializing. DCHECK(new_class->GetComponentType() != NULL); - Class* java_lang_Object = GetClassRoot(kJavaLangObject); + mirror::Class* java_lang_Object = GetClassRoot(kJavaLangObject); new_class->SetSuperClass(java_lang_Object); new_class->SetVTable(java_lang_Object->GetVTable()); new_class->SetPrimitiveType(Primitive::kPrimNot); new_class->SetClassLoader(component_type->GetClassLoader()); - new_class->SetStatus(Class::kStatusInitialized); + new_class->SetStatus(mirror::Class::kStatusInitialized); // don't need to set new_class->SetObjectSize(..) 
// because Object::SizeOf delegates to Array::SizeOf @@ -2006,7 +2033,7 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* new_class->SetAccessFlags(((new_class->GetComponentType()->GetAccessFlags() & ~kAccInterface) | kAccFinal) & kAccJavaFlagsMask); - Class* existing = InsertClass(descriptor, new_class.get(), false); + mirror::Class* existing = InsertClass(descriptor, new_class.get(), false); if (existing == NULL) { return new_class.get(); } @@ -2019,7 +2046,7 @@ Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* return existing; } -Class* ClassLinker::FindPrimitiveClass(char type) { +mirror::Class* ClassLinker::FindPrimitiveClass(char type) { switch (Primitive::GetType(type)) { case Primitive::kPrimByte: return GetClassRoot(kPrimitiveByte); @@ -2047,9 +2074,9 @@ Class* ClassLinker::FindPrimitiveClass(char type) { return NULL; } -Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) { +mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) { if (VLOG_IS_ON(class_linker)) { - DexCache* dex_cache = klass->GetDexCache(); + mirror::DexCache* dex_cache = klass->GetDexCache(); std::string source; if (dex_cache != NULL) { source += " from "; @@ -2060,7 +2087,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo size_t hash = StringPieceHash()(descriptor); MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); Table& classes = image_class ? image_classes_ : classes_; - Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes); + mirror::Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes); #ifndef NDEBUG // Check we don't have the class in the other table in error Table& other_classes = image_class ? 
classes_ : image_classes_; @@ -2074,22 +2101,24 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo return NULL; } -bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_loader) { +bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) { size_t hash = Hash(descriptor); MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); typedef Table::iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh; - for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) { - Class* klass = it->second; + for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; + ++it) { + mirror::Class* klass = it->second; kh.ChangeClass(klass); if (strcmp(kh.GetDescriptor(), descriptor) == 0 && klass->GetClassLoader() == class_loader) { classes_.erase(it); return true; } } - for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) { - Class* klass = it->second; + for (It it = image_classes_.lower_bound(hash), end = classes_.end(); + it != end && it->first == hash; ++it) { + mirror::Class* klass = it->second; kh.ChangeClass(klass); if (strcmp(kh.GetDescriptor(), descriptor) == 0 && klass->GetClassLoader() == class_loader) { image_classes_.erase(it); @@ -2099,28 +2128,30 @@ bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_l return false; } -Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader) { +mirror::Class* ClassLinker::LookupClass(const char* descriptor, + const mirror::ClassLoader* class_loader) { size_t hash = Hash(descriptor); MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); // TODO: determine if its better to search classes_ or image_classes_ first - Class* klass = LookupClassLocked(descriptor, class_loader, 
hash, classes_); + mirror::Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_); if (klass != NULL) { return klass; } return LookupClassLocked(descriptor, class_loader, hash, image_classes_); } -Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* class_loader, - size_t hash, const Table& classes) { +mirror::Class* ClassLinker::LookupClassLocked(const char* descriptor, + const mirror::ClassLoader* class_loader, + size_t hash, const Table& classes) { ClassHelper kh(NULL, this); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) { - Class* klass = it->second; + mirror::Class* klass = it->second; kh.ChangeClass(klass); if (strcmp(descriptor, kh.GetDescriptor()) == 0 && klass->GetClassLoader() == class_loader) { #ifndef NDEBUG for (++it; it != end && it->first == hash; ++it) { - Class* klass2 = it->second; + mirror::Class* klass2 = it->second; kh.ChangeClass(klass2); CHECK(!(strcmp(descriptor, kh.GetDescriptor()) == 0 && klass2->GetClassLoader() == class_loader)) << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " " @@ -2133,7 +2164,7 @@ Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* return NULL; } -void ClassLinker::LookupClasses(const char* descriptor, std::vector& classes) { +void ClassLinker::LookupClasses(const char* descriptor, std::vector& classes) { classes.clear(); size_t hash = Hash(descriptor); MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); @@ -2141,14 +2172,14 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector& cla // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh(NULL, this); for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) { - Class* klass = it->second; + mirror::Class* klass = it->second; kh.ChangeClass(klass); if 
(strcmp(descriptor, kh.GetDescriptor()) == 0) { classes.push_back(klass); } } for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) { - Class* klass = it->second; + mirror::Class* klass = it->second; kh.ChangeClass(klass); if (strcmp(descriptor, kh.GetDescriptor()) == 0) { classes.push_back(klass); @@ -2156,7 +2187,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector& cla } } -void ClassLinker::VerifyClass(Class* klass) { +void ClassLinker::VerifyClass(mirror::Class* klass) { // TODO: assert that the monitor on the Class is held Thread* self = Thread::Current(); ObjectLock lock(self, klass); @@ -2174,16 +2205,17 @@ void ClassLinker::VerifyClass(Class* klass) { return; } - if (klass->GetStatus() == Class::kStatusResolved) { - klass->SetStatus(Class::kStatusVerifying); + if (klass->GetStatus() == mirror::Class::kStatusResolved) { + klass->SetStatus(mirror::Class::kStatusVerifying); } else { - CHECK_EQ(klass->GetStatus(), Class::kStatusRetryVerificationAtRuntime) << PrettyClass(klass); + CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime) + << PrettyClass(klass); CHECK(!Runtime::Current()->IsCompiler()); - klass->SetStatus(Class::kStatusVerifyingAtRuntime); + klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime); } // Verify super class. - Class* super = klass->GetSuperClass(); + mirror::Class* super = klass->GetSuperClass(); std::string error_msg; if (super != NULL) { // Acquire lock to prevent races on verifying the super class. 
@@ -2198,7 +2230,7 @@ void ClassLinker::VerifyClass(Class* klass) { error_msg += " that attempts to sub-class erroneous class "; error_msg += PrettyDescriptor(super); LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8(); - SirtRef cause(self, self->GetException()); + SirtRef cause(self, self->GetException()); if (cause.get() != NULL) { self->ClearException(); } @@ -2206,24 +2238,24 @@ void ClassLinker::VerifyClass(Class* klass) { if (cause.get() != NULL) { self->GetException()->SetCause(cause.get()); } - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return; } } // Try to use verification information from the oat file, otherwise do runtime verification. const DexFile& dex_file = *klass->GetDexCache()->GetDexFile(); - Class::Status oat_file_class_status(Class::kStatusNotReady); + mirror::Class::Status oat_file_class_status(mirror::Class::kStatusNotReady); bool preverified = VerifyClassUsingOatFile(dex_file, klass, oat_file_class_status); verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure; - if (oat_file_class_status == Class::kStatusError) { + if (oat_file_class_status == mirror::Class::kStatusError) { LOG(WARNING) << "Skipping runtime verification of erroneous class " << PrettyDescriptor(klass) << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8(); error_msg = "Rejecting class "; error_msg += PrettyDescriptor(klass); error_msg += " because it failed compile-time verification"; Thread::Current()->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return; } if (!preverified) { @@ -2239,16 +2271,16 @@ void ClassLinker::VerifyClass(Class* klass) { // Make sure all classes referenced by catch blocks are resolved. 
ResolveClassExceptionHandlerTypes(dex_file, klass); if (verifier_failure == verifier::MethodVerifier::kNoFailure) { - klass->SetStatus(Class::kStatusVerified); + klass->SetStatus(mirror::Class::kStatusVerified); } else { CHECK_EQ(verifier_failure, verifier::MethodVerifier::kSoftFailure); // Soft failures at compile time should be retried at runtime. Soft // failures at runtime will be handled by slow paths in the generated // code. Set status accordingly. if (Runtime::Current()->IsCompiler()) { - klass->SetStatus(Class::kStatusRetryVerificationAtRuntime); + klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime); } else { - klass->SetStatus(Class::kStatusVerified); + klass->SetStatus(mirror::Class::kStatusVerified); } } } else { @@ -2257,12 +2289,12 @@ void ClassLinker::VerifyClass(Class* klass) { << " because: " << error_msg; self->AssertNoPendingException(); self->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); } } -bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, - Class::Status& oat_file_class_status) { +bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, + mirror::Class::Status& oat_file_class_status) { if (!Runtime::Current()->IsStarted()) { return false; } @@ -2281,11 +2313,11 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, CHECK(oat_class.get() != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass) << " " << descriptor; oat_file_class_status = oat_class->GetStatus(); - if (oat_file_class_status == Class::kStatusVerified || - oat_file_class_status == Class::kStatusInitialized) { + if (oat_file_class_status == mirror::Class::kStatusVerified || + oat_file_class_status == mirror::Class::kStatusInitialized) { return true; } - if (oat_file_class_status == Class::kStatusRetryVerificationAtRuntime) { + if (oat_file_class_status == 
mirror::Class::kStatusRetryVerificationAtRuntime) { // Compile time verification failed with a soft error. Compile time verification can fail // because we have incomplete type information. Consider the following: // class ... { @@ -2305,12 +2337,12 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, // at compile time). return false; } - if (oat_file_class_status == Class::kStatusError) { + if (oat_file_class_status == mirror::Class::kStatusError) { // Compile time verification failed with a hard error. This is caused by invalid instructions // in the class. These errors are unrecoverable. return false; } - if (oat_file_class_status == Class::kStatusNotReady) { + if (oat_file_class_status == mirror::Class::kStatusNotReady) { // Status is uninitialized if we couldn't determine the status at compile time, for example, // not loading the class. // TODO: when the verifier doesn't rely on Class-es failing to resolve/load the type hierarchy @@ -2323,7 +2355,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, return false; } -void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) { +void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, mirror::Class* klass) { for (size_t i = 0; i < klass->NumDirectMethods(); i++) { ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i)); } @@ -2332,7 +2364,8 @@ void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Cla } } -void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, AbstractMethod* method) { +void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, + mirror::AbstractMethod* method) { // similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod. 
const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset()); if (code_item == NULL) { @@ -2350,7 +2383,7 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Ab // Ensure exception types are resolved so that they don't need resolution to be delivered, // unresolved exception types will be ignored by exception delivery if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) { - Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method); + mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method); if (exception_type == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); @@ -2361,26 +2394,29 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Ab } } -static void CheckProxyConstructor(AbstractMethod* constructor); -static void CheckProxyMethod(AbstractMethod* method, SirtRef& prototype); +static void CheckProxyConstructor(mirror::AbstractMethod* constructor); +static void CheckProxyMethod(mirror::AbstractMethod* method, + SirtRef& prototype); -Class* ClassLinker::CreateProxyClass(String* name, ObjectArray* interfaces, - ClassLoader* loader, ObjectArray* methods, - ObjectArray >* throws) { +mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name, + mirror::ObjectArray* interfaces, + mirror::ClassLoader* loader, + mirror::ObjectArray* methods, + mirror::ObjectArray >* throws) { Thread* self = Thread::Current(); - SirtRef klass(self, AllocClass(self, GetClassRoot(kJavaLangClass), - sizeof(SynthesizedProxyClass))); + SirtRef klass(self, AllocClass(self, GetClassRoot(kJavaLangClass), + sizeof(mirror::SynthesizedProxyClass))); CHECK(klass.get() != NULL); DCHECK(klass->GetClass() != NULL); - klass->SetObjectSize(sizeof(Proxy)); + klass->SetObjectSize(sizeof(mirror::Proxy)); klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal); klass->SetClassLoader(loader); 
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot); klass->SetName(name); - Class* proxy_class = GetClassRoot(kJavaLangReflectProxy); + mirror::Class* proxy_class = GetClassRoot(kJavaLangReflectProxy); klass->SetDexCache(proxy_class->GetDexCache()); - klass->SetStatus(Class::kStatusIdx); + klass->SetStatus(mirror::Class::kStatusIdx); klass->SetDexTypeIndex(DexFile::kDexNoIndex16); @@ -2388,13 +2424,13 @@ Class* ClassLinker::CreateProxyClass(String* name, ObjectArray* interface klass->SetSFields(AllocFieldArray(self, 2)); // 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by // our proxy, so Class.getInterfaces doesn't return the flattened set. - SirtRef interfaces_sfield(self, AllocField(self)); + SirtRef interfaces_sfield(self, AllocField(self)); klass->SetStaticField(0, interfaces_sfield.get()); interfaces_sfield->SetDexFieldIndex(0); interfaces_sfield->SetDeclaringClass(klass.get()); interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); // 2. Create a static field 'throws' that holds exceptions thrown by our methods. 
- SirtRef throws_sfield(self, AllocField(self)); + SirtRef throws_sfield(self, AllocField(self)); klass->SetStaticField(1, throws_sfield.get()); throws_sfield->SetDexFieldIndex(1); throws_sfield->SetDeclaringClass(klass.get()); @@ -2408,24 +2444,24 @@ Class* ClassLinker::CreateProxyClass(String* name, ObjectArray* interface size_t num_virtual_methods = methods->GetLength(); klass->SetVirtualMethods(AllocMethodArray(self, num_virtual_methods)); for (size_t i = 0; i < num_virtual_methods; ++i) { - SirtRef prototype(self, methods->Get(i)); + SirtRef prototype(self, methods->Get(i)); klass->SetVirtualMethod(i, CreateProxyMethod(self, klass, prototype)); } klass->SetSuperClass(proxy_class); // The super class is java.lang.reflect.Proxy - klass->SetStatus(Class::kStatusLoaded); // Class is now effectively in the loaded state + klass->SetStatus(mirror::Class::kStatusLoaded); // Class is now effectively in the loaded state DCHECK(!Thread::Current()->IsExceptionPending()); // Link the fields and virtual methods, creating vtable and iftables if (!LinkClass(klass, interfaces)) { - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return NULL; } { ObjectLock lock(self, klass.get()); // Must hold lock on object when initializing. 
interfaces_sfield->SetObject(klass.get(), interfaces); throws_sfield->SetObject(klass.get(), throws); - klass->SetStatus(Class::kStatusInitialized); + klass->SetStatus(mirror::Class::kStatusInitialized); } // sanity checks @@ -2433,7 +2469,7 @@ Class* ClassLinker::CreateProxyClass(String* name, ObjectArray* interface CHECK(klass->GetIFields() == NULL); CheckProxyConstructor(klass->GetDirectMethod(0)); for (size_t i = 0; i < num_virtual_methods; ++i) { - SirtRef prototype(self, methods->Get(i)); + SirtRef prototype(self, methods->Get(i)); CheckProxyMethod(klass->GetVirtualMethod(i), prototype); } @@ -2445,27 +2481,29 @@ Class* ClassLinker::CreateProxyClass(String* name, ObjectArray* interface name->ToModifiedUtf8().c_str())); CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name); - SynthesizedProxyClass* synth_proxy_class = down_cast(klass.get()); + mirror::SynthesizedProxyClass* synth_proxy_class = + down_cast(klass.get()); CHECK_EQ(synth_proxy_class->GetInterfaces(), interfaces); CHECK_EQ(synth_proxy_class->GetThrows(), throws); } return klass.get(); } -std::string ClassLinker::GetDescriptorForProxy(const Class* proxy_class) { +std::string ClassLinker::GetDescriptorForProxy(const mirror::Class* proxy_class) { DCHECK(proxy_class->IsProxyClass()); - String* name = proxy_class->GetName(); + mirror::String* name = proxy_class->GetName(); DCHECK(name != NULL); return DotToDescriptor(name->ToModifiedUtf8().c_str()); } -AbstractMethod* ClassLinker::FindMethodForProxy(const Class* proxy_class, const AbstractMethod* proxy_method) { +mirror::AbstractMethod* ClassLinker::FindMethodForProxy(const mirror::Class* proxy_class, + const mirror::AbstractMethod* proxy_method) { DCHECK(proxy_class->IsProxyClass()); DCHECK(proxy_method->IsProxyMethod()); // Locate the dex cache of the original interface/Object - DexCache* dex_cache = NULL; + mirror::DexCache* dex_cache = NULL; { - ObjectArray* resolved_types = proxy_method->GetDexCacheResolvedTypes(); + 
mirror::ObjectArray* resolved_types = proxy_method->GetDexCacheResolvedTypes(); MutexLock mu(Thread::Current(), dex_lock_); for (size_t i = 0; i != dex_caches_.size(); ++i) { if (dex_caches_[i]->GetResolvedTypes() == resolved_types) { @@ -2476,27 +2514,31 @@ AbstractMethod* ClassLinker::FindMethodForProxy(const Class* proxy_class, const } CHECK(dex_cache != NULL); uint32_t method_idx = proxy_method->GetDexMethodIndex(); - AbstractMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx); + mirror::AbstractMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx); CHECK(resolved_method != NULL); return resolved_method; } -AbstractMethod* ClassLinker::CreateProxyConstructor(Thread* self, SirtRef& klass, Class* proxy_class) { +mirror::AbstractMethod* ClassLinker::CreateProxyConstructor(Thread* self, + SirtRef& klass, + mirror::Class* proxy_class) { // Create constructor for Proxy that must initialize h - ObjectArray* proxy_direct_methods = proxy_class->GetDirectMethods(); + mirror::ObjectArray* proxy_direct_methods = + proxy_class->GetDirectMethods(); CHECK_EQ(proxy_direct_methods->GetLength(), 15); - AbstractMethod* proxy_constructor = proxy_direct_methods->Get(2); + mirror::AbstractMethod* proxy_constructor = proxy_direct_methods->Get(2); // Clone the existing constructor of Proxy (our constructor would just invoke it so steal its // code_ too) - AbstractMethod* constructor = down_cast(proxy_constructor->Clone(self)); + mirror::AbstractMethod* constructor = + down_cast(proxy_constructor->Clone(self)); // Make this constructor public and fix the class to be our Proxy version constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic); constructor->SetDeclaringClass(klass.get()); return constructor; } -static void CheckProxyConstructor(AbstractMethod* constructor) +static void CheckProxyConstructor(mirror::AbstractMethod* constructor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(constructor->IsConstructor()); 
MethodHelper mh(constructor); @@ -2505,15 +2547,15 @@ static void CheckProxyConstructor(AbstractMethod* constructor) DCHECK(constructor->IsPublic()); } -AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef& klass, - SirtRef& prototype) { +mirror::AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef& klass, + SirtRef& prototype) { // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden // prototype method prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(), prototype.get()); // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize // as necessary - AbstractMethod* method = down_cast(prototype->Clone(self)); + mirror::AbstractMethod* method = down_cast(prototype->Clone(self)); // Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to // the intersection of throw exceptions as defined in Proxy @@ -2522,7 +2564,8 @@ AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef& kla // At runtime the method looks like a reference and argument saving method, clone the code // related parameters from this method. 
- AbstractMethod* refs_and_args = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs); + mirror::AbstractMethod* refs_and_args = + Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs); method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask()); method->SetFpSpillMask(refs_and_args->GetFpSpillMask()); method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes()); @@ -2536,7 +2579,8 @@ AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef& kla return method; } -static void CheckProxyMethod(AbstractMethod* method, SirtRef& prototype) +static void CheckProxyMethod(mirror::AbstractMethod* method, + SirtRef& prototype) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Basic sanity CHECK(!prototype->IsFinal()); @@ -2560,18 +2604,18 @@ static void CheckProxyMethod(AbstractMethod* method, SirtRef& pr CHECK_EQ(mh.GetReturnType(), mh2.GetReturnType()); } -bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) { +bool ClassLinker::InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_statics) { CHECK(klass->IsResolved() || klass->IsErroneous()) << PrettyClass(klass) << ": state=" << klass->GetStatus(); Thread* self = Thread::Current(); - AbstractMethod* clinit = NULL; + mirror::AbstractMethod* clinit = NULL; { // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol ObjectLock lock(self, klass); - if (klass->GetStatus() == Class::kStatusInitialized) { + if (klass->GetStatus() == mirror::Class::kStatusInitialized) { return true; } @@ -2580,11 +2624,11 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in return false; } - if (klass->GetStatus() == Class::kStatusResolved || - klass->GetStatus() == Class::kStatusRetryVerificationAtRuntime) { + if (klass->GetStatus() == mirror::Class::kStatusResolved || + klass->GetStatus() == mirror::Class::kStatusRetryVerificationAtRuntime) { VerifyClass(klass); - if (klass->GetStatus() 
!= Class::kStatusVerified) { - if (klass->GetStatus() == Class::kStatusError) { + if (klass->GetStatus() != mirror::Class::kStatusVerified) { + if (klass->GetStatus() == mirror::Class::kStatusError) { CHECK(self->IsExceptionPending()); } return false; @@ -2606,7 +2650,7 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in // to initializing and we need to wait. Either way, this // invocation of InitializeClass will not be responsible for // running and will return. - if (klass->GetStatus() == Class::kStatusInitializing) { + if (klass->GetStatus() == mirror::Class::kStatusInitializing) { // We caught somebody else in the act; was it us? if (klass->GetClinitThreadId() == self->GetTid()) { // Yes. That's fine. Return so we can continue initializing. @@ -2617,15 +2661,15 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in } if (!ValidateSuperClassDescriptors(klass)) { - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); lock.NotifyAll(); return false; } - DCHECK_EQ(klass->GetStatus(), Class::kStatusVerified) << PrettyClass(klass); + DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass); klass->SetClinitThreadId(self->GetTid()); - klass->SetStatus(Class::kStatusInitializing); + klass->SetStatus(mirror::Class::kStatusInitializing); } uint64_t t0 = NanoTime(); @@ -2665,7 +2709,7 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in if (self->IsExceptionPending()) { WrapExceptionInInitializer(); - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); success = false; } else { RuntimeStats* global_stats = Runtime::Current()->GetStats(); @@ -2677,10 +2721,10 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in // Set the class as initialized except if we can't initialize static fields and static field // initialization is necessary. 
if (!can_init_statics && has_static_field_initializers) { - klass->SetStatus(Class::kStatusVerified); // Don't leave class in initializing state. + klass->SetStatus(mirror::Class::kStatusVerified); // Don't leave class in initializing state. success = false; } else { - klass->SetStatus(Class::kStatusInitialized); + klass->SetStatus(mirror::Class::kStatusInitialized); } if (VLOG_IS_ON(class_linker)) { ClassHelper kh(klass); @@ -2692,7 +2736,7 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in return success; } -bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock) +bool ClassLinker::WaitForInitializeClass(mirror::Class* klass, Thread* self, ObjectLock& lock) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { while (true) { self->AssertNoPendingException(); @@ -2703,14 +2747,14 @@ bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& // "interruptShouldThrow" was set), bail out. if (self->IsExceptionPending()) { WrapExceptionInInitializer(); - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); return false; } // Spurious wakeup? Go back to waiting. - if (klass->GetStatus() == Class::kStatusInitializing) { + if (klass->GetStatus() == mirror::Class::kStatusInitializing) { continue; } - if (klass->GetStatus() == Class::kStatusVerified && Runtime::Current()->IsCompiler()) { + if (klass->GetStatus() == mirror::Class::kStatusVerified && Runtime::Current()->IsCompiler()) { // Compile time initialization failed. 
return false; } @@ -2729,16 +2773,16 @@ bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& LOG(FATAL) << "Not Reached" << PrettyClass(klass); } -bool ClassLinker::ValidateSuperClassDescriptors(const Class* klass) { +bool ClassLinker::ValidateSuperClassDescriptors(const mirror::Class* klass) { if (klass->IsInterface()) { return true; } // begin with the methods local to the superclass if (klass->HasSuperClass() && klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) { - const Class* super = klass->GetSuperClass(); + const mirror::Class* super = klass->GetSuperClass(); for (int i = super->GetVTable()->GetLength() - 1; i >= 0; --i) { - const AbstractMethod* method = klass->GetVTable()->Get(i); + const mirror::AbstractMethod* method = klass->GetVTable()->Get(i); if (method != super->GetVTable()->Get(i) && !IsSameMethodSignatureInDifferentClassContexts(method, super, klass)) { ThrowLinkageError("Class %s method %s resolves differently in superclass %s", @@ -2748,12 +2792,12 @@ bool ClassLinker::ValidateSuperClassDescriptors(const Class* klass) { } } } - IfTable* iftable = klass->GetIfTable(); + mirror::IfTable* iftable = klass->GetIfTable(); for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) { - Class* interface = iftable->GetInterface(i); + mirror::Class* interface = iftable->GetInterface(i); if (klass->GetClassLoader() != interface->GetClassLoader()) { for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { - const AbstractMethod* method = iftable->GetMethodArray(i)->Get(j); + const mirror::AbstractMethod* method = iftable->GetMethodArray(i)->Get(j); if (!IsSameMethodSignatureInDifferentClassContexts(method, interface, method->GetDeclaringClass())) { ThrowLinkageError("Class %s method %s resolves differently in interface %s", @@ -2770,9 +2814,9 @@ bool ClassLinker::ValidateSuperClassDescriptors(const Class* klass) { // Returns true if classes referenced by the signature of the method are the // same classes in 
klass1 as they are in klass2. -bool ClassLinker::IsSameMethodSignatureInDifferentClassContexts(const AbstractMethod* method, - const Class* klass1, - const Class* klass2) { +bool ClassLinker::IsSameMethodSignatureInDifferentClassContexts(const mirror::AbstractMethod* method, + const mirror::Class* klass1, + const mirror::Class* klass2) { if (klass1 == klass2) { return true; } @@ -2803,29 +2847,29 @@ bool ClassLinker::IsSameMethodSignatureInDifferentClassContexts(const AbstractMe // Returns true if the descriptor resolves to the same class in the context of klass1 and klass2. bool ClassLinker::IsSameDescriptorInDifferentClassContexts(const char* descriptor, - const Class* klass1, - const Class* klass2) { + const mirror::Class* klass1, + const mirror::Class* klass2) { CHECK(descriptor != NULL); CHECK(klass1 != NULL); CHECK(klass2 != NULL); if (klass1 == klass2) { return true; } - Class* found1 = FindClass(descriptor, klass1->GetClassLoader()); + mirror::Class* found1 = FindClass(descriptor, klass1->GetClassLoader()); if (found1 == NULL) { Thread::Current()->ClearException(); } - Class* found2 = FindClass(descriptor, klass2->GetClassLoader()); + mirror::Class* found2 = FindClass(descriptor, klass2->GetClassLoader()); if (found2 == NULL) { Thread::Current()->ClearException(); } return found1 == found2; } -bool ClassLinker::InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) { +bool ClassLinker::InitializeSuperClass(mirror::Class* klass, bool can_run_clinit, bool can_init_fields) { CHECK(klass != NULL); if (!klass->IsInterface() && klass->HasSuperClass()) { - Class* super_class = klass->GetSuperClass(); + mirror::Class* super_class = klass->GetSuperClass(); if (!super_class->IsInitialized()) { CHECK(!super_class->IsInterface()); // Must hold lock on object when initializing and setting status. 
@@ -2835,11 +2879,11 @@ bool ClassLinker::InitializeSuperClass(Class* klass, bool can_run_clinit, bool c if (!super_initialized) { if (!can_run_clinit) { // Don't set status to error when we can't run . - CHECK_EQ(klass->GetStatus(), Class::kStatusInitializing) << PrettyClass(klass); - klass->SetStatus(Class::kStatusVerified); + CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusInitializing) << PrettyClass(klass); + klass->SetStatus(mirror::Class::kStatusVerified); return false; } - klass->SetStatus(Class::kStatusError); + klass->SetStatus(mirror::Class::kStatusError); klass->NotifyAll(); return false; } @@ -2848,7 +2892,7 @@ bool ClassLinker::InitializeSuperClass(Class* klass, bool can_run_clinit, bool c return true; } -bool ClassLinker::EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) { +bool ClassLinker::EnsureInitialized(mirror::Class* c, bool can_run_clinit, bool can_init_fields) { DCHECK(c != NULL); if (c->IsInitialized()) { return true; @@ -2864,8 +2908,8 @@ bool ClassLinker::EnsureInitialized(Class* c, bool can_run_clinit, bool can_init } void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - Class* c, SafeMap& field_map) { - ClassLoader* cl = c->GetClassLoader(); + mirror::Class* c, SafeMap& field_map) { + mirror::ClassLoader* cl = c->GetClassLoader(); const byte* class_data = dex_file.GetClassData(dex_class_def); ClassDataItemIterator it(dex_file, class_data); for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) { @@ -2873,12 +2917,12 @@ void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::Clas } } -bool ClassLinker::InitializeStaticFields(Class* klass) { +bool ClassLinker::InitializeStaticFields(mirror::Class* klass) { size_t num_static_fields = klass->NumStaticFields(); if (num_static_fields == 0) { return false; } - DexCache* dex_cache = klass->GetDexCache(); + mirror::DexCache* dex_cache = klass->GetDexCache(); // TODO: this seems like the wrong check. 
do we really want !IsPrimitive && !IsArray? if (dex_cache == NULL) { return false; @@ -2892,7 +2936,7 @@ bool ClassLinker::InitializeStaticFields(Class* klass) { if (it.HasNext()) { // We reordered the fields, so we need to be able to map the field indexes to the right fields. - SafeMap field_map; + SafeMap field_map; ConstructFieldMap(dex_file, *dex_class_def, klass, field_map); for (size_t i = 0; it.HasNext(); i++, it.Next()) { it.ReadValueToField(field_map.Get(i)); @@ -2902,8 +2946,9 @@ bool ClassLinker::InitializeStaticFields(Class* klass) { return false; } -bool ClassLinker::LinkClass(SirtRef& klass, ObjectArray* interfaces) { - CHECK_EQ(Class::kStatusLoaded, klass->GetStatus()); +bool ClassLinker::LinkClass(SirtRef& klass, + mirror::ObjectArray* interfaces) { + CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus()); if (!LinkSuperClass(klass)) { return false; } @@ -2918,19 +2963,19 @@ bool ClassLinker::LinkClass(SirtRef& klass, ObjectArray* interface } CreateReferenceInstanceOffsets(klass); CreateReferenceStaticOffsets(klass); - CHECK_EQ(Class::kStatusLoaded, klass->GetStatus()); - klass->SetStatus(Class::kStatusResolved); + CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus()); + klass->SetStatus(mirror::Class::kStatusResolved); return true; } -bool ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) { - CHECK_EQ(Class::kStatusIdx, klass->GetStatus()); +bool ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) { + CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus()); StringPiece descriptor(dex_file.StringByTypeIdx(klass->GetDexTypeIndex())); const DexFile::ClassDef* class_def = dex_file.FindClassDef(descriptor); CHECK(class_def != NULL); uint16_t super_class_idx = class_def->superclass_idx_; if (super_class_idx != DexFile::kDexNoIndex16) { - Class* super_class = ResolveType(dex_file, super_class_idx, klass.get()); + mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.get()); 
if (super_class == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return false; @@ -2949,7 +2994,7 @@ bool ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const DexFile& d if (interfaces != NULL) { for (size_t i = 0; i < interfaces->Size(); i++) { uint16_t idx = interfaces->GetTypeItem(i).type_idx_; - Class* interface = ResolveType(dex_file, idx, klass.get()); + mirror::Class* interface = ResolveType(dex_file, idx, klass.get()); if (interface == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return false; @@ -2966,13 +3011,13 @@ bool ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const DexFile& d } } // Mark the class as loaded. - klass->SetStatus(Class::kStatusLoaded); + klass->SetStatus(mirror::Class::kStatusLoaded); return true; } -bool ClassLinker::LinkSuperClass(SirtRef& klass) { +bool ClassLinker::LinkSuperClass(SirtRef& klass) { CHECK(!klass->IsPrimitive()); - Class* super = klass->GetSuperClass(); + mirror::Class* super = klass->GetSuperClass(); if (klass.get() == GetClassRoot(kJavaLangObject)) { if (super != NULL) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassFormatError;", @@ -3031,7 +3076,8 @@ bool ClassLinker::LinkSuperClass(SirtRef& klass) { } // Populate the class vtable and itable. Compute return type indices. -bool ClassLinker::LinkMethods(SirtRef& klass, ObjectArray* interfaces) { +bool ClassLinker::LinkMethods(SirtRef& klass, + mirror::ObjectArray* interfaces) { if (klass->IsInterface()) { // No vtable. 
size_t count = klass->NumVirtualMethods(); @@ -3051,24 +3097,24 @@ bool ClassLinker::LinkMethods(SirtRef& klass, ObjectArray* interfa return true; } -bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { +bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { Thread* self = Thread::Current(); if (klass->HasSuperClass()) { uint32_t max_count = klass->NumVirtualMethods() + klass->GetSuperClass()->GetVTable()->GetLength(); size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength(); CHECK_LE(actual_count, max_count); // TODO: do not assign to the vtable field until it is fully constructed. - SirtRef > + SirtRef > vtable(self, klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count)); // See if any of our virtual methods override the superclass. MethodHelper local_mh(NULL, this); MethodHelper super_mh(NULL, this); for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) { - AbstractMethod* local_method = klass->GetVirtualMethodDuringLinking(i); + mirror::AbstractMethod* local_method = klass->GetVirtualMethodDuringLinking(i); local_mh.ChangeMethod(local_method); size_t j = 0; for (; j < actual_count; ++j) { - AbstractMethod* super_method = vtable->Get(j); + mirror::AbstractMethod* super_method = vtable->Get(j); super_mh.ChangeMethod(super_method); if (local_mh.HasSameNameAndSignature(&super_mh)) { if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) { @@ -3112,10 +3158,10 @@ bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { ThrowClassFormatError("Too many methods: %d", num_virtual_methods); return false; } - SirtRef > + SirtRef > vtable(self, AllocMethodArray(self, num_virtual_methods)); for (size_t i = 0; i < num_virtual_methods; ++i) { - AbstractMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i); + mirror::AbstractMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i); vtable->Set(i, virtual_method); virtual_method->SetMethodIndex(i & 0xFFFF); } @@ -3124,7 +3170,8 @@ bool 
ClassLinker::LinkVirtualMethods(SirtRef& klass) { return true; } -bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray* interfaces) { +bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, + mirror::ObjectArray* interfaces) { size_t super_ifcount; if (klass->HasSuperClass()) { super_ifcount = klass->GetSuperClass()->GetIfTableCount(); @@ -3136,7 +3183,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray uint32_t num_interfaces = interfaces == NULL ? kh.NumDirectInterfaces() : interfaces->GetLength(); ifcount += num_interfaces; for (size_t i = 0; i < num_interfaces; i++) { - Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i); + mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i); ifcount += interface->GetIfTableCount(); } if (ifcount == 0) { @@ -3148,7 +3195,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray if (ifcount == super_ifcount) { // Class implements same interfaces as parent, are any of these not marker interfaces? 
bool has_non_marker_interface = false; - IfTable* super_iftable = klass->GetSuperClass()->GetIfTable(); + mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable(); for (size_t i = 0; i < ifcount; ++i) { if (super_iftable->GetMethodArrayCount(i) > 0) { has_non_marker_interface = true; @@ -3162,18 +3209,18 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray } } Thread* self = Thread::Current(); - SirtRef iftable(self, AllocIfTable(self, ifcount)); + SirtRef iftable(self, AllocIfTable(self, ifcount)); if (super_ifcount != 0) { - IfTable* super_iftable = klass->GetSuperClass()->GetIfTable(); + mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable(); for (size_t i = 0; i < super_ifcount; i++) { - Class* super_interface = super_iftable->GetInterface(i); + mirror::Class* super_interface = super_iftable->GetInterface(i); iftable->SetInterface(i, super_interface); } } // Flatten the interface inheritance hierarchy. size_t idx = super_ifcount; for (size_t i = 0; i < num_interfaces; i++) { - Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i); + mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i); DCHECK(interface != NULL); if (!interface->IsInterface()) { ClassHelper ih(interface); @@ -3186,7 +3233,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray // Check if interface is already in iftable bool duplicate = false; for (size_t j = 0; j < idx; j++) { - Class* existing_interface = iftable->GetInterface(j); + mirror::Class* existing_interface = iftable->GetInterface(j); if (existing_interface == interface) { duplicate = true; break; @@ -3197,10 +3244,10 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray iftable->SetInterface(idx++, interface); // Add this interface's non-duplicate super-interfaces. 
for (int32_t j = 0; j < interface->GetIfTableCount(); j++) { - Class* super_interface = interface->GetIfTable()->GetInterface(j); + mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j); bool super_duplicate = false; for (size_t k = 0; k < idx; k++) { - Class* existing_interface = iftable->GetInterface(k); + mirror::Class* existing_interface = iftable->GetInterface(k); if (existing_interface == super_interface) { super_duplicate = true; break; @@ -3214,7 +3261,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray } // Shrink iftable in case duplicates were found if (idx < ifcount) { - iftable.reset(down_cast(iftable->CopyOf(self, idx * IfTable::kMax))); + iftable.reset(down_cast(iftable->CopyOf(self, idx * mirror::IfTable::kMax))); ifcount = idx; } else { CHECK_EQ(idx, ifcount); @@ -3225,18 +3272,19 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray if (klass->IsInterface()) { return true; } - std::vector miranda_list; + std::vector miranda_list; MethodHelper vtable_mh(NULL, this); MethodHelper interface_mh(NULL, this); for (size_t i = 0; i < ifcount; ++i) { - Class* interface = iftable->GetInterface(i); + mirror::Class* interface = iftable->GetInterface(i); size_t num_methods = interface->NumVirtualMethods(); if (num_methods > 0) { - ObjectArray* method_array = AllocMethodArray(self, num_methods); + mirror::ObjectArray* method_array = + AllocMethodArray(self, num_methods); iftable->SetMethodArray(i, method_array); - ObjectArray* vtable = klass->GetVTableDuringLinking(); + mirror::ObjectArray* vtable = klass->GetVTableDuringLinking(); for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { - AbstractMethod* interface_method = interface->GetVirtualMethod(j); + mirror::AbstractMethod* interface_method = interface->GetVirtualMethod(j); interface_mh.ChangeMethod(interface_method); int32_t k; // For each method listed in the interface's method list, find the @@ -3248,7 +3296,7 @@ bool 
ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray // those don't end up in the virtual method table, so it shouldn't // matter which direction we go. We walk it backward anyway.) for (k = vtable->GetLength() - 1; k >= 0; --k) { - AbstractMethod* vtable_method = vtable->Get(k); + mirror::AbstractMethod* vtable_method = vtable->Get(k); vtable_mh.ChangeMethod(vtable_method); if (interface_mh.HasSameNameAndSignature(&vtable_mh)) { if (!vtable_method->IsPublic()) { @@ -3262,9 +3310,9 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray } } if (k < 0) { - SirtRef miranda_method(self, NULL); + SirtRef miranda_method(self, NULL); for (size_t mir = 0; mir < miranda_list.size(); mir++) { - AbstractMethod* mir_method = miranda_list[mir]; + mirror::AbstractMethod* mir_method = miranda_list[mir]; vtable_mh.ChangeMethod(mir_method); if (interface_mh.HasSameNameAndSignature(&vtable_mh)) { miranda_method.reset(miranda_list[mir]); @@ -3273,7 +3321,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray } if (miranda_method.get() == NULL) { // point the interface table at a phantom slot - miranda_method.reset(down_cast(interface_method->Clone(self))); + miranda_method.reset(down_cast(interface_method->Clone(self))); miranda_list.push_back(miranda_method.get()); } method_array->Set(j, miranda_method.get()); @@ -3288,13 +3336,14 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray ? 
AllocMethodArray(self, new_method_count) : klass->GetVirtualMethods()->CopyOf(self, new_method_count)); - SirtRef > vtable(self, klass->GetVTableDuringLinking()); + SirtRef > + vtable(self, klass->GetVTableDuringLinking()); CHECK(vtable.get() != NULL); int old_vtable_count = vtable->GetLength(); int new_vtable_count = old_vtable_count + miranda_list.size(); vtable.reset(vtable->CopyOf(self, new_vtable_count)); for (size_t i = 0; i < miranda_list.size(); ++i) { - AbstractMethod* method = miranda_list[i]; + mirror::AbstractMethod* method = miranda_list[i]; // Leave the declaring class alone as type indices are relative to it method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda); method->SetMethodIndex(0xFFFF & (old_vtable_count + i)); @@ -3305,7 +3354,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray klass->SetVTable(vtable.get()); } - ObjectArray* vtable = klass->GetVTableDuringLinking(); + mirror::ObjectArray* vtable = klass->GetVTableDuringLinking(); for (int i = 0; i < vtable->GetLength(); ++i) { CHECK(vtable->Get(i) != NULL); } @@ -3315,12 +3364,12 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, ObjectArray return true; } -bool ClassLinker::LinkInstanceFields(SirtRef& klass) { +bool ClassLinker::LinkInstanceFields(SirtRef& klass) { CHECK(klass.get() != NULL); return LinkFields(klass, false); } -bool ClassLinker::LinkStaticFields(SirtRef& klass) { +bool ClassLinker::LinkStaticFields(SirtRef& klass) { CHECK(klass.get() != NULL); size_t allocated_class_size = klass->GetClassSize(); bool success = LinkFields(klass, true); @@ -3333,7 +3382,8 @@ struct LinkFieldsComparator { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : fh_(fh) {} // No thread safety analysis as will be called from STL. Checked lock held in constructor. 
- bool operator()(const Field* field1, const Field* field2) NO_THREAD_SAFETY_ANALYSIS { + bool operator()(const mirror::Field* field1, const mirror::Field* field2) + NO_THREAD_SAFETY_ANALYSIS { // First come reference fields, then 64-bit, and finally 32-bit fh_->ChangeField(field1); Primitive::Type type1 = fh_->GetTypeAsPrimitiveType(); @@ -3360,11 +3410,11 @@ struct LinkFieldsComparator { FieldHelper* fh_; }; -bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { +bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { size_t num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields(); - ObjectArray* fields = + mirror::ObjectArray* fields = is_static ? klass->GetSFields() : klass->GetIFields(); // Initialize size and field_offset @@ -3372,9 +3422,9 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { MemberOffset field_offset(0); if (is_static) { size = klass->GetClassSize(); - field_offset = Class::FieldsOffset(); + field_offset = mirror::Class::FieldsOffset(); } else { - Class* super_class = klass->GetSuperClass(); + mirror::Class* super_class = klass->GetSuperClass(); if (super_class != NULL) { CHECK(super_class->IsResolved()); field_offset = MemberOffset(super_class->GetObjectSize()); @@ -3386,7 +3436,7 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { // we want a relatively stable order so that adding new fields // minimizes disruption of C++ version such as Class and Method. 
- std::deque grouped_and_sorted_fields; + std::deque grouped_and_sorted_fields; for (size_t i = 0; i < num_fields; i++) { grouped_and_sorted_fields.push_back(fields->Get(i)); } @@ -3399,7 +3449,7 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { size_t current_field = 0; size_t num_reference_fields = 0; for (; current_field < num_fields; current_field++) { - Field* field = grouped_and_sorted_fields.front(); + mirror::Field* field = grouped_and_sorted_fields.front(); fh.ChangeField(field); Primitive::Type type = fh.GetTypeAsPrimitiveType(); bool isPrimitive = type != Primitive::kPrimNot; @@ -3418,7 +3468,7 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { // into place. If we can't find one, we'll have to pad it. if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) { for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) { - Field* field = grouped_and_sorted_fields[i]; + mirror::Field* field = grouped_and_sorted_fields[i]; fh.ChangeField(field); Primitive::Type type = fh.GetTypeAsPrimitiveType(); CHECK(type != Primitive::kPrimNot); // should only be working on primitive types @@ -3439,7 +3489,7 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { // finish assigning field offsets to all fields. DCHECK(current_field == num_fields || IsAligned<8>(field_offset.Uint32Value())); while (!grouped_and_sorted_fields.empty()) { - Field* field = grouped_and_sorted_fields.front(); + mirror::Field* field = grouped_and_sorted_fields.front(); grouped_and_sorted_fields.pop_front(); fh.ChangeField(field); Primitive::Type type = fh.GetTypeAsPrimitiveType(); @@ -3469,12 +3519,13 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { // non-reference fields, and all double-wide fields are aligned. 
bool seen_non_ref = false; for (size_t i = 0; i < num_fields; i++) { - Field* field = fields->Get(i); + mirror::Field* field = fields->Get(i); if (false) { // enable to debug field layout LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance") << " class=" << PrettyClass(klass.get()) << " field=" << PrettyField(field) - << " offset=" << field->GetField32(MemberOffset(Field::OffsetOffset()), false); + << " offset=" << field->GetField32(MemberOffset(mirror::Field::OffsetOffset()), + false); } fh.ChangeField(field); Primitive::Type type = fh.GetTypeAsPrimitiveType(); @@ -3512,9 +3563,9 @@ bool ClassLinker::LinkFields(SirtRef& klass, bool is_static) { // Set the bitmap of reference offsets, refOffsets, from the ifields // list. -void ClassLinker::CreateReferenceInstanceOffsets(SirtRef& klass) { +void ClassLinker::CreateReferenceInstanceOffsets(SirtRef& klass) { uint32_t reference_offsets = 0; - Class* super_class = klass->GetSuperClass(); + mirror::Class* super_class = klass->GetSuperClass(); if (super_class != NULL) { reference_offsets = super_class->GetReferenceInstanceOffsets(); // If our superclass overflowed, we don't stand a chance. @@ -3526,23 +3577,23 @@ void ClassLinker::CreateReferenceInstanceOffsets(SirtRef& klass) { CreateReferenceOffsets(klass, false, reference_offsets); } -void ClassLinker::CreateReferenceStaticOffsets(SirtRef& klass) { +void ClassLinker::CreateReferenceStaticOffsets(SirtRef& klass) { CreateReferenceOffsets(klass, true, 0); } -void ClassLinker::CreateReferenceOffsets(SirtRef& klass, bool is_static, +void ClassLinker::CreateReferenceOffsets(SirtRef& klass, bool is_static, uint32_t reference_offsets) { size_t num_reference_fields = is_static ? klass->NumReferenceStaticFieldsDuringLinking() : klass->NumReferenceInstanceFieldsDuringLinking(); - const ObjectArray* fields = + const mirror::ObjectArray* fields = is_static ? 
klass->GetSFields() : klass->GetIFields(); // All of the fields that contain object references are guaranteed // to be at the beginning of the fields list. for (size_t i = 0; i < num_reference_fields; ++i) { // Note that byte_offset is the offset from the beginning of // object, not the offset into instance data - const Field* field = fields->Get(i); + const mirror::Field* field = fields->Get(i); MemberOffset byte_offset = field->GetOffsetDuringLinking(); CHECK_EQ(byte_offset.Uint32Value() & (CLASS_OFFSET_ALIGNMENT - 1), 0U); if (CLASS_CAN_ENCODE_OFFSET(byte_offset.Uint32Value())) { @@ -3562,27 +3613,27 @@ void ClassLinker::CreateReferenceOffsets(SirtRef& klass, bool is_static, } } -String* ClassLinker::ResolveString(const DexFile& dex_file, - uint32_t string_idx, DexCache* dex_cache) { +mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, + uint32_t string_idx, mirror::DexCache* dex_cache) { DCHECK(dex_cache != NULL); - String* resolved = dex_cache->GetResolvedString(string_idx); + mirror::String* resolved = dex_cache->GetResolvedString(string_idx); if (resolved != NULL) { return resolved; } const DexFile::StringId& string_id = dex_file.GetStringId(string_idx); int32_t utf16_length = dex_file.GetStringLength(string_id); const char* utf8_data = dex_file.GetStringData(string_id); - String* string = intern_table_->InternStrong(utf16_length, utf8_data); + mirror::String* string = intern_table_->InternStrong(utf16_length, utf8_data); dex_cache->SetResolvedString(string_idx, string); return string; } -Class* ClassLinker::ResolveType(const DexFile& dex_file, - uint16_t type_idx, - DexCache* dex_cache, - ClassLoader* class_loader) { +mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, + uint16_t type_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader) { DCHECK(dex_cache != NULL); - Class* resolved = dex_cache->GetResolvedType(type_idx); + mirror::Class* resolved = dex_cache->GetResolvedType(type_idx); if (resolved == NULL) 
{ const char* descriptor = dex_file.StringByTypeIdx(type_idx); resolved = FindClass(descriptor, class_loader); @@ -3604,21 +3655,21 @@ Class* ClassLinker::ResolveType(const DexFile& dex_file, return resolved; } -AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, - uint32_t method_idx, - DexCache* dex_cache, - ClassLoader* class_loader, - const AbstractMethod* referrer, - InvokeType type) { +mirror::AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, + uint32_t method_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, + const mirror::AbstractMethod* referrer, + InvokeType type) { DCHECK(dex_cache != NULL); // Check for hit in the dex cache. - AbstractMethod* resolved = dex_cache->GetResolvedMethod(method_idx); + mirror::AbstractMethod* resolved = dex_cache->GetResolvedMethod(method_idx); if (resolved != NULL) { return resolved; } // Fail, get the declaring class. const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx); - Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader); + mirror::Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader); if (klass == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return NULL; @@ -3689,8 +3740,8 @@ AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, // If we found something, check that it can be accessed by the referrer. 
if (resolved != NULL && referrer != NULL) { - Class* methods_class = resolved->GetDeclaringClass(); - Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* methods_class = resolved->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (!referring_class->CanAccess(methods_class)) { ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, referrer, resolved, type); @@ -3751,18 +3802,18 @@ AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, } } -Field* ClassLinker::ResolveField(const DexFile& dex_file, - uint32_t field_idx, - DexCache* dex_cache, - ClassLoader* class_loader, - bool is_static) { +mirror::Field* ClassLinker::ResolveField(const DexFile& dex_file, + uint32_t field_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, + bool is_static) { DCHECK(dex_cache != NULL); - Field* resolved = dex_cache->GetResolvedField(field_idx); + mirror::Field* resolved = dex_cache->GetResolvedField(field_idx); if (resolved != NULL) { return resolved; } const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx); - Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader); + mirror::Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader); if (klass == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return NULL; @@ -3791,17 +3842,17 @@ Field* ClassLinker::ResolveField(const DexFile& dex_file, return resolved; } -Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, - uint32_t field_idx, - DexCache* dex_cache, - ClassLoader* class_loader) { +mirror::Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, + uint32_t field_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader) { DCHECK(dex_cache != NULL); - Field* resolved = dex_cache->GetResolvedField(field_idx); + mirror::Field* resolved = dex_cache->GetResolvedField(field_idx); if (resolved != NULL) { return resolved; } 
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx); - Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader); + mirror::Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader); if (klass == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return NULL; @@ -3818,9 +3869,10 @@ Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, return resolved; } -const char* ClassLinker::MethodShorty(uint32_t method_idx, AbstractMethod* referrer, uint32_t* length) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); +const char* ClassLinker::MethodShorty(uint32_t method_idx, mirror::AbstractMethod* referrer, + uint32_t* length) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx); return dex_file.GetMethodShorty(method_id, length); @@ -3829,7 +3881,7 @@ const char* ClassLinker::MethodShorty(uint32_t method_idx, AbstractMethod* refer void ClassLinker::DumpAllClasses(int flags) const { // TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker // lock held, because it might need to resolve a field's type, which would try to take the lock. 
- std::vector all_classes; + std::vector all_classes; { MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto @@ -3865,7 +3917,7 @@ pid_t ClassLinker::GetDexLockOwner() { return dex_lock_.GetExclusiveOwnerTid(); } -void ClassLinker::SetClassRoot(ClassRoot class_root, Class* klass) { +void ClassLinker::SetClassRoot(ClassRoot class_root, mirror::Class* klass) { DCHECK(!init_done_); DCHECK(klass != NULL); diff --git a/src/class_linker.h b/src/class_linker.h index 09a43c533c..3039d55f7a 100644 --- a/src/class_linker.h +++ b/src/class_linker.h @@ -23,23 +23,26 @@ #include "base/macros.h" #include "base/mutex.h" -#include "dex_cache.h" #include "dex_file.h" #include "gtest/gtest.h" -#include "heap.h" +#include "root_visitor.h" #include "oat_file.h" -#include "object.h" -#include "safe_map.h" namespace art { - +namespace mirror { class ClassLoader; +class DexCache; +class DexCacheTest_Open_Test; +class IfTable; +template class ObjectArray; +class StackTraceElement; +} class ImageSpace; class InternTable; class ObjectLock; template class SirtRef; -typedef bool (ClassVisitor)(Class* c, void* arg); +typedef bool (ClassVisitor)(mirror::Class* c, void* arg); class ClassLinker { public: @@ -56,33 +59,33 @@ class ClassLinker { // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. 
- Class* FindClass(const char* descriptor, ClassLoader* class_loader) + mirror::Class* FindClass(const char* descriptor, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* FindSystemClass(const char* descriptor) + mirror::Class* FindSystemClass(const char* descriptor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Define a new a class based on a ClassDef from a DexFile - Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader, - const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) + mirror::Class* DefineClass(const StringPiece& descriptor, mirror::ClassLoader* class_loader, + const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded // by the given 'class_loader'. - Class* LookupClass(const char* descriptor, const ClassLoader* class_loader) + mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. - void LookupClasses(const char* descriptor, std::vector& classes) + void LookupClasses(const char* descriptor, std::vector& classes) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. 
- bool RemoveClass(const char* descriptor, const ClassLoader* class_loader) + bool RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -98,27 +101,20 @@ class ClassLinker { // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - String* ResolveString(uint32_t string_idx, const AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx); - if (UNLIKELY(resolved_string == NULL)) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_string = ResolveString(dex_file, string_idx, dex_cache); - } - return resolved_string; - } + mirror::String* ResolveString(uint32_t string_idx, const mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. - String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache) + mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx, + mirror::DexCache* dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. 
- Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const Class* referrer) + mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, + const mirror::Class* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveType(dex_file, type_idx, @@ -129,40 +125,20 @@ class ClassLinker { // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - Class* ResolveType(uint16_t type_idx, const AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(resolved_type == NULL)) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); - ClassLoader* class_loader = declaring_class->GetClassLoader(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); - } - return resolved_type; - } + mirror::Class* ResolveType(uint16_t type_idx, const mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* ResolveType(uint16_t type_idx, const Field* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); - Class* resolved_type = dex_cache->GetResolvedType(type_idx); - if (UNLIKELY(resolved_type == NULL)) { - ClassLoader* class_loader = declaring_class->GetClassLoader(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); - } - return resolved_type; - } + mirror::Class* ResolveType(uint16_t type_idx, const mirror::Field* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a type with the given ID from the DexFile, storing the // result in DexCache. 
The ClassLoader is used to search for the // type, since it may be referenced from but not contained within // the given DexFile. - Class* ResolveType(const DexFile& dex_file, - uint16_t type_idx, - DexCache* dex_cache, - ClassLoader* class_loader) + mirror::Class* ResolveType(const DexFile& dex_file, + uint16_t type_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a method with a given ID from the DexFile, storing the @@ -170,50 +146,31 @@ class ClassLinker { // in ResolveType. What is unique is the method type argument which // is used to determine if this method is a direct, static, or // virtual method. - AbstractMethod* ResolveMethod(const DexFile& dex_file, - uint32_t method_idx, - DexCache* dex_cache, - ClassLoader* class_loader, - const AbstractMethod* referrer, - InvokeType type) + mirror::AbstractMethod* ResolveMethod(const DexFile& dex_file, + uint32_t method_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, + const mirror::AbstractMethod* referrer, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* ResolveMethod(uint32_t method_idx, const AbstractMethod* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* resolved_method = referrer->GetDexCacheResolvedMethods()->Get(method_idx); - if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); - ClassLoader* class_loader = declaring_class->GetClassLoader(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type); - } - return resolved_method; - } + mirror::AbstractMethod* ResolveMethod(uint32_t method_idx, const mirror::AbstractMethod* referrer, + InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - 
Field* ResolveField(uint32_t field_idx, const AbstractMethod* referrer, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* resolved_field = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); - if (UNLIKELY(resolved_field == NULL)) { - Class* declaring_class = referrer->GetDeclaringClass(); - DexCache* dex_cache = declaring_class->GetDexCache(); - ClassLoader* class_loader = declaring_class->GetClassLoader(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static); - } - return resolved_field; - } + mirror::Field* ResolveField(uint32_t field_idx, const mirror::AbstractMethod* referrer, + bool is_static) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. What is unique is the is_static argument which is // used to determine if we are resolving a static or non-static // field. - Field* ResolveField(const DexFile& dex_file, + mirror::Field* ResolveField(const DexFile& dex_file, uint32_t field_idx, - DexCache* dex_cache, - ClassLoader* class_loader, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -221,20 +178,20 @@ class ClassLinker { // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. No is_static argument is provided so that Java // field resolution semantics are followed. - Field* ResolveFieldJLS(const DexFile& dex_file, - uint32_t field_idx, - DexCache* dex_cache, - ClassLoader* class_loader) + mirror::Field* ResolveFieldJLS(const DexFile& dex_file, + uint32_t field_idx, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. 
- const char* MethodShorty(uint32_t method_idx, AbstractMethod* referrer, uint32_t* length) + const char* MethodShorty(uint32_t method_idx, mirror::AbstractMethod* referrer, uint32_t* length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no execution is possible. - bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) + bool EnsureInitialized(mirror::Class* c, bool can_run_clinit, bool can_init_fields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initializes classes that have instances in the image but that have @@ -244,7 +201,7 @@ class ClassLinker { void RegisterDexFile(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) + void RegisterDexFile(const DexFile& dex_file, SirtRef& dex_cache) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -262,15 +219,15 @@ class ClassLinker { void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); - void VisitRoots(Heap::RootVisitor* visitor, void* arg) + void VisitRoots(RootVisitor* visitor, void* arg) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_); - DexCache* FindDexCache(const DexFile& dex_file) const + mirror::DexCache* FindDexCache(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_); - void FixupDexCaches(AbstractMethod* resolution_method) const + void FixupDexCaches(mirror::AbstractMethod* resolution_method) const LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -310,66 +267,54 @@ class ClassLinker { // TODO: replace this with multiple methods that allocate the 
correct managed type. template - ObjectArray* AllocObjectArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, GetClassRoot(kObjectArrayClass), length); - } + mirror::ObjectArray* AllocObjectArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocClassArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, GetClassRoot(kClassArrayClass), length); - } + mirror::ObjectArray* AllocClassArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocStringArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, GetClassRoot(kJavaLangStringArrayClass), length); - } + mirror::ObjectArray* AllocStringArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocAbstractMethodArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, - GetClassRoot(kJavaLangReflectAbstractMethodArrayClass), length); - } + mirror::ObjectArray* AllocAbstractMethodArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocMethodArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, - GetClassRoot(kJavaLangReflectMethodArrayClass), length); - } + mirror::ObjectArray* AllocMethodArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - IfTable* AllocIfTable(Thread* self, size_t ifcount) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return down_cast( - IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * IfTable::kMax)); - } + mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocFieldArray(Thread* self, size_t length) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ObjectArray::Alloc(self, GetClassRoot(kJavaLangReflectFieldArrayClass), length); - } + mirror::ObjectArray* AllocFieldArray(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* AllocStackTraceElementArray(Thread* self, size_t length) + mirror::ObjectArray* AllocStackTraceElementArray(Thread* self, + size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, - Class::Status& oat_file_class_status) + void VerifyClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, + mirror::Class::Status& oat_file_class_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) + void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, AbstractMethod* klass) + void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::AbstractMethod* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* CreateProxyClass(String* name, ObjectArray* interfaces, ClassLoader* loader, - ObjectArray* methods, ObjectArray >* throws) + mirror::Class* CreateProxyClass(mirror::String* name, mirror::ObjectArray* interfaces, + mirror::ClassLoader* loader, + mirror::ObjectArray* methods, + mirror::ObjectArray >* throws) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - std::string GetDescriptorForProxy(const Class* proxy_class) + std::string GetDescriptorForProxy(const mirror::Class* proxy_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* FindMethodForProxy(const Class* proxy_class, const AbstractMethod* proxy_method) + 
mirror::AbstractMethod* FindMethodForProxy(const mirror::Class* proxy_class, + const mirror::AbstractMethod* proxy_method) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized - const void* GetOatCodeFor(const AbstractMethod* method) + const void* GetOatCodeFor(const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method from a method index. @@ -389,7 +334,7 @@ class ClassLinker { private: explicit ClassLinker(InternTable*); - const OatFile::OatMethod GetOatMethodFor(const AbstractMethod* method) + const OatFile::OatMethod GetOatMethodFor(const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker by bootstraping from dex files @@ -401,43 +346,41 @@ class ClassLinker { OatFile* OpenOat(const ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void InitFromImageCallback(Object* obj, void* arg) + static void InitFromImageCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For early bootstrapping by Init - Class* AllocClass(Thread* self, Class* java_lang_Class, size_t class_size) + mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Alloc* convenience functions to avoid needing to pass in Class* + // Alloc* convenience functions to avoid needing to pass in mirror::Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. 
- Class* AllocClass(Thread* self, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - DexCache* AllocDexCache(Thread* self, const DexFile& dex_file) + mirror::Class* AllocClass(Thread* self, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Field* AllocField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Method* AllocMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Constructor* AllocConstructor(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Field* AllocField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Method* AllocMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Constructor* AllocConstructor(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return InitializePrimitiveClass(AllocClass(self, sizeof(Class)), type); - } - Class* InitializePrimitiveClass(Class* primitive_class, Primitive::Type type) + mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) + mirror::Class* CreateArrayClass(const std::string& descriptor, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) + void AppendToBootClassPath(const DexFile& dex_file, SirtRef& dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ConstructFieldMap(const 
DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - Class* c, SafeMap& field_map) + mirror::Class* c, SafeMap& field_map) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOfClass(const DexFile& dex_file, @@ -445,18 +388,20 @@ class ClassLinker { void LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - SirtRef& klass, - ClassLoader* class_loader) + SirtRef& klass, + mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef& klass, - SirtRef& dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, + SirtRef& klass, SirtRef& dst) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* LoadMethod(Thread* self, const DexFile& dex_file, - const ClassDataItemIterator& dex_method, - SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::AbstractMethod* LoadMethod(Thread* self, const DexFile& dex_file, + const ClassDataItemIterator& dex_method, + SirtRef& klass) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupStaticTrampolines(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor); @@ -464,74 +409,75 @@ class ClassLinker { // Attempts to insert a class into a class table. Returns NULL if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. 
- Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) + mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) + void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); - bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) + bool InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_statics) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock); - bool ValidateSuperClassDescriptors(const Class* klass) + bool WaitForInitializeClass(mirror::Class* klass, Thread* self, ObjectLock& lock); + bool ValidateSuperClassDescriptors(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) + bool InitializeSuperClass(mirror::Class* klass, bool can_run_clinit, bool can_init_fields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize static fields, returns true if fields were initialized. 
- bool InitializeStaticFields(Class* klass) + bool InitializeStaticFields(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(const char* descriptor, - const Class* klass1, - const Class* klass2) + const mirror::Class* klass1, + const mirror::Class* klass2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsSameMethodSignatureInDifferentClassContexts(const AbstractMethod* descriptor, - const Class* klass1, - const Class* klass2) + bool IsSameMethodSignatureInDifferentClassContexts(const mirror::AbstractMethod* method, + const mirror::Class* klass1, + const mirror::Class* klass2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkClass(SirtRef& klass, ObjectArray* interfaces) + bool LinkClass(SirtRef& klass, mirror::ObjectArray* interfaces) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkSuperClass(SirtRef& klass) + bool LinkSuperClass(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) + bool LoadSuperAndInterfaces(SirtRef& klass, const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkMethods(SirtRef& klass, ObjectArray* interfaces) + bool LinkMethods(SirtRef& klass, mirror::ObjectArray* interfaces) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkVirtualMethods(SirtRef& klass) + bool LinkVirtualMethods(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkInterfaceMethods(SirtRef& klass, ObjectArray* interfaces) + bool LinkInterfaceMethods(SirtRef& klass, + mirror::ObjectArray* interfaces) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkStaticFields(SirtRef& klass) + bool LinkStaticFields(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkInstanceFields(SirtRef& klass) + bool LinkInstanceFields(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkFields(SirtRef& klass, bool is_static) + bool LinkFields(SirtRef& 
klass, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CreateReferenceInstanceOffsets(SirtRef& klass) + void CreateReferenceInstanceOffsets(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CreateReferenceStaticOffsets(SirtRef& klass) + void CreateReferenceStaticOffsets(SirtRef& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CreateReferenceOffsets(SirtRef& klass, bool is_static, + void CreateReferenceOffsets(SirtRef& klass, bool is_static, uint32_t reference_offsets) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots - const std::vector& GetDexCaches() { + const std::vector& GetDexCaches() { return dex_caches_; } @@ -547,28 +493,29 @@ class ClassLinker { EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* CreateProxyConstructor(Thread* self, SirtRef& klass, Class* proxy_class) + mirror::AbstractMethod* CreateProxyConstructor(Thread* self, SirtRef& klass, + mirror::Class* proxy_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* CreateProxyMethod(Thread* self, SirtRef& klass, - SirtRef& prototype) + mirror::AbstractMethod* CreateProxyMethod(Thread* self, SirtRef& klass, + SirtRef& prototype) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::vector boot_class_path_; mutable Mutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - std::vector dex_caches_ GUARDED_BY(dex_lock_); + std::vector dex_caches_ GUARDED_BY(dex_lock_); std::vector oat_files_ GUARDED_BY(dex_lock_); // multimap from a string hash code of a class descriptor to - // Class* instances. Results should be compared for a matching + // mirror::Class* instances. Results should be compared for a matching // Class::descriptor_ and Class::class_loader_. 
- typedef std::multimap Table; - Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_); + typedef std::multimap Table; + Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_); Table classes_ GUARDED_BY(Locks::classlinker_classes_lock_); - Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader, - size_t hash, const Table& classes) + mirror::Class* LookupClassLocked(const char* descriptor, const mirror::ClassLoader* class_loader, + size_t hash, const Table& classes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::classlinker_classes_lock_); @@ -615,20 +562,14 @@ class ClassLinker { kJavaLangStackTraceElementArrayClass, kClassRootsMax, }; - ObjectArray* class_roots_; + mirror::ObjectArray* class_roots_; - Class* GetClassRoot(ClassRoot class_root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(class_roots_ != NULL); - Class* klass = class_roots_->Get(class_root); - DCHECK(klass != NULL); - return klass; - } + mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetClassRoot(ClassRoot class_root, Class* klass) + void SetClassRoot(ClassRoot class_root, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* GetClassRoots() { + mirror::ObjectArray* GetClassRoots() { DCHECK(class_roots_ != NULL); return class_roots_; } @@ -641,7 +582,7 @@ class ClassLinker { return descriptor; } - IfTable* array_iftable_; + mirror::IfTable* array_iftable_; bool init_done_; bool is_dirty_; @@ -652,7 +593,7 @@ class ClassLinker { friend class ImageWriter; // for GetClassRoots friend class ObjectTest; FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors); - FRIEND_TEST(DexCacheTest, Open); + FRIEND_TEST(mirror::DexCacheTest, Open); FRIEND_TEST(ExceptionTest, FindExceptionHandler); FRIEND_TEST(ObjectTest, AllocObjectArray); DISALLOW_COPY_AND_ASSIGN(ClassLinker); diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index 
d32e91e98a..893e7a4a6b 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -19,13 +19,24 @@ #include #include "UniquePtr.h" +#include "class_linker-inl.h" #include "common_test.h" -#include "dex_cache.h" #include "dex_file.h" #include "heap.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" +#include "mirror/stack_trace_element.h" #include "runtime_support.h" #include "sirt_ref.h" +using namespace art::mirror; + namespace art { class ClassLinkerTest : public CommonTest { @@ -600,10 +611,10 @@ struct FieldClassOffsets : public CheckOffsets { }; }; -struct MethodClassOffsets : public CheckOffsets { - MethodClassOffsets() : CheckOffsets(true, "Ljava/lang/reflect/Method;") { +struct MethodClassOffsets : public CheckOffsets { + MethodClassOffsets() : CheckOffsets(true, "Ljava/lang/reflect/Method;") { // alphabetical references - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(MethodClass, ORDER_BY_SIGNATURE_), "ORDER_BY_SIGNATURE")); + offsets.push_back(CheckOffset(OFFSETOF_MEMBER(AbstractMethodClass, ORDER_BY_SIGNATURE_), "ORDER_BY_SIGNATURE")); }; }; diff --git a/src/class_loader.h b/src/class_loader.h deleted file mode 100644 index 029c4a20ca..0000000000 --- a/src/class_loader.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_SRC_CLASS_LOADER_H_ -#define ART_SRC_CLASS_LOADER_H_ - -#include - -#include "dex_file.h" -#include "object.h" - -namespace art { - -// C++ mirror of java.lang.ClassLoader -class MANAGED ClassLoader : public Object { - private: - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". - Object* packages_; - ClassLoader* parent_; - Object* proxyCache_; - - friend struct ClassLoaderOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader); -}; - -} // namespace art - -#endif // ART_SRC_CLASS_LOADER_H_ diff --git a/src/common_test.h b/src/common_test.h index 5a5479a05d..46a8309c2a 100644 --- a/src/common_test.h +++ b/src/common_test.h @@ -26,12 +26,12 @@ #include "base/stringprintf.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" -#include "class_loader.h" #include "compiler.h" #include "dex_file.h" #include "gtest/gtest.h" #include "heap.h" #include "instruction_set.h" +#include "mirror/class_loader.h" #include "oat_file.h" #include "object_utils.h" #include "os.h" @@ -171,7 +171,7 @@ class ScratchFile { class CommonTest : public testing::Test { public: - static void MakeExecutable(const ByteArray* code_array) { + static void MakeExecutable(const mirror::ByteArray* code_array) { CHECK(code_array != NULL); MakeExecutable(code_array->GetData(), code_array->GetLength()); } @@ -189,7 +189,7 @@ class CommonTest : public testing::Test { const uint32_t* mapping_table, const uint16_t* vmap_table, const uint8_t* gc_map, - const AbstractMethod::InvokeStub* invoke_stub) { + const mirror::AbstractMethod::InvokeStub* invoke_stub) { return OatFile::OatMethod(NULL, reinterpret_cast(code), frame_size_in_bytes, @@ -205,7 +205,7 @@ class CommonTest : public testing::Test { ); } - void MakeExecutable(AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void 
MakeExecutable(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); MethodHelper mh(method); @@ -215,8 +215,8 @@ class CommonTest : public testing::Test { const std::vector& invoke_stub = compiled_invoke_stub->GetCode(); MakeExecutable(invoke_stub); - const AbstractMethod::InvokeStub* method_invoke_stub = - reinterpret_cast( + const mirror::AbstractMethod::InvokeStub* method_invoke_stub = + reinterpret_cast( CompiledCode::CodePointer(&invoke_stub[0], compiled_invoke_stub->GetInstructionSet())); @@ -224,7 +224,7 @@ class CommonTest : public testing::Test { << " invoke_stub=" << reinterpret_cast(method_invoke_stub); if (!method->IsAbstract()) { - const DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); + const mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); const CompiledMethod* compiled_method = compiler_->GetCompiledMethod(Compiler::MethodReference(&dex_file, @@ -473,14 +473,14 @@ class CommonTest : public testing::Test { ScopedLocalRef class_loader_local(soa.Env(), soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader)); jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get()); - soa.Self()->SetClassLoaderOverride(soa.Decode(class_loader_local.get())); + soa.Self()->SetClassLoaderOverride(soa.Decode(class_loader_local.get())); Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path); return class_loader; } - void CompileClass(ClassLoader* class_loader, const char* class_name) { + void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) { std::string class_descriptor(DotToDescriptor(class_name)); - Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); + mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; for (size_t i = 0; i < 
klass->NumDirectMethods(); i++) { CompileMethod(klass->GetDirectMethod(i)); @@ -490,7 +490,7 @@ class CommonTest : public testing::Test { } } - void CompileMethod(AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); compiler_->CompileOne(method); MakeExecutable(method); @@ -498,29 +498,29 @@ class CommonTest : public testing::Test { MakeExecutable(runtime_->GetJniDlsymLookupStub()); } - void CompileDirectMethod(ClassLoader* class_loader, + void CompileDirectMethod(mirror::ClassLoader* class_loader, const char* class_name, const char* method_name, const char* signature) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); - Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); + mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; - AbstractMethod* method = klass->FindDirectMethod(method_name, signature); + mirror::AbstractMethod* method = klass->FindDirectMethod(method_name, signature); CHECK(method != NULL) << "Direct method not found: " << class_name << "." 
<< method_name << signature; CompileMethod(method); } - void CompileVirtualMethod(ClassLoader* class_loader, + void CompileVirtualMethod(mirror::ClassLoader* class_loader, const char* class_name, const char* method_name, const char* signature) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); - Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); + mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; - AbstractMethod* method = klass->FindVirtualMethod(method_name, signature); + mirror::AbstractMethod* method = klass->FindVirtualMethod(method_name, signature); CHECK(method != NULL) << "Virtual method not found: " << class_name << "." << method_name << signature; CompileMethod(method); diff --git a/src/common_throws.cc b/src/common_throws.cc index cefc4abdf9..734d544bd2 100644 --- a/src/common_throws.cc +++ b/src/common_throws.cc @@ -17,8 +17,12 @@ #include "common_throws.h" #include "base/logging.h" +#include "class_linker-inl.h" #include "dex_instruction.h" #include "invoke_type.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "thread.h" @@ -26,7 +30,7 @@ namespace art { -static void AddReferrerLocation(std::ostream& os, const AbstractMethod* referrer) +static void AddReferrerLocation(std::ostream& os, const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { ClassHelper kh(referrer->GetDeclaringClass()); @@ -37,7 +41,7 @@ static void AddReferrerLocation(std::ostream& os, const AbstractMethod* referrer } } -static void AddReferrerLocationFromClass(std::ostream& os, Class* referrer) +static void AddReferrerLocationFromClass(std::ostream& os, mirror::Class* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { 
ClassHelper kh(referrer); @@ -51,16 +55,16 @@ static void AddReferrerLocationFromClass(std::ostream& os, Class* referrer) // NullPointerException -void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read) { +void ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read) { std::ostringstream msg; msg << "Attempt to " << (is_read ? "read from" : "write to") << " field '" << PrettyField(field, true) << "' on a null object reference"; Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); } -void ThrowNullPointerExceptionForMethodAccess(AbstractMethod* caller, uint32_t method_idx, +void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx, InvokeType type) { - DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache(); + mirror::DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); std::ostringstream msg; msg << "Attempt to invoke " << type << " method '" @@ -68,7 +72,7 @@ void ThrowNullPointerExceptionForMethodAccess(AbstractMethod* caller, uint32_t m Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); } -void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t dex_pc) { +void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc) { const DexFile::CodeItem* code = MethodHelper(throw_method).GetCodeItem(); CHECK_LT(dex_pc, code->insns_size_in_code_units_); const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); @@ -93,7 +97,7 @@ void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t d case Instruction::IGET_BYTE: case Instruction::IGET_CHAR: case Instruction::IGET_SHORT: { - Field* field = + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false); ThrowNullPointerExceptionForFieldAccess(field, true /* read 
*/); break; @@ -105,7 +109,7 @@ void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t d case Instruction::IPUT_BYTE: case Instruction::IPUT_CHAR: case Instruction::IPUT_SHORT: { - Field* field = + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false); ThrowNullPointerExceptionForFieldAccess(field, false /* write */); break; @@ -149,7 +153,7 @@ void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t d // IllegalAccessError -void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) { +void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) { std::ostringstream msg; msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '" << PrettyDescriptor(accessed) << "'"; @@ -157,9 +161,9 @@ void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) { Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); } -void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed, - const AbstractMethod* caller, - const AbstractMethod* called, +void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, + const mirror::AbstractMethod* caller, + const mirror::AbstractMethod* called, InvokeType type) { std::ostringstream msg; msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '" @@ -169,7 +173,7 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* acces Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); } -void ThrowIllegalAccessErrorMethod(Class* referrer, AbstractMethod* accessed) { +void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::AbstractMethod* accessed) { std::ostringstream msg; msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '" << PrettyDescriptor(referrer) << 
"'"; @@ -177,7 +181,7 @@ void ThrowIllegalAccessErrorMethod(Class* referrer, AbstractMethod* accessed) { Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); } -void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed) { +void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::Field* accessed) { std::ostringstream msg; msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '" << PrettyDescriptor(referrer) << "'"; @@ -185,7 +189,8 @@ void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed) { Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); } -void ThrowIllegalAccessErrorFinalField(const AbstractMethod* referrer, Field* accessed) { +void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, + mirror::Field* accessed) { std::ostringstream msg; msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '" << PrettyMethod(referrer) << "'"; @@ -196,7 +201,8 @@ void ThrowIllegalAccessErrorFinalField(const AbstractMethod* referrer, Field* ac // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, - AbstractMethod* method, const AbstractMethod* referrer) { + mirror::AbstractMethod* method, + const mirror::AbstractMethod* referrer) { std::ostringstream msg; msg << "The method '" << PrettyMethod(method) << "' was expected to be of type " << expected_type << " but instead was found to be of type " << found_type; @@ -205,9 +211,9 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun msg.str().c_str()); } -void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const AbstractMethod* interface_method, - Object* this_object, - const AbstractMethod* referrer) { +void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::AbstractMethod* interface_method, + mirror::Object* 
this_object, + const mirror::AbstractMethod* referrer) { // Referrer is calling interface_method on this_object, however, the interface_method isn't // implemented by this_object. CHECK(this_object != NULL); @@ -221,8 +227,8 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const AbstractMe msg.str().c_str()); } -void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static, - const AbstractMethod* referrer) { +void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, bool is_static, + const mirror::AbstractMethod* referrer) { std::ostringstream msg; msg << "Expected '" << PrettyField(resolved_field) << "' to be a " << (is_static ? "static" : "instance") << " field" << " rather than a " @@ -234,8 +240,8 @@ void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is // NoSuchMethodError -void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name, - const StringPiece& signature, const AbstractMethod* referrer) { +void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, + const StringPiece& signature, const mirror::AbstractMethod* referrer) { std::ostringstream msg; ClassHelper kh(c); msg << "No " << type << " method " << name << signature @@ -244,8 +250,8 @@ void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name, Thread::Current()->ThrowNewException("Ljava/lang/NoSuchMethodError;", msg.str().c_str()); } -void ThrowNoSuchMethodError(uint32_t method_idx, const AbstractMethod* referrer) { - DexCache* dex_cache = referrer->GetDeclaringClass()->GetDexCache(); +void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer) { + mirror::DexCache* dex_cache = referrer->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); std::ostringstream msg; msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'"; diff --git a/src/common_throws.h 
b/src/common_throws.h index 33769c49b8..9e28bd72e3 100644 --- a/src/common_throws.h +++ b/src/common_throws.h @@ -18,62 +18,73 @@ #define ART_SRC_COMMON_THROWS_H_ #include "base/mutex.h" -#include "object.h" +#include "invoke_type.h" namespace art { +namespace mirror { +class AbstractMethod; +class Class; +class Field; +class Object; +} // namespace mirror +class StringPiece; // NullPointerException -void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read) +void ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNullPointerExceptionForMethodAccess(AbstractMethod* caller, uint32_t method_idx, InvokeType type) +void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t dex_pc) +void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IllegalAccessError -void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) +void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed, - const AbstractMethod* caller, const AbstractMethod* called, +void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, + const mirror::AbstractMethod* caller, + const mirror::AbstractMethod* called, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIllegalAccessErrorMethod(Class* referrer, AbstractMethod* accessed) +void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::AbstractMethod* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIllegalAccessErrorField(Class* referrer, Field* 
accessed) +void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::Field* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIllegalAccessErrorFinalField(const AbstractMethod* referrer, Field* accessed) +void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, + mirror::Field* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, - AbstractMethod* method, const AbstractMethod* referrer) + mirror::AbstractMethod* method, + const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const AbstractMethod* interface_method, - Object* this_object, - const AbstractMethod* referrer) +void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::AbstractMethod* interface_method, + mirror::Object* this_object, + const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static, - const AbstractMethod* referrer) +void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, bool is_static, + const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // NoSuchMethodError -void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name, - const StringPiece& signature, const AbstractMethod* referrer) +void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, + const StringPiece& signature, const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNoSuchMethodError(uint32_t method_idx, const AbstractMethod* referrer) +void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff 
--git a/src/compiled_class.h b/src/compiled_class.h index 97dd8ee8d1..f050ee6a7e 100644 --- a/src/compiled_class.h +++ b/src/compiled_class.h @@ -17,19 +17,19 @@ #ifndef ART_SRC_COMPILED_CLASS_H_ #define ART_SRC_COMPILED_CLASS_H_ -#include "object.h" +#include "mirror/class.h" namespace art { class CompiledClass { public: - explicit CompiledClass(Class::Status status) : status_(status) {} + explicit CompiledClass(mirror::Class::Status status) : status_(status) {} ~CompiledClass() {} - Class::Status GetStatus() const { + mirror::Class::Status GetStatus() const { return status_; } private: - const Class::Status status_; + const mirror::Class::Status status_; }; } // namespace art diff --git a/src/compiler.cc b/src/compiler.cc index 18460ceda2..903b70a4ad 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -23,15 +23,22 @@ #include "base/stl_util.h" #include "class_linker.h" -#include "class_loader.h" -#include "dex_cache.h" #include "jni_internal.h" #include "oat_compilation_unit.h" #include "oat_file.h" #include "oat/runtime/stub.h" #include "object_utils.h" #include "runtime.h" +#include "gc/card_table-inl.h" #include "gc/space.h" +#include "mirror/class_loader.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "thread.h" @@ -431,7 +438,7 @@ CompilerTls* Compiler::GetTls() { return res; } -ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set, +mirror::ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set, Runtime::TrampolineType type) { switch (instruction_set) { case kArm: @@ -447,7 +454,7 @@ ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set, } } -ByteArray* Compiler::CreateJniDlsymLookupStub(InstructionSet instruction_set) { 
+mirror::ByteArray* Compiler::CreateJniDlsymLookupStub(InstructionSet instruction_set) { switch (instruction_set) { case kArm: case kThumb2: @@ -462,7 +469,7 @@ ByteArray* Compiler::CreateJniDlsymLookupStub(InstructionSet instruction_set) { } } -ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_set) { +mirror::ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_set) { switch (instruction_set) { case kArm: case kThumb2: @@ -497,7 +504,7 @@ void Compiler::CompileAll(jobject class_loader, } } -void Compiler::CompileOne(const AbstractMethod* method) { +void Compiler::CompileOne(const mirror::AbstractMethod* method) { DCHECK(!Runtime::Current()->IsStarted()); Thread* self = Thread::Current(); jobject class_loader; @@ -566,12 +573,12 @@ void Compiler::RecordClassStatus(ClassReference ref, CompiledClass* compiled_cla bool Compiler::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) { ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); if (!IsImage()) { stats_->TypeNotInDexCache(); return false; } - Class* resolved_class = dex_cache->GetResolvedType(type_idx); + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == NULL) { stats_->TypeNotInDexCache(); return false; @@ -593,7 +600,7 @@ bool Compiler::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, if (IsImage()) { // We resolve all const-string strings when building for the image. 
ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache); result = true; } @@ -608,15 +615,15 @@ bool Compiler::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, bool Compiler::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) { ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); // Get type from dex cache assuming it was populated by the verifier - Class* resolved_class = dex_cache->GetResolvedType(type_idx); + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == NULL) { stats_->TypeNeedsAccessCheck(); return false; // Unknown class needs access checks. } const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx); - Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); if (referrer_class == NULL) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. @@ -636,15 +643,15 @@ bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) { ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); // Get type from dex cache assuming it was populated by the verifier. 
- Class* resolved_class = dex_cache->GetResolvedType(type_idx); + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == NULL) { stats_->TypeNeedsAccessCheck(); return false; // Unknown class needs access checks. } const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx); - Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); if (referrer_class == NULL) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. @@ -660,33 +667,33 @@ bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, return result; } -static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, - OatCompilationUnit* mUnit) +static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); - ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + mirror::ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); const DexFile::MethodId& referrer_method_id = mUnit->dex_file_->GetMethodId(mUnit->method_idx_); return mUnit->class_linker_->ResolveType(*mUnit->dex_file_, referrer_method_id.class_idx_, dex_cache, class_loader); } -static Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa, - OatCompilationUnit* mUnit, - uint32_t field_idx) +static mirror::Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit, + uint32_t field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); - ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + mirror::DexCache* dex_cache = 
mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + mirror::ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); return mUnit->class_linker_->ResolveField(*mUnit->dex_file_, field_idx, dex_cache, class_loader, false); } -static AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa, - OatCompilationUnit* mUnit, - uint32_t method_idx, - InvokeType type) +static mirror::AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa, + OatCompilationUnit* mUnit, + uint32_t method_idx, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); - ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); + mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + mirror::ClassLoader* class_loader = soa.Decode(mUnit->class_loader_); return mUnit->class_linker_->ResolveMethod(*mUnit->dex_file_, method_idx, dex_cache, class_loader, NULL, type); } @@ -698,11 +705,11 @@ bool Compiler::ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* field_offset = -1; is_volatile = true; // Try to resolve field and ignore if an Incompatible Class Change Error (ie is static). 
- Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); + mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); if (resolved_field != NULL && !resolved_field->IsStatic()) { - Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); + mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); if (referrer_class != NULL) { - Class* fields_class = resolved_field->GetDeclaringClass(); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); bool access_ok = referrer_class->CanAccess(fields_class) && referrer_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()); @@ -711,7 +718,7 @@ bool Compiler::ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* // protected field being made public by a sub-class. Resort to the dex file to determine // the correct class for the access check. const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile(); - Class* dex_fields_class = mUnit->class_linker_->ResolveType(dex_file, + mirror::Class* dex_fields_class = mUnit->class_linker_->ResolveType(dex_file, dex_file.GetFieldId(field_idx).class_idx_, referrer_class); access_ok = referrer_class->CanAccess(dex_fields_class) && @@ -746,11 +753,11 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU is_referrers_class = false; is_volatile = true; // Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static). 
- Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); + mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); if (resolved_field != NULL && resolved_field->IsStatic()) { - Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); + mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); if (referrer_class != NULL) { - Class* fields_class = resolved_field->GetDeclaringClass(); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); if (fields_class == referrer_class) { is_referrers_class = true; // implies no worrying about class initialization field_offset = resolved_field->GetOffset().Int32Value(); @@ -767,7 +774,7 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU // the correct class for the access check. Don't change the field's class as that is // used to identify the SSB. const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile(); - Class* dex_fields_class = + mirror::Class* dex_fields_class = mUnit->class_linker_->ResolveType(dex_file, dex_file.GetFieldId(field_idx).class_idx_, referrer_class); @@ -781,7 +788,7 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU // in its static storage base (which may fail if it doesn't have a slot for it) // TODO: for images we can elide the static storage base null check // if we know there's a non-null entry in the image - DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); + mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); if (fields_class->GetDexCache() == dex_cache) { // common case where the dex cache of both the referrer and the field are the same, // no need to search the dex file @@ -820,7 +827,8 @@ bool Compiler::ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mU return false; // Incomplete knowledge needs slow path. 
} -void Compiler::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, AbstractMethod* method, +void Compiler::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, + mirror::AbstractMethod* method, uintptr_t& direct_code, uintptr_t& direct_method) { // For direct and static methods compute possible direct_code and direct_method values, ie // an address for the Method* being invoked and an address of the code for that Method*. @@ -876,15 +884,15 @@ bool Compiler::ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, vtable_idx = -1; direct_code = 0; direct_method = 0; - AbstractMethod* resolved_method = + mirror::AbstractMethod* resolved_method = ComputeMethodReferencedFromCompilingMethod(soa, mUnit, method_idx, type); if (resolved_method != NULL) { // Don't try to fast-path if we don't understand the caller's class or this appears to be an // Incompatible Class Change Error. - Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); + mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit); bool icce = resolved_method->CheckIncompatibleClassChange(type); if (referrer_class != NULL && !icce) { - Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); if (!referrer_class->CanAccess(methods_class) || !referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) { @@ -1068,7 +1076,7 @@ class CompilationContext { // classes found in the boot classpath. Since at runtime we will // select the class from the boot classpath, do not attempt to resolve // or compile it now. 
-static bool SkipClass(ClassLoader* class_loader, +static bool SkipClass(mirror::ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -1077,7 +1085,7 @@ static bool SkipClass(ClassLoader* class_loader, } const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* klass = class_linker->FindClass(descriptor, NULL); + mirror::Class* klass = class_linker->FindClass(descriptor, NULL); if (klass == NULL) { Thread* self = Thread::Current(); CHECK(self->IsExceptionPending()); @@ -1090,7 +1098,7 @@ static bool SkipClass(ClassLoader* class_loader, static void ResolveClassFieldsAndMethods(const CompilationContext* context, size_t class_def_index) LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(Thread::Current()); - ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + mirror::ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); const DexFile& dex_file = *context->GetDexFile(); // Method and Field are the worst. 
We can't resolve without either @@ -1115,11 +1123,11 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size } Thread* self = Thread::Current(); ClassLinker* class_linker = context->GetClassLinker(); - DexCache* dex_cache = class_linker->FindDexCache(dex_file); + mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file); ClassDataItemIterator it(dex_file, class_data); while (it.HasNextStaticField()) { - Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, - class_loader, true); + mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, + class_loader, true); if (field == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1134,8 +1142,8 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size requires_constructor_barrier = true; } - Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, - class_loader, false); + mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, + class_loader, false); if (field == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1147,9 +1155,9 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size class_def_index); } while (it.HasNextDirectMethod()) { - AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), dex_cache, - class_loader, NULL, - it.GetMethodInvokeType(class_def)); + mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, NULL, + it.GetMethodInvokeType(class_def)); if (method == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1157,9 +1165,9 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size it.Next(); } while (it.HasNextVirtualMethod()) { - AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), 
dex_cache, - class_loader, NULL, - it.GetMethodInvokeType(class_def)); + mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, NULL, + it.GetMethodInvokeType(class_def)); if (method == NULL) { CHECK(self->IsExceptionPending()); self->ClearException(); @@ -1175,9 +1183,9 @@ static void ResolveType(const CompilationContext* context, size_t type_idx) ScopedObjectAccess soa(Thread::Current()); ClassLinker* class_linker = context->GetClassLinker(); const DexFile& dex_file = *context->GetDexFile(); - DexCache* dex_cache = class_linker->FindDexCache(dex_file); - ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); - Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file); + mirror::ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); if (klass == NULL) { CHECK(soa.Self()->IsExceptionPending()); @@ -1214,9 +1222,9 @@ static void VerifyClass(const CompilationContext* context, size_t class_def_inde ScopedObjectAccess soa(Thread::Current()); const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def); - Class* klass = + mirror::Class* klass = context->GetClassLinker()->FindClass(descriptor, - soa.Decode(context->GetClassLoader())); + soa.Decode(context->GetClassLoader())); if (klass == NULL) { Thread* self = Thread::Current(); CHECK(self->IsExceptionPending()); @@ -1227,11 +1235,11 @@ static void VerifyClass(const CompilationContext* context, size_t class_def_inde * This is to ensure the class is structurally sound for compilation. An unsound class * will be rejected by the verifier and later skipped during compilation in the compiler. 
*/ - DexCache* dex_cache = context->GetClassLinker()->FindDexCache(*context->GetDexFile()); + mirror::DexCache* dex_cache = context->GetClassLinker()->FindDexCache(*context->GetDexFile()); std::string error_msg; if (verifier::MethodVerifier::VerifyClass(context->GetDexFile(), dex_cache, - soa.Decode(context->GetClassLoader()), + soa.Decode(context->GetClassLoader()), class_def_index, error_msg) == verifier::MethodVerifier::kHardFailure) { const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); @@ -1446,9 +1454,9 @@ static void InitializeClass(const CompilationContext* context, size_t class_def_ LOCKS_EXCLUDED(Locks::mutator_lock_) { const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); ScopedObjectAccess soa(Thread::Current()); - ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + mirror::ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def); - Class* klass = context->GetClassLinker()->FindClass(descriptor, class_loader); + mirror::Class* klass = context->GetClassLinker()->FindClass(descriptor, class_loader); Thread* self = Thread::Current(); bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1; bool can_init_static_fields = compiling_boot && @@ -1480,10 +1488,10 @@ static void InitializeClass(const CompilationContext* context, size_t class_def_ LOG(INFO) << "Initializing: " << descriptor; if (StringPiece(descriptor) == "Ljava/lang/Void;"){ // Hand initialize j.l.Void to avoid Dex file operations in un-started runtime. 
- ObjectArray* fields = klass->GetSFields(); + mirror::ObjectArray* fields = klass->GetSFields(); CHECK_EQ(fields->GetLength(), 1); fields->Get(0)->SetObj(klass, context->GetClassLinker()->FindPrimitiveClass('V')); - klass->SetStatus(Class::kStatusInitialized); + klass->SetStatus(mirror::Class::kStatusInitialized); } else { context->GetClassLinker()->EnsureInitialized(klass, true, can_init_static_fields); } @@ -1497,7 +1505,7 @@ static void InitializeClass(const CompilationContext* context, size_t class_def_ } } // Record the final class status if necessary. - Class::Status status = klass->GetStatus(); + mirror::Class::Status status = klass->GetStatus(); Compiler::ClassReference ref(context->GetDexFile(), class_def_index); CompiledClass* compiled_class = context->GetCompiler()->GetCompiledClass(ref); if (compiled_class == NULL) { @@ -1550,7 +1558,7 @@ void Compiler::CompileClass(const CompilationContext* context, size_t class_def_ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); { ScopedObjectAccess soa(Thread::Current()); - ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); + mirror::ClassLoader* class_loader = soa.Decode(context->GetClassLoader()); if (SkipClass(class_loader, dex_file, class_def)) { return; } diff --git a/src/compiler.h b/src/compiler.h index 13130d7bd6..0f99f4d2e0 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -24,12 +24,10 @@ #include "base/mutex.h" #include "compiled_class.h" #include "compiled_method.h" -#include "dex_cache.h" #include "dex_file.h" #include "instruction_set.h" #include "invoke_type.h" #include "oat_file.h" -#include "object.h" #include "runtime.h" #include "safe_map.h" #include "thread_pool.h" @@ -79,7 +77,7 @@ class Compiler { LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method - void CompileOne(const AbstractMethod* method) + void CompileOne(const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDebuggingSupported() { @@ -101,16 
+99,16 @@ class Compiler { CompilerTls* GetTls(); // Stub to throw AbstractMethodError - static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set) + static mirror::ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate the trampoline that's invoked by unresolved direct methods - static ByteArray* CreateResolutionStub(InstructionSet instruction_set, - Runtime::TrampolineType type) + static mirror::ByteArray* CreateResolutionStub(InstructionSet instruction_set, + Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set) + static mirror::ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile @@ -273,7 +271,8 @@ class Compiler { private: // Compute constant code and method pointers when possible - void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, AbstractMethod* method, + void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, + mirror::AbstractMethod* method, uintptr_t& direct_code, uintptr_t& direct_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -397,11 +396,11 @@ class Compiler { CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_; typedef const void* (*CompilerGetMethodCodeAddrFn) - (const Compiler& compiler, const CompiledMethod* cm, const AbstractMethod* method); + (const Compiler& compiler, const CompiledMethod* cm, const mirror::AbstractMethod* method); CompilerGetMethodCodeAddrFn compiler_get_method_code_addr_; - typedef const AbstractMethod::InvokeStub* (*CompilerGetMethodInvokeStubAddrFn) - (const Compiler& compiler, const CompiledInvokeStub* cm, const AbstractMethod* method); + typedef const mirror::AbstractMethod::InvokeStub* 
(*CompilerGetMethodInvokeStubAddrFn) + (const Compiler& compiler, const CompiledInvokeStub* cm, const mirror::AbstractMethod* method); CompilerGetMethodInvokeStubAddrFn compiler_get_method_invoke_stub_addr_; diff --git a/src/compiler/codegen/arm/call_arm.cc b/src/compiler/codegen/arm/call_arm.cc index cb3af5eb22..47306f402c 100644 --- a/src/compiler/codegen/arm/call_arm.cc +++ b/src/compiler/codegen/arm/call_arm.cc @@ -489,7 +489,7 @@ void ArmCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags); LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2); NewLIR3(cu, kThumb2Ldrex, r1, r0, - Object::MonitorOffset().Int32Value() >> 2); // Get object->lock + mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock // Align owner OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT); // Is lock unheld on lock or held by us (==thread_id) on unlock? @@ -498,7 +498,7 @@ void ArmCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation OpRegImm(cu, kOpCmp, r1, 0); OpIT(cu, kCondEq, ""); NewLIR4(cu, kThumb2Strex, r1, r2, r0, - Object::MonitorOffset().Int32Value() >> 2); + mirror::Object::MonitorOffset().Int32Value() >> 2); OpRegImm(cu, kOpCmp, r1, 0); OpIT(cu, kCondNe, "T"); // Go expensive route - artLockObjectFromCode(self, obj); @@ -522,7 +522,7 @@ void ArmCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation LoadValueDirectFixed(cu, rl_src, r0); // Get obj LockCallTemps(cu); // Prepare for explicit register usage GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags); - LoadWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock + LoadWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2); // Is lock unheld on lock or held by us (==thread_id) on unlock? 
OpRegRegImm(cu, kOpAnd, r3, r1, @@ -532,7 +532,7 @@ void ArmCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1); OpRegReg(cu, kOpSub, r1, r2); OpIT(cu, kCondEq, "EE"); - StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3); + StoreWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r3); // Go expensive route - UnlockObjectFromCode(obj); LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); ClobberCalleeSave(cu); diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc index e86f379694..6a1178e340 100644 --- a/src/compiler/codegen/arm/int_arm.cc +++ b/src/compiler/codegen/arm/int_arm.cc @@ -565,16 +565,16 @@ void ArmCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, Re RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; RegLocation rl_result; rl_array = LoadValue(cu, rl_array, kCoreReg); rl_index = LoadValue(cu, rl_index, kCoreReg); if (rl_dest.wide) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } /* null object? 
*/ @@ -637,13 +637,13 @@ void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, Re RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; if (size == kLong || size == kDouble) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } rl_array = LoadValue(cu, rl_array, kCoreReg); @@ -706,8 +706,8 @@ void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, Re void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale) { - int len_offset = Array::LengthOffset().Int32Value(); - int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); + int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); FlushAllRegs(cu); // Use explicit registers LockCallTemps(cu); @@ -727,7 +727,7 @@ void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL); // Get the array's class. - LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class); + LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc index e1054db201..acaad5b26b 100644 --- a/src/compiler/codegen/gen_common.cc +++ b/src/compiler/codegen/gen_common.cc @@ -330,7 +330,7 @@ void Codegen::GenFilledNewArray(CompilationUnit* cu, CallInfo* info) SRegOffset(cu, rl_first.s_reg_low)); // Set up the target pointer OpRegRegImm(cu, kOpAdd, r_dst, TargetReg(kRet0), - Array::DataOffset(component_size).Int32Value()); + mirror::Array::DataOffset(component_size).Int32Value()); // Set up the loop counter (known to be > 0) LoadConstant(cu, r_idx, elems - 1); // Generate the copy loop. Going backwards for convenience @@ -342,14 +342,15 @@ void Codegen::GenFilledNewArray(CompilationUnit* cu, CallInfo* info) OpDecAndBranch(cu, kCondGe, r_idx, target); if (cu->instruction_set == kX86) { // Restore the target pointer - OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst, -Array::DataOffset(component_size).Int32Value()); + OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst, + -mirror::Array::DataOffset(component_size).Int32Value()); } } else if (!info->is_range) { // TUNING: interleave for (int i = 0; i < elems; i++) { RegLocation rl_arg = LoadValue(cu, info->args[i], kCoreReg); StoreBaseDisp(cu, TargetReg(kRet0), - Array::DataOffset(component_size).Int32Value() + + mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.low_reg, kWord); // If the LoadValue caused a temp to be allocated, free it if (IsTemp(cu, rl_arg.low_reg)) { @@ -386,7 +387,7 @@ void Codegen::GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_sr RegLocation rl_method = LoadCurrMethod(cu); rBase = AllocTemp(cu); LoadWordDisp(cu, rl_method.low_reg, - AbstractMethod::DeclaringClassOffset().Int32Value(), rBase); + mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase); if (IsTemp(cu, rl_method.low_reg)) { FreeTemp(cu, rl_method.low_reg); } @@ -404,10 +405,10 @@ void Codegen::GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation 
rl_sr rBase = TargetReg(kArg0); LockTemp(cu, rBase); LoadWordDisp(cu, r_method, - AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(), + mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(), rBase); LoadWordDisp(cu, rBase, - Array::DataOffset(sizeof(Object*)).Int32Value() + + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + sizeof(int32_t*) * ssb_index, rBase); // rBase now points at appropriate static storage base (Class*) // or NULL if not initialized. Check for NULL and call helper if NULL. @@ -480,7 +481,7 @@ void Codegen::GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_de RegLocation rl_method = LoadCurrMethod(cu); rBase = AllocTemp(cu); LoadWordDisp(cu, rl_method.low_reg, - AbstractMethod::DeclaringClassOffset().Int32Value(), rBase); + mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase); } else { // Medium path, static storage base in a different class which // requires checks that the other class is initialized @@ -495,10 +496,10 @@ void Codegen::GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_de rBase = TargetReg(kArg0); LockTemp(cu, rBase); LoadWordDisp(cu, r_method, - AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(), + mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(), rBase); LoadWordDisp(cu, rBase, - Array::DataOffset(sizeof(Object*)).Int32Value() + + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + sizeof(int32_t*) * ssb_index, rBase); // rBase now points at appropriate static storage base (Class*) // or NULL if not initialized. Check for NULL and call helper if NULL. 
@@ -619,7 +620,7 @@ void Codegen::HandleThrowLaunchPads(CompilationUnit *cu) OpRegCopy(cu, TargetReg(kArg0), v1); if (target_x86) { // x86 leaves the array pointer in v2, so load the array length that the handler expects - OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value()); + OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); } else { OpRegCopy(cu, TargetReg(kArg1), v2); } @@ -629,7 +630,7 @@ void Codegen::HandleThrowLaunchPads(CompilationUnit *cu) OpRegCopy(cu, TargetReg(kArg2), v1); if (target_x86) { // x86 leaves the array pointer in v2; load the array length that the handler expects - OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value()); + OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); } else { OpRegCopy(cu, TargetReg(kArg1), v2); } @@ -637,7 +638,7 @@ void Codegen::HandleThrowLaunchPads(CompilationUnit *cu) } else { if (target_x86) { // x86 leaves the array pointer in v2; load the array length that the handler expects - OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value()); + OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); } else { OpRegCopy(cu, TargetReg(kArg1), v2); } @@ -799,10 +800,10 @@ void Codegen::GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation } else { // We're don't need access checks, load type from dex cache int32_t dex_cache_offset = - AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(); + mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(); LoadWordDisp(cu, rl_method.low_reg, dex_cache_offset, res_reg); int32_t offset_of_type = - Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*) + mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) * type_idx); LoadWordDisp(cu, res_reg, offset_of_type, rl_result.low_reg); if 
(!cu->compiler->CanAssumeTypeIsPresentInDexCache(*cu->dex_file, @@ -844,8 +845,8 @@ void Codegen::GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest) { /* NOTE: Most strings should be available at compile time */ - int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() + - (sizeof(String*) * string_idx); + int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() + + (sizeof(mirror::String*) * string_idx); if (!cu->compiler->CanAssumeStringIsPresentInDexCache( *cu->dex_file, string_idx) || SLOW_STRING_PATH) { // slow path, resolve string if not in dex cache @@ -853,7 +854,7 @@ void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocati LockCallTemps(cu); // Using explicit registers LoadCurrMethodDirect(cu, TargetReg(kArg2)); LoadWordDisp(cu, TargetReg(kArg2), - AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); + mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 int r_tgt = CallHelperSetup(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode)); LoadWordDisp(cu, TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); @@ -888,7 +889,7 @@ void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocati int res_reg = AllocTemp(cu); RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true); LoadWordDisp(cu, rl_method.low_reg, - AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg); + mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg); LoadWordDisp(cu, res_reg, offset_of_string, rl_result.low_reg); StoreValue(cu, rl_dest, rl_result); } @@ -942,9 +943,9 @@ void Codegen::GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation // Load dex cache entry into class_reg (kArg2) LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // 
kArg0 <= ref LoadWordDisp(cu, TargetReg(kArg1), - AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg); + mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg); int32_t offset_of_type = - Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*) + mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) * type_idx); LoadWordDisp(cu, class_reg, offset_of_type, class_reg); if (!cu->compiler->CanAssumeTypeIsPresentInDexCache( @@ -968,8 +969,8 @@ void Codegen::GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation } LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL); /* load object->klass_ */ - DCHECK_EQ(Object::ClassOffset().Int32Value(), 0); - LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1)); + DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); + LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */ LIR* call_inst; LIR* branchover = NULL; @@ -1026,10 +1027,10 @@ void Codegen::GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation r } else { // Load dex cache entry into class_reg (kArg2) LoadWordDisp(cu, TargetReg(kArg1), - AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg); + mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg); int32_t offset_of_type = - Array::DataOffset(sizeof(Class*)).Int32Value() + - (sizeof(Class*) * type_idx); + mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + + (sizeof(mirror::Class*) * type_idx); LoadWordDisp(cu, class_reg, offset_of_type, class_reg); if (!cu->compiler->CanAssumeTypeIsPresentInDexCache( *cu->dex_file, type_idx)) { @@ -1051,8 +1052,8 @@ void Codegen::GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation r /* Null is OK - continue */ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, 
TargetReg(kArg0), 0, NULL); /* load object->klass_ */ - DCHECK_EQ(Object::ClassOffset().Int32Value(), 0); - LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1)); + DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); + LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); /* kArg1 now contains object->klass_ */ LIR* branch2; if (cu->instruction_set == kThumb2) { diff --git a/src/compiler/codegen/gen_invoke.cc b/src/compiler/codegen/gen_invoke.cc index fe60affdca..78425c40bd 100644 --- a/src/compiler/codegen/gen_invoke.cc +++ b/src/compiler/codegen/gen_invoke.cc @@ -365,7 +365,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, break; case 1: // Get method->dex_cache_resolved_methods_ cg->LoadWordDisp(cu, cg->TargetReg(kArg0), - AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0)); + mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0)); // Set up direct code if known. if (direct_code != 0) { if (direct_code != static_cast(-1)) { @@ -384,13 +384,14 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, break; case 2: // Grab target method* cg->LoadWordDisp(cu, cg->TargetReg(kArg0), - Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4, + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4, cg-> TargetReg(kArg0)); break; case 3: // Grab the code from the method* if (cu->instruction_set != kX86) { if (direct_code == 0) { - cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(), + cg->LoadWordDisp(cu, cg->TargetReg(kArg0), + mirror::AbstractMethod::GetCodeOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); } break; @@ -428,20 +429,22 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, case 1: // Is "this" null? 
[use kArg1] cg->GenNullCheck(cu, info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags); // get this->klass_ [use kArg1, set kInvokeTgt] - cg->LoadWordDisp(cu, cg->TargetReg(kArg1), Object::ClassOffset().Int32Value(), + cg->LoadWordDisp(cu, cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); break; case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt] - cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(), + cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); break; case 3: // Get target method [use kInvokeTgt, set kArg0] cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), (method_idx * 4) + - Array::DataOffset(sizeof(Object*)).Int32Value(), cg->TargetReg(kArg0)); + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(), + cg->TargetReg(kArg0)); break; case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt] if (cu->instruction_set != kX86) { - cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(), + cg->LoadWordDisp(cu, cg->TargetReg(kArg0), + mirror::AbstractMethod::GetCodeOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); break; } @@ -503,12 +506,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, break; case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0] cg->LoadWordDisp(cu, cg->TargetReg(kArg0), - AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), + mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0)); break; case 2: // Grab target method* [set/use kArg0] cg->LoadWordDisp(cu, cg->TargetReg(kArg0), - Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4, + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4, cg->TargetReg(kArg0)); break; default: @@ -837,13 +840,13 @@ bool Codegen::GenInlinedCharAt(CompilationUnit* 
cu, CallInfo* info) return false; } // Location of reference to data array - int value_offset = String::ValueOffset().Int32Value(); + int value_offset = mirror::String::ValueOffset().Int32Value(); // Location of count - int count_offset = String::CountOffset().Int32Value(); + int count_offset = mirror::String::CountOffset().Int32Value(); // Starting offset within data array - int offset_offset = String::OffsetOffset().Int32Value(); + int offset_offset = mirror::String::OffsetOffset().Int32Value(); // Start of char data with array_ - int data_offset = Array::DataOffset(sizeof(uint16_t)).Int32Value(); + int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); RegLocation rl_obj = info->args[0]; RegLocation rl_idx = info->args[1]; @@ -921,7 +924,7 @@ bool Codegen::GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* inf RegLocation rl_dest = InlineTarget(cu, info); RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true); GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags); - LoadWordDisp(cu, rl_obj.low_reg, String::CountOffset().Int32Value(), + LoadWordDisp(cu, rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg); if (is_empty) { // dst = (dst == 0); @@ -1284,7 +1287,7 @@ void Codegen::GenInvoke(CompilationUnit* cu, CallInfo* info) } else { if (fast_path && info->type != kInterface) { call_inst = OpMem(cu, kOpBlx, TargetReg(kArg0), - AbstractMethod::GetCodeOffset().Int32Value()); + mirror::AbstractMethod::GetCodeOffset().Int32Value()); } else { int trampoline = 0; switch (info->type) { diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc index e2a5a02ccb..86f0527bf1 100644 --- a/src/compiler/codegen/mips/int_mips.cc +++ b/src/compiler/codegen/mips/int_mips.cc @@ -439,16 +439,16 @@ void MipsCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, R RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = 
oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; RegLocation rl_result; rl_array = LoadValue(cu, rl_array, kCoreReg); rl_index = LoadValue(cu, rl_index, kCoreReg); if (size == kLong || size == kDouble) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } /* null object? */ @@ -511,13 +511,13 @@ void MipsCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, R RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; if (size == kLong || size == kDouble) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } rl_array = LoadValue(cu, rl_array, kCoreReg); @@ -583,8 +583,8 @@ void MipsCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, R void MipsCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale) { - int len_offset = Array::LengthOffset().Int32Value(); - int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); + int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); FlushAllRegs(cu); // Use explicit registers LockCallTemps(cu); @@ -604,7 +604,7 @@ void MipsCodegen::GenArrayObjPut(CompilationUnit* 
cu, int opt_flags, RegLocation LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL); // Get the array's class. - LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class); + LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. diff --git a/src/compiler/codegen/mir_to_gbc.cc b/src/compiler/codegen/mir_to_gbc.cc index e38977ad10..2657b791ac 100644 --- a/src/compiler/codegen/mir_to_gbc.cc +++ b/src/compiler/codegen/mir_to_gbc.cc @@ -2684,7 +2684,7 @@ static void CvtArrayLength(CompilationUnit* cu, llvm::CallInst* call_inst) cg->GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue()); RegLocation rl_dest = GetLoc(cu, call_inst); RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); cg->LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg); cg->StoreValue(cu, rl_dest, rl_result); } diff --git a/src/compiler/codegen/mir_to_lir.cc b/src/compiler/codegen/mir_to_lir.cc index 2bc0f8642b..77d581fd6e 100644 --- a/src/compiler/codegen/mir_to_lir.cc +++ b/src/compiler/codegen/mir_to_lir.cc @@ -210,7 +210,7 @@ static bool CompileDalvikInstruction(CompilationUnit* cu, MIR* mir, BasicBlock* case Instruction::ARRAY_LENGTH: int len_offset; - len_offset = Array::LengthOffset().Int32Value(); + len_offset = mirror::Array::LengthOffset().Int32Value(); rl_src[0] = cg->LoadValue(cu, rl_src[0], kCoreReg); cg->GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags); rl_result = EvalLoc(cu, rl_dest, kCoreReg, true); diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc index 727c5e861a..f9b25c844c 100644 --- a/src/compiler/codegen/x86/call_x86.cc +++ 
b/src/compiler/codegen/x86/call_x86.cc @@ -163,7 +163,7 @@ void X86Codegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value()); NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT); NewLIR2(cu, kX86Xor32RR, rAX, rAX); - NewLIR3(cu, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX); + NewLIR3(cu, kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX); LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondEq); // If lock is held, go the expensive route - artLockObjectFromCode(self, obj); CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); @@ -180,10 +180,10 @@ void X86Codegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation // TODO: clear hash state? NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value()); NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT); - NewLIR3(cu, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value()); + NewLIR3(cu, kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value()); OpRegReg(cu, kOpSub, rCX, rDX); LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondNe); - NewLIR3(cu, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX); + NewLIR3(cu, kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX); LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0); branch->target = NewLIR0(cu, kPseudoTargetLabel); // Otherwise, go the expensive route - UnlockObjectFromCode(obj); diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc index 0f1fc53925..f2d023cbad 100644 --- a/src/compiler/codegen/x86/int_x86.cc +++ b/src/compiler/codegen/x86/int_x86.cc @@ -446,16 +446,16 @@ void X86Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, Re RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = 
mirror::Array::LengthOffset().Int32Value(); int data_offset; RegLocation rl_result; rl_array = LoadValue(cu, rl_array, kCoreReg); rl_index = LoadValue(cu, rl_index, kCoreReg); if (size == kLong || size == kDouble) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } /* null object? */ @@ -494,13 +494,13 @@ void X86Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, Re RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); - int len_offset = Array::LengthOffset().Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; if (size == kLong || size == kDouble) { - data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { - data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value(); + data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } rl_array = LoadValue(cu, rl_array, kCoreReg); @@ -537,8 +537,8 @@ void X86Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, Re void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale) { - int len_offset = Array::LengthOffset().Int32Value(); - int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value(); + int len_offset = mirror::Array::LengthOffset().Int32Value(); + int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); FlushAllRegs(cu); // Use explicit registers LockCallTemps(cu); @@ -558,7 +558,7 @@ void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL); // 
Get the array's class. - LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class); + LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. diff --git a/src/compiler/compiler_internals.h b/src/compiler/compiler_internals.h index 1379a3b0a7..746999d43c 100644 --- a/src/compiler/compiler_internals.h +++ b/src/compiler/compiler_internals.h @@ -28,11 +28,10 @@ #include "compiler.h" #include "compiler_ir.h" #include "compiler_utility.h" -#include "dex_cache.h" #include "frontend.h" #include "gc/card_table.h" +#include "mirror/dex_cache.h" #include "monitor.h" -#include "object.h" #include "ralloc.h" #include "thread.h" #include "utils.h" diff --git a/src/compiler/frontend.cc b/src/compiler/frontend.cc index 31423ef095..6ccbc07180 100644 --- a/src/compiler/frontend.cc +++ b/src/compiler/frontend.cc @@ -19,7 +19,7 @@ #include "dataflow.h" #include "ssa_transformation.h" #include "leb128.h" -#include "object.h" +#include "mirror/object.h" #include "runtime.h" #include "codegen/codegen_util.h" #include "codegen/mir_to_gbc.h" diff --git a/src/compiler_llvm/compiler_llvm.h b/src/compiler_llvm/compiler_llvm.h index 9aa9791ee8..bb14c49163 100644 --- a/src/compiler_llvm/compiler_llvm.h +++ b/src/compiler_llvm/compiler_llvm.h @@ -21,7 +21,7 @@ #include "compiler.h" #include "dex_file.h" #include "instruction_set.h" -#include "object.h" +#include "mirror/object.h" #include "procedure_linkage_table.h" #include @@ -31,13 +31,15 @@ #include namespace art { - class ClassLoader; class CompiledInvokeStub; class CompiledMethod; class Compiler; class OatCompilationUnit; - class AbstractMethod; -} + namespace mirror { + class AbstractMethod; + class ClassLoader; + } // namespace mirror +} // namespace art namespace llvm { @@ -47,7 +49,7 @@ namespace llvm { class PointerType; 
class StructType; class Type; -} +} // namespace llvm namespace art { diff --git a/src/compiler_llvm/gbc_expander.cc b/src/compiler_llvm/gbc_expander.cc index a7970d5403..03488747e8 100644 --- a/src/compiler_llvm/gbc_expander.cc +++ b/src/compiler_llvm/gbc_expander.cc @@ -19,8 +19,8 @@ #include "compiler.h" #include "greenland/intrinsic_helper.h" +#include "mirror/object.h" #include "oat_compilation_unit.h" -#include "object.h" #include "thread.h" #include "verifier/method_verifier.h" diff --git a/src/compiler_llvm/jni_compiler.cc b/src/compiler_llvm/jni_compiler.cc index da55d94cce..e81afed8bc 100644 --- a/src/compiler_llvm/jni_compiler.cc +++ b/src/compiler_llvm/jni_compiler.cc @@ -23,8 +23,8 @@ #include "compiler.h" #include "compiler_llvm.h" #include "ir_builder.h" +#include "mirror/object.h" #include "oat_compilation_unit.h" -#include "object.h" #include "runtime.h" #include "runtime_support_func.h" #include "utils_llvm.h" diff --git a/src/compiler_llvm/jni_compiler.h b/src/compiler_llvm/jni_compiler.h index c4287751d1..0731e92a52 100644 --- a/src/compiler_llvm/jni_compiler.h +++ b/src/compiler_llvm/jni_compiler.h @@ -21,14 +21,16 @@ namespace art { class ClassLinker; - class ClassLoader; class CompiledMethod; class Compiler; - class DexCache; class DexFile; - class AbstractMethod; class OatCompilationUnit; -} + namespace mirror { + class AbstractMethod; + class ClassLoader; + class DexCache; + } // namespace mirror +} // namespace art namespace llvm { class AllocaInst; @@ -39,7 +41,7 @@ namespace llvm { class Module; class Type; class Value; -} +} // namespace llvm namespace art { namespace compiler_llvm { diff --git a/src/compiler_llvm/method_compiler.cc b/src/compiler_llvm/method_compiler.cc index ccec7e96ec..bc3fb92075 100644 --- a/src/compiler_llvm/method_compiler.cc +++ b/src/compiler_llvm/method_compiler.cc @@ -25,8 +25,8 @@ #include "dalvik_reg.h" #include "greenland/inferred_reg_category_map.h" #include "ir_builder.h" +#include "mirror/object.h" 
#include "oat_compilation_unit.h" -#include "object.h" #include "object_utils.h" #include "runtime_support_func.h" #include "runtime_support_llvm.h" diff --git a/src/compiler_llvm/method_compiler.h b/src/compiler_llvm/method_compiler.h index f67866affd..dd9d1829c3 100644 --- a/src/compiler_llvm/method_compiler.h +++ b/src/compiler_llvm/method_compiler.h @@ -34,17 +34,20 @@ namespace art { class ClassLinker; - class ClassLoader; class CompiledMethod; class Compiler; - class DexCache; - class Field; class OatCompilationUnit; + namespace mirror { + class ClassLoader; + class DexCache; + class Field; + } // namespace mirror + namespace greenland { class InferredRegCategoryMap; - } -} + } // namespace greenland +} // namespace art namespace llvm { @@ -55,7 +58,7 @@ namespace llvm { class LLVMContext; class Module; class Type; -} +} // namespace llvm namespace art { diff --git a/src/compiler_llvm/runtime_support_builder.cc b/src/compiler_llvm/runtime_support_builder.cc index 169f8e8b42..36b5fa1974 100644 --- a/src/compiler_llvm/runtime_support_builder.cc +++ b/src/compiler_llvm/runtime_support_builder.cc @@ -19,7 +19,7 @@ #include "gc/card_table.h" #include "ir_builder.h" #include "monitor.h" -#include "object.h" +#include "mirror/object.h" #include "thread.h" #include @@ -179,7 +179,7 @@ void RuntimeSupportBuilder::EmitUnlockObject(llvm::Value* object) { kTBAARuntimeInfo); Value* monitor = irb_.LoadFromObjectOffset(object, - Object::MonitorOffset().Int32Value(), + mirror::Object::MonitorOffset().Int32Value(), irb_.getJIntTy(), kTBAARuntimeInfo); @@ -199,7 +199,7 @@ void RuntimeSupportBuilder::EmitUnlockObject(llvm::Value* object) { irb_.SetInsertPoint(bb_fast); // Set all bits to zero (except hash state) irb_.StoreToObjectOffset(object, - Object::MonitorOffset().Int32Value(), + mirror::Object::MonitorOffset().Int32Value(), hash_state, kTBAARuntimeInfo); irb_.CreateBr(bb_cont); diff --git a/src/compiler_llvm/runtime_support_builder_thumb2.cc 
b/src/compiler_llvm/runtime_support_builder_thumb2.cc index 3299afe1bd..c18ae834b5 100644 --- a/src/compiler_llvm/runtime_support_builder_thumb2.cc +++ b/src/compiler_llvm/runtime_support_builder_thumb2.cc @@ -17,8 +17,8 @@ #include "runtime_support_builder_thumb2.h" #include "ir_builder.h" +#include "mirror/object.h" #include "monitor.h" -#include "object.h" #include "thread.h" #include "utils_llvm.h" diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index d3552e94c4..8de90fff56 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -22,8 +22,8 @@ #include "compiler_runtime_func_list.h" #include "dex_file.h" #include "dex_instruction.h" +#include "mirror/object.h" #include "nth_caller_visitor.h" -#include "object.h" #include "object_utils.h" #include "reflection.h" #include "runtime_support.h" @@ -60,7 +60,7 @@ void* art_set_current_thread_from_code(void* thread_object_addr) { return NULL; } -void art_lock_object_from_code(Object* obj, Thread* thread) +void art_lock_object_from_code(mirror::Object* obj, Thread* thread) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { DCHECK(obj != NULL); // Assumed to have been checked before entry obj->MonitorEnter(thread); // May block @@ -69,7 +69,7 @@ void art_lock_object_from_code(Object* obj, Thread* thread) DCHECK(!thread->IsExceptionPending()); } -void art_unlock_object_from_code(Object* obj, Thread* thread) +void art_unlock_object_from_code(mirror::Object* obj, Thread* thread) UNLOCK_FUNCTION(monitor_lock_) { DCHECK(obj != NULL); // Assumed to have been checked before entry // MonitorExit may throw exception @@ -82,7 +82,7 @@ void art_test_suspend_from_code(Thread* thread) } ShadowFrame* art_push_shadow_frame_from_code(Thread* thread, ShadowFrame* new_shadow_frame, - AbstractMethod* method, uint32_t num_vregs) { + mirror::AbstractMethod* method, uint32_t num_vregs) { ShadowFrame* old_frame = 
thread->PushShadowFrame(new_shadow_frame); new_shadow_frame->SetMethod(method); new_shadow_frame->SetNumberOfVRegs(num_vregs); @@ -120,13 +120,13 @@ void art_throw_array_bounds_from_code(int32_t index, int32_t length) void art_throw_no_such_method_from_code(int32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // We need the calling method as context for the method_idx. - AbstractMethod* method = Thread::Current()->GetCurrentMethod(); + mirror::AbstractMethod* method = Thread::Current()->GetCurrentMethod(); ThrowNoSuchMethodError(method_idx, method); } void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* throw_method = + mirror::AbstractMethod* throw_method = Thread::Current()->GetManagedStack()->GetTopShadowFrame()->GetMethod(); ThrowNullPointerExceptionFromDexPC(throw_method, dex_pc); } @@ -135,21 +135,21 @@ void art_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_l ThrowStackOverflowError(Thread::Current()); } -void art_throw_exception_from_code(Object* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread::Current()->DeliverException(static_cast(exception)); +void art_throw_exception_from_code(mirror::Object* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread::Current()->DeliverException(static_cast(exception)); } void* art_get_and_clear_exception(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(self->IsExceptionPending()); - Throwable* exception = self->GetException(); + mirror::Throwable* exception = self->GetException(); self->ClearException(); return exception; } -int32_t art_find_catch_block_from_code(AbstractMethod* current_method, uint32_t ti_offset) +int32_t art_find_catch_block_from_code(mirror::AbstractMethod* current_method, uint32_t ti_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* exception_type = Thread::Current()->GetException()->GetClass(); + mirror::Class* exception_type = 
Thread::Current()->GetException()->GetClass(); MethodHelper mh(current_method); const DexFile::CodeItem* code_item = mh.GetCodeItem(); DCHECK_LT(ti_offset, code_item->tries_size_); @@ -164,7 +164,7 @@ int32_t art_find_catch_block_from_code(AbstractMethod* current_method, uint32_t return iter_index; } // Does this catch exception type apply? - Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); if (iter_exception_type == NULL) { // The verifier should take care of resolving all exception classes early LOG(WARNING) << "Unresolved exception class when finding catch block: " @@ -183,57 +183,57 @@ int32_t art_find_catch_block_from_code(AbstractMethod* current_method, uint32_t // Object Space //---------------------------------------------------------------------------- -Object* art_alloc_object_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_alloc_object_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, false); } -Object* art_alloc_object_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_alloc_object_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, true); } -Object* art_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, uint32_t length, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, self, false); } -Object* art_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* 
art_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, uint32_t length, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, self, true); } -Object* art_check_and_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_check_and_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, uint32_t length, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); } -Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, uint32_t length, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); } -static AbstractMethod* FindMethodHelper(uint32_t method_idx, Object* this_object, - AbstractMethod* caller_method, bool access_check, +static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* caller_method, bool access_check, InvokeType type, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); + mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { method = FindMethodFromCode(method_idx, this_object, caller_method, thread, access_check, type); @@ -254,71 +254,71 @@ static AbstractMethod* FindMethodHelper(uint32_t method_idx, Object* this_object return method; } -Object* art_find_static_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, +mirror::Object* 
art_find_static_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); } -Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, +mirror::Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); } -Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, +mirror::Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); } -Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, +mirror::Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); } -Object* +mirror::Object* art_find_interface_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); } -Object* 
art_find_interface_method_from_code(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, +mirror::Object* art_find_interface_method_from_code(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); } -Object* art_initialize_static_storage_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_initialize_static_storage_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); } -Object* art_initialize_type_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_initialize_type_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); } -Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx, - AbstractMethod* referrer, +mirror::Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be @@ -326,14 +326,14 @@ Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx, return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); } -Object* art_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx) +mirror::Object* art_resolve_string_from_code(mirror::AbstractMethod* referrer, uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveStringFromCode(referrer, string_idx); } -int32_t art_set32_static_from_code(uint32_t field_idx, AbstractMethod* referrer, int32_t 
new_value) +int32_t art_set32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int32_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(field->GetDeclaringClass(), new_value); return 0; @@ -347,9 +347,9 @@ int32_t art_set32_static_from_code(uint32_t field_idx, AbstractMethod* referrer, return -1; } -int32_t art_set64_static_from_code(uint32_t field_idx, AbstractMethod* referrer, int64_t new_value) +int32_t art_set64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(field->GetDeclaringClass(), new_value); return 0; @@ -363,15 +363,15 @@ int32_t art_set64_static_from_code(uint32_t field_idx, AbstractMethod* referrer, return -1; } -int32_t art_set_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* new_value) +int32_t art_set_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { field->SetObj(field->GetDeclaringClass(), new_value); return 0; } field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectWrite, sizeof(Object*)); + StaticObjectWrite, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { 
field->SetObj(field->GetDeclaringClass(), new_value); return 0; @@ -379,9 +379,9 @@ int32_t art_set_obj_static_from_code(uint32_t field_idx, AbstractMethod* referre return -1; } -int32_t art_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer) +int32_t art_get32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(field->GetDeclaringClass()); } @@ -393,9 +393,9 @@ int32_t art_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer) return 0; } -int64_t art_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer) +int64_t art_get64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(field->GetDeclaringClass()); } @@ -407,24 +407,24 @@ int64_t art_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer) return 0; } -Object* art_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer) +mirror::Object* art_get_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(field->GetDeclaringClass()); } field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectRead, sizeof(Object*)); + 
StaticObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(field->GetDeclaringClass()); } return 0; } -int32_t art_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, uint32_t new_value) +int32_t art_set32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, + mirror::Object* obj, uint32_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(obj, new_value); return 0; @@ -438,10 +438,10 @@ int32_t art_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referre return -1; } -int32_t art_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, int64_t new_value) +int32_t art_set64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, + mirror::Object* obj, int64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(obj, new_value); return 0; @@ -455,16 +455,16 @@ int32_t art_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referre return -1; } -int32_t art_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, Object* new_value) +int32_t art_set_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, + mirror::Object* obj, mirror::Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, 
sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { field->SetObj(obj, new_value); return 0; } field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectWrite, sizeof(Object*)); + InstanceObjectWrite, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { field->SetObj(obj, new_value); return 0; @@ -472,9 +472,9 @@ int32_t art_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* refer return -1; } -int32_t art_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) +int32_t art_get32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(obj); } @@ -486,9 +486,9 @@ int32_t art_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referre return 0; } -int64_t art_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) +int64_t art_get64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(obj); } @@ -500,22 +500,22 @@ int64_t art_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referre return 0; } -Object* art_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) +mirror::Object* art_get_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, 
referrer, InstanceObjectRead, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(obj); } field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectRead, sizeof(Object*)); + InstanceObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(obj); } return 0; } -void art_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc, - Array* array, uint32_t payload_offset) +void art_fill_array_data_from_code(mirror::AbstractMethod* method, uint32_t dex_pc, + mirror::Array* array, uint32_t payload_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Test: Is array equal to null? (Guard NullPointerException) if (UNLIKELY(array == NULL)) { @@ -555,14 +555,14 @@ void art_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc, // Type checking, in the nature of casting //---------------------------------------------------------------------------- -int32_t art_is_assignable_from_code(const Class* dest_type, const Class* src_type) +int32_t art_is_assignable_from_code(const mirror::Class* dest_type, const mirror::Class* src_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type != NULL); DCHECK(src_type != NULL); return dest_type->IsAssignableFrom(src_type) ? 
1 : 0; } -void art_check_cast_from_code(const Class* dest_type, const Class* src_type) +void art_check_cast_from_code(const mirror::Class* dest_type, const mirror::Class* src_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); DCHECK(src_type->IsClass()) << PrettyClass(src_type); @@ -574,16 +574,16 @@ void art_check_cast_from_code(const Class* dest_type, const Class* src_type) } } -void art_check_put_array_element_from_code(const Object* element, const Object* array) +void art_check_put_array_element_from_code(const mirror::Object* element, const mirror::Object* array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (element == NULL) { return; } DCHECK(array != NULL); - Class* array_class = array->GetClass(); + mirror::Class* array_class = array->GetClass(); DCHECK(array_class != NULL); - Class* component_type = array_class->GetComponentType(); - Class* element_class = element->GetClass(); + mirror::Class* component_type = array_class->GetComponentType(); + mirror::Class* element_class = element->GetClass(); if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "%s cannot be stored in an array of type %s", @@ -634,11 +634,11 @@ void art_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, jobject lo PopLocalReferences(saved_local_ref_cookie, self); } -Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie, +mirror::Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie, Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); - Object* o = self->DecodeJObject(result); // Must decode before pop. + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); // Process result. 
if (UNLIKELY(self->GetJniEnv()->check_jni)) { @@ -650,13 +650,13 @@ Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_r return o; } -Object* art_jni_method_end_with_reference_synchronized(jobject result, +mirror::Object* art_jni_method_end_with_reference_synchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - Object* o = self->DecodeJObject(result); + mirror::Object* o = self->DecodeJObject(result); PopLocalReferences(saved_local_ref_cookie, self); // Process result. if (UNLIKELY(self->GetJniEnv()->check_jni)) { @@ -711,12 +711,12 @@ static void* art_find_compiler_runtime_func(const char* name) { // Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation // handler which is a field within the proxy object receiver. The var args encode the arguments // with the last argument being a pointer to a JValue to store the result in. -void art_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...) +void art_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_method, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list ap; va_start(ap, proxy_method); - Object* receiver = va_arg(ap, Object*); + mirror::Object* receiver = va_arg(ap, mirror::Object*); Thread* self = va_arg(ap, Thread*); MethodHelper proxy_mh(proxy_method); @@ -734,19 +734,19 @@ void art_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...) jobject rcvr_jobj = soa.AddLocalReference(receiver); // Convert proxy method into expected interface method. 
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); DCHECK(interface_method != NULL); DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); jobject interface_method_jobj = soa.AddLocalReference(interface_method); - // Record arguments and turn Object* arguments into jobject to survive GC. + // Record arguments and turn mirror::Object* arguments into jobject to survive GC. std::vector args; const size_t num_params = proxy_mh.NumArgs(); for (size_t i = 1; i < num_params; ++i) { jvalue val; switch (proxy_mh.GetParamPrimitiveType(i)) { case Primitive::kPrimNot: - val.l = soa.AddLocalReference(va_arg(ap, Object*)); + val.l = soa.AddLocalReference(va_arg(ap, mirror::Object*)); break; case Primitive::kPrimBoolean: // Fall-through. case Primitive::kPrimByte: // Fall-through. diff --git a/src/compiler_llvm/runtime_support_llvm.h b/src/compiler_llvm/runtime_support_llvm.h index 6c133c9902..6a0b339062 100644 --- a/src/compiler_llvm/runtime_support_llvm.h +++ b/src/compiler_llvm/runtime_support_llvm.h @@ -17,7 +17,7 @@ #ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_H_ #define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_H_ -#include "object.h" +#include "mirror/object.h" namespace art { diff --git a/src/compiler_llvm/stub_compiler.cc b/src/compiler_llvm/stub_compiler.cc index d03400f296..3a28b8733c 100644 --- a/src/compiler_llvm/stub_compiler.cc +++ b/src/compiler_llvm/stub_compiler.cc @@ -22,7 +22,7 @@ #include "compiler.h" #include "compiler_llvm.h" #include "ir_builder.h" -#include "object.h" +#include "mirror/object.h" #include "runtime_support_func.h" #include "utils_llvm.h" diff --git a/src/compiler_test.cc b/src/compiler_test.cc index f5135115b0..bd25eb3a3c 100644 --- a/src/compiler_test.cc +++ b/src/compiler_test.cc @@ -22,10 +22,13 @@ #include "UniquePtr.h" #include "class_linker.h" #include "common_test.h" -#include "dex_cache.h" 
#include "dex_file.h" #include "heap.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" namespace art { @@ -69,7 +72,7 @@ class CompilerTest : public CommonTest { const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); const char* descriptor = dex_file.GetClassDescriptor(class_def); ScopedObjectAccess soa(Thread::Current()); - Class* c = class_linker->FindClass(descriptor, soa.Decode(class_loader)); + mirror::Class* c = class_linker->FindClass(descriptor, soa.Decode(class_loader)); CHECK(c != NULL); for (size_t i = 0; i < c->NumDirectMethods(); i++) { MakeExecutable(c->GetDirectMethod(i)); @@ -92,21 +95,21 @@ TEST_F(CompilerTest, DISABLED_LARGE_CompileDexLibCore) { // All libcore references should resolve ScopedObjectAccess soa(Thread::Current()); const DexFile* dex = java_lang_dex_file_; - DexCache* dex_cache = class_linker_->FindDexCache(*dex); + mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex); EXPECT_EQ(dex->NumStringIds(), dex_cache->NumStrings()); for (size_t i = 0; i < dex_cache->NumStrings(); i++) { - const String* string = dex_cache->GetResolvedString(i); + const mirror::String* string = dex_cache->GetResolvedString(i); EXPECT_TRUE(string != NULL) << "string_idx=" << i; } EXPECT_EQ(dex->NumTypeIds(), dex_cache->NumResolvedTypes()); for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { - Class* type = dex_cache->GetResolvedType(i); + mirror::Class* type = dex_cache->GetResolvedType(i); EXPECT_TRUE(type != NULL) << "type_idx=" << i << " " << dex->GetTypeDescriptor(dex->GetTypeId(i)); } EXPECT_EQ(dex->NumMethodIds(), dex_cache->NumResolvedMethods()); for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { - AbstractMethod* method = dex_cache->GetResolvedMethod(i); + mirror::AbstractMethod* method = dex_cache->GetResolvedMethod(i); EXPECT_TRUE(method != NULL) << "method_idx=" << i << " " 
<< dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i)) << " " << dex->GetMethodName(dex->GetMethodId(i)); @@ -117,7 +120,7 @@ TEST_F(CompilerTest, DISABLED_LARGE_CompileDexLibCore) { } EXPECT_EQ(dex->NumFieldIds(), dex_cache->NumResolvedFields()); for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { - Field* field = dex_cache->GetResolvedField(i); + mirror::Field* field = dex_cache->GetResolvedField(i); EXPECT_TRUE(field != NULL) << "field_idx=" << i << " " << dex->GetFieldDeclaringClassDescriptor(dex->GetFieldId(i)) << " " << dex->GetFieldName(dex->GetFieldId(i)); diff --git a/src/debugger.cc b/src/debugger.cc index 3e93511ce2..312172569c 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -21,10 +21,19 @@ #include #include "class_linker.h" -#include "class_loader.h" +#include "class_linker-inl.h" #include "dex_instruction.h" +#include "gc/card_table-inl.h" #include "gc/large_object_space.h" #include "gc/space.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" #include "oat/runtime/context.h" #include "object_utils.h" #include "safe_map.h" @@ -34,6 +43,7 @@ #include "sirt_ref.h" #include "stack_indirect_reference_table.h" #include "thread_list.h" +#include "utf.h" #include "well_known_classes.h" namespace art { @@ -42,14 +52,14 @@ static const size_t kMaxAllocRecordStackDepth = 16; // Max 255. static const size_t kNumAllocRecords = 512; // Must be power of 2. 
static const uintptr_t kInvalidId = 1; -static const Object* kInvalidObject = reinterpret_cast(kInvalidId); +static const mirror::Object* kInvalidObject = reinterpret_cast(kInvalidId); class ObjectRegistry { public: ObjectRegistry() : lock_("ObjectRegistry lock") { } - JDWP::ObjectId Add(Object* o) { + JDWP::ObjectId Add(mirror::Object* o) { if (o == NULL) { return 0; } @@ -76,14 +86,14 @@ class ObjectRegistry { } MutexLock mu(Thread::Current(), lock_); - typedef SafeMap::iterator It; // C++0x auto + typedef SafeMap::iterator It; // C++0x auto It it = map_.find(id); return (it != map_.end()) ? reinterpret_cast(it->second) : reinterpret_cast(kInvalidId); } - void VisitRoots(Heap::RootVisitor* visitor, void* arg) { + void VisitRoots(RootVisitor* visitor, void* arg) { MutexLock mu(Thread::Current(), lock_); - typedef SafeMap::iterator It; // C++0x auto + typedef SafeMap::iterator It; // C++0x auto for (It it = map_.begin(); it != map_.end(); ++it) { visitor(it->second, arg); } @@ -91,11 +101,11 @@ class ObjectRegistry { private: Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - SafeMap map_; + SafeMap map_; }; struct AllocRecordStackTraceElement { - AbstractMethod* method; + mirror::AbstractMethod* method; uint32_t dex_pc; int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -104,7 +114,7 @@ struct AllocRecordStackTraceElement { }; struct AllocRecord { - Class* type; + mirror::Class* type; size_t byte_count; uint16_t thin_lock_id; AllocRecordStackTraceElement stack[kMaxAllocRecordStackDepth]; // Unused entries have NULL method. 
@@ -119,9 +129,9 @@ struct AllocRecord { }; struct Breakpoint { - AbstractMethod* method; + mirror::AbstractMethod* method; uint32_t dex_pc; - Breakpoint(AbstractMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {} + Breakpoint(mirror::AbstractMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {} }; static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) @@ -138,7 +148,7 @@ struct SingleStepControl { JDWP::JdwpStepSize step_size; JDWP::JdwpStepDepth step_depth; - const AbstractMethod* method; + const mirror::AbstractMethod* method; int32_t line_number; // Or -1 for native methods. std::set dex_pcs; int stack_depth; @@ -180,7 +190,7 @@ static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0; static std::vector gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); static SingleStepControl gSingleStepControl GUARDED_BY(Locks::breakpoint_lock_); -static bool IsBreakpoint(AbstractMethod* m, uint32_t dex_pc) +static bool IsBreakpoint(mirror::AbstractMethod* m, uint32_t dex_pc) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); @@ -200,9 +210,9 @@ static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thr return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0; } -static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) +static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = gRegistry->Get(id); + mirror::Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; return NULL; @@ -215,9 +225,9 @@ static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) return o->AsArray(); } -static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) +static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = gRegistry->Get(id); + mirror::Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; return NULL; @@ -234,13 +244,13 @@ static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::Obje EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* thread_peer = gRegistry->Get(thread_id); + mirror::Object* thread_peer = gRegistry->Get(thread_id); if (thread_peer == NULL || thread_peer == kInvalidObject) { // This isn't even an object. return JDWP::ERR_INVALID_OBJECT; } - Class* java_lang_Thread = soa.Decode(WellKnownClasses::java_lang_Thread); + mirror::Class* java_lang_Thread = soa.Decode(WellKnownClasses::java_lang_Thread); if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) { // This isn't a thread. return JDWP::ERR_INVALID_THREAD; @@ -260,7 +270,7 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { return static_cast(descriptor[0]); } -static JDWP::JdwpTag TagFromClass(Class* c) +static JDWP::JdwpTag TagFromClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(c != NULL); if (c->IsArrayClass()) { @@ -291,7 +301,7 @@ static JDWP::JdwpTag TagFromClass(Class* c) * * Null objects are tagged JT_OBJECT. */ -static JDWP::JdwpTag TagFromObject(const Object* o) +static JDWP::JdwpTag TagFromObject(const mirror::Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass()); } @@ -567,14 +577,14 @@ void Dbg::Exit(int status) { exit(status); // This is all dalvik did. 
} -void Dbg::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void Dbg::VisitRoots(RootVisitor* visitor, void* arg) { if (gRegistry != NULL) { gRegistry->VisitRoots(visitor, arg); } } std::string Dbg::GetClassName(JDWP::RefTypeId class_id) { - Object* o = gRegistry->Get(class_id); + mirror::Object* o = gRegistry->Get(class_id); if (o == NULL) { return "NULL"; } @@ -589,7 +599,7 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) { JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) { JDWP::JdwpError status; - Class* c = DecodeClass(id, status); + mirror::Class* c = DecodeClass(id, status); if (c == NULL) { return status; } @@ -599,7 +609,7 @@ JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_ob JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) { JDWP::JdwpError status; - Class* c = DecodeClass(id, status); + mirror::Class* c = DecodeClass(id, status); if (c == NULL) { return status; } @@ -613,7 +623,7 @@ JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& supercla } JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { - Object* o = gRegistry->Get(id); + mirror::Object* o = gRegistry->Get(id); if (o == NULL || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -623,7 +633,7 @@ JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Class* c = DecodeClass(id, status); + mirror::Class* c = DecodeClass(id, status); if (c == NULL) { return status; } @@ -641,7 +651,7 @@ JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = gRegistry->Get(object_id); + mirror::Object* o = gRegistry->Get(object_id); if 
(o == NULL || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -699,14 +709,14 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, return true; } - static void AppendOwnedMonitors(Object* owned_monitor, void* arg) { + static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) { OwnedMonitorVisitor* visitor = reinterpret_cast(arg); visitor->monitors.push_back(owned_monitor); visitor->stack_depths.push_back(visitor->current_stack_depth); } size_t current_stack_depth; - std::vector monitors; + std::vector monitors; std::vector stack_depths; }; UniquePtr context(Context::Create()); @@ -743,11 +753,11 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector& class std::vector& counts) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::vector classes; + std::vector classes; counts.clear(); for (size_t i = 0; i < class_ids.size(); ++i) { JDWP::JdwpError status; - Class* c = DecodeClass(class_ids[i], status); + mirror::Class* c = DecodeClass(class_ids[i], status); if (c == NULL) { return status; } @@ -762,12 +772,12 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector& class JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector& instances) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } - std::vector raw_instances; + std::vector raw_instances; Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances); for (size_t i = 0; i < raw_instances.size(); ++i) { instances.push_back(gRegistry->Add(raw_instances[i])); @@ -778,12 +788,12 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, s JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count, std::vector& referring_objects) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = gRegistry->Get(object_id); + 
mirror::Object* o = gRegistry->Get(object_id); if (o == NULL || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } - std::vector raw_instances; + std::vector raw_instances; Runtime::Current()->GetHeap()->GetReferringObjects(o, max_count, raw_instances); for (size_t i = 0; i < raw_instances.size(); ++i) { referring_objects.push_back(gRegistry->Add(raw_instances[i])); @@ -793,7 +803,7 @@ JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_c JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -811,11 +821,11 @@ void Dbg::GetClassList(std::vector& classes) { explicit ClassListCreator(std::vector& classes) : classes(classes) { } - static bool Visit(Class* c, void* arg) { + static bool Visit(mirror::Class* c, void* arg) { return reinterpret_cast(arg)->Visit(c); } - bool Visit(Class* c) { + bool Visit(mirror::Class* c) { if (!c->IsPrimitive()) { classes.push_back(static_cast(gRegistry->Add(c))); } @@ -831,7 +841,7 @@ void Dbg::GetClassList(std::vector& classes) { JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -855,7 +865,7 @@ JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* p } void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector& ids) { - std::vector classes; + std::vector classes; Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes); ids.clear(); for (size_t i = 0; i < classes.size(); ++i) { @@ -864,7 +874,7 @@ void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vectorGet(object_id); + mirror::Object* o = 
gRegistry->Get(object_id); if (o == NULL || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -887,7 +897,7 @@ JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string& signature) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -897,7 +907,7 @@ JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string& signatu JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -906,7 +916,7 @@ JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result } JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) { - Object* o = gRegistry->Get(object_id); + mirror::Object* o = gRegistry->Get(object_id); if (o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -946,7 +956,7 @@ size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) { JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) { JDWP::JdwpError status; - Array* a = DecodeArray(array_id, status); + mirror::Array* a = DecodeArray(array_id, status); if (a == NULL) { return status; } @@ -956,7 +966,7 @@ JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) { JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Array* a = DecodeArray(array_id, status); + mirror::Array* a = DecodeArray(array_id, status); if (a == NULL) { return status; } @@ -988,9 +998,9 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, memcpy(dst, &src[offset * width], count * width); } } else { - ObjectArray* oa = a->AsObjectArray(); + mirror::ObjectArray* oa = 
a->AsObjectArray(); for (int i = 0; i < count; ++i) { - Object* element = oa->Get(offset + i); + mirror::Object* element = oa->Get(offset + i); JDWP::JdwpTag specific_tag = (element != NULL) ? TagFromObject(element) : tag; expandBufAdd1(pReply, specific_tag); expandBufAddObjectId(pReply, gRegistry->Add(element)); @@ -1004,7 +1014,7 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c const uint8_t* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; - Array* a = DecodeArray(array_id, status); + mirror::Array* a = DecodeArray(array_id, status); if (a == NULL) { return status; } @@ -1040,10 +1050,10 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c memcpy(&dst[offset * width], src, count * width); } } else { - ObjectArray* oa = a->AsObjectArray(); + mirror::ObjectArray* oa = a->AsObjectArray(); for (int i = 0; i < count; ++i) { JDWP::ObjectId id = JDWP::ReadObjectId(&src); - Object* o = gRegistry->Get(id); + mirror::Object* o = gRegistry->Get(id); if (o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -1055,12 +1065,12 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c } JDWP::ObjectId Dbg::CreateString(const std::string& str) { - return gRegistry->Add(String::AllocFromModifiedUtf8(Thread::Current(), str.c_str())); + return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str())); } JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -1074,24 +1084,24 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_ JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length, JDWP::ObjectId& new_array) { JDWP::JdwpError status; - Class* c = DecodeClass(array_class_id, 
status); + mirror::Class* c = DecodeClass(array_class_id, status); if (c == NULL) { return status; } - new_array = gRegistry->Add(Array::Alloc(Thread::Current(), c, length)); + new_array = gRegistry->Add(mirror::Array::Alloc(Thread::Current(), c, length)); return JDWP::ERR_NONE; } bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) { JDWP::JdwpError status; - Class* c1 = DecodeClass(instance_class_id, status); + mirror::Class* c1 = DecodeClass(instance_class_id, status); CHECK(c1 != NULL); - Class* c2 = DecodeClass(class_id, status); + mirror::Class* c2 = DecodeClass(class_id, status); CHECK(c2 != NULL); return c1->IsAssignableFrom(c2); } -static JDWP::FieldId ToFieldId(const Field* f) +static JDWP::FieldId ToFieldId(const mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); @@ -1100,7 +1110,7 @@ static JDWP::FieldId ToFieldId(const Field* f) #endif } -static JDWP::MethodId ToMethodId(const AbstractMethod* m) +static JDWP::MethodId ToMethodId(const mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); @@ -1109,30 +1119,30 @@ static JDWP::MethodId ToMethodId(const AbstractMethod* m) #endif } -static Field* FromFieldId(JDWP::FieldId fid) +static mirror::Field* FromFieldId(JDWP::FieldId fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else - return reinterpret_cast(static_cast(fid)); + return reinterpret_cast(static_cast(fid)); #endif } -static AbstractMethod* FromMethodId(JDWP::MethodId mid) +static mirror::AbstractMethod* FromMethodId(JDWP::MethodId mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else - return reinterpret_cast(static_cast(mid)); + return reinterpret_cast(static_cast(mid)); #endif } -static void SetLocation(JDWP::JdwpLocation& location, AbstractMethod* m, uint32_t dex_pc) 
+static void SetLocation(JDWP::JdwpLocation& location, mirror::AbstractMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m == NULL) { memset(&location, 0, sizeof(location)); } else { - Class* c = m->GetDeclaringClass(); + mirror::Class* c = m->GetDeclaringClass(); location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS; location.class_id = gRegistry->Add(c); location.method_id = ToMethodId(m); @@ -1142,13 +1152,13 @@ static void SetLocation(JDWP::JdwpLocation& location, AbstractMethod* m, uint32_ std::string Dbg::GetMethodName(JDWP::MethodId method_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = FromMethodId(method_id); + mirror::AbstractMethod* m = FromMethodId(method_id); return MethodHelper(m).GetName(); } std::string Dbg::GetFieldName(JDWP::FieldId field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* f = FromFieldId(field_id); + mirror::Field* f = FromFieldId(field_id); return FieldHelper(f).GetName(); } @@ -1190,7 +1200,7 @@ static uint16_t MangleSlot(uint16_t slot, const char* name) { return newSlot; } -static uint16_t DemangleSlot(uint16_t slot, AbstractMethod* m) +static uint16_t DemangleSlot(uint16_t slot, mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (slot == kEclipseWorkaroundSlot) { return 0; @@ -1204,7 +1214,7 @@ static uint16_t DemangleSlot(uint16_t slot, AbstractMethod* m) JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -1215,7 +1225,7 @@ JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_ge expandBufAdd4BE(pReply, instance_field_count + static_field_count); for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { - Field* f = (i < instance_field_count) ? 
c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); + mirror::Field* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); FieldHelper fh(f); expandBufAddFieldId(pReply, ToFieldId(f)); expandBufAddUtf8String(pReply, fh.GetName()); @@ -1232,7 +1242,7 @@ JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_ge JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -1243,7 +1253,7 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g expandBufAdd4BE(pReply, direct_method_count + virtual_method_count); for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { - AbstractMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); + mirror::AbstractMethod* m = (i < direct_method_count) ? 
c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); MethodHelper mh(m); expandBufAddMethodId(pReply, ToMethodId(m)); expandBufAddUtf8String(pReply, mh.GetName()); @@ -1259,7 +1269,7 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { JDWP::JdwpError status; - Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } @@ -1287,7 +1297,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan return true; } }; - AbstractMethod* m = FromMethodId(method_id); + mirror::AbstractMethod* m = FromMethodId(method_id); MethodHelper mh(m); uint64_t start, end; if (m->IsNative()) { @@ -1341,14 +1351,14 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi ++pContext->variable_count; } }; - AbstractMethod* m = FromMethodId(method_id); + mirror::AbstractMethod* m = FromMethodId(method_id); MethodHelper mh(m); const DexFile::CodeItem* code_item = mh.GetCodeItem(); // arg_count considers doubles and longs to take 2 units. // variable_count considers everything to take 1 unit. std::string shorty(mh.GetShorty()); - expandBufAdd4BE(pReply, AbstractMethod::NumArgRegisters(shorty)); + expandBufAdd4BE(pReply, mirror::AbstractMethod::NumArgRegisters(shorty)); // We don't know the total number of variables yet, so leave a blank and update it later. 
size_t variable_count_offset = expandBufGetLength(pReply); @@ -1368,7 +1378,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id, std::vector& bytecodes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = FromMethodId(method_id); + mirror::AbstractMethod* m = FromMethodId(method_id); if (m == NULL) { return JDWP::ERR_INVALID_METHODID; } @@ -1396,18 +1406,18 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; - Class* c = DecodeClass(ref_type_id, status); + mirror::Class* c = DecodeClass(ref_type_id, status); if (ref_type_id != 0 && c == NULL) { return status; } - Object* o = gRegistry->Get(object_id); + mirror::Object* o = gRegistry->Get(object_id); if ((!is_static && o == NULL) || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } - Field* f = FromFieldId(field_id); + mirror::Field* f = FromFieldId(field_id); - Class* receiver_class = c; + mirror::Class* receiver_class = c; if (receiver_class == NULL && o != NULL) { receiver_class = o->GetClass(); } @@ -1448,7 +1458,7 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje LOG(FATAL) << "Unknown tag: " << tag; } } else { - Object* value = f->GetObject(o); + mirror::Object* value = f->GetObject(o); expandBufAdd1(pReply, TagFromObject(value)); expandBufAddObjectId(pReply, gRegistry->Add(value)); } @@ -1467,11 +1477,11 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::Fiel static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, int width, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Object* o = gRegistry->Get(object_id); + mirror::Object* o = gRegistry->Get(object_id); if ((!is_static && o == NULL) || o == kInvalidObject) { return 
JDWP::ERR_INVALID_OBJECT; } - Field* f = FromFieldId(field_id); + mirror::Field* f = FromFieldId(field_id); // The RI only enforces the static/non-static mismatch in one direction. // TODO: should we change the tests and check both? @@ -1499,12 +1509,12 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId f->Set32(o, value); } } else { - Object* v = gRegistry->Get(value); + mirror::Object* v = gRegistry->Get(value); if (v == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } if (v != NULL) { - Class* field_type = FieldHelper(f).GetType(); + mirror::Class* field_type = FieldHelper(f).GetType(); if (!field_type->IsAssignableFrom(v->GetClass())) { return JDWP::ERR_INVALID_OBJECT; } @@ -1525,7 +1535,7 @@ JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, } std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) { - String* s = gRegistry->Get(string_id); + mirror::String* s = gRegistry->Get(string_id); return s->ToModifiedUtf8(); } @@ -1539,9 +1549,11 @@ JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) } // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName. 
- Object* thread_object = gRegistry->Get(thread_id); - Field* java_lang_Thread_name_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); - String* s = reinterpret_cast(java_lang_Thread_name_field->GetObject(thread_object)); + mirror::Object* thread_object = gRegistry->Get(thread_id); + mirror::Field* java_lang_Thread_name_field = + soa.DecodeField(WellKnownClasses::java_lang_Thread_name); + mirror::String* s = + reinterpret_cast(java_lang_Thread_name_field->GetObject(thread_object)); if (s != NULL) { name = s->ToModifiedUtf8(); } @@ -1550,7 +1562,7 @@ JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { ScopedObjectAccess soa(Thread::Current()); - Object* thread_object = gRegistry->Get(thread_id); + mirror::Object* thread_object = gRegistry->Get(thread_id); if (thread_object == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } @@ -1568,11 +1580,11 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p return error; } - Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;"); + mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;"); CHECK(c != NULL); - Field* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;"); + mirror::Field* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;"); CHECK(f != NULL); - Object* group = f->GetObject(thread_object); + mirror::Object* group = f->GetObject(thread_object); CHECK(group != NULL); JDWP::ObjectId thread_group_id = gRegistry->Add(group); @@ -1582,40 +1594,40 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) { ScopedObjectAccess soa(Thread::Current()); - Object* thread_group = gRegistry->Get(thread_group_id); + mirror::Object* thread_group = gRegistry->Get(thread_group_id); 
CHECK(thread_group != NULL); - Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;"); + mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;"); CHECK(c != NULL); - Field* f = c->FindInstanceField("name", "Ljava/lang/String;"); + mirror::Field* f = c->FindInstanceField("name", "Ljava/lang/String;"); CHECK(f != NULL); - String* s = reinterpret_cast(f->GetObject(thread_group)); + mirror::String* s = reinterpret_cast(f->GetObject(thread_group)); return s->ToModifiedUtf8(); } JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) { - Object* thread_group = gRegistry->Get(thread_group_id); + mirror::Object* thread_group = gRegistry->Get(thread_group_id); CHECK(thread_group != NULL); - Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;"); + mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;"); CHECK(c != NULL); - Field* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;"); + mirror::Field* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;"); CHECK(f != NULL); - Object* parent = f->GetObject(thread_group); + mirror::Object* parent = f->GetObject(thread_group); return gRegistry->Add(parent); } JDWP::ObjectId Dbg::GetSystemThreadGroupId() { ScopedObjectAccessUnchecked soa(Thread::Current()); - Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); - Object* group = f->GetObject(f->GetDeclaringClass()); + mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); + mirror::Object* group = f->GetObject(f->GetDeclaringClass()); return gRegistry->Add(group); } JDWP::ObjectId Dbg::GetMainThreadGroupId() { ScopedObjectAccess soa(Thread::Current()); - Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup); - Object* group = f->GetObject(f->GetDeclaringClass()); + mirror::Field* f = 
soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup); + mirror::Object* group = f->GetObject(f->GetDeclaringClass()); return gRegistry->Add(group); } @@ -1691,7 +1703,7 @@ JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector& thread_ids) { class ThreadListVisitor { public: - ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, Object* desired_thread_group, + ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group, std::vector& thread_ids) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {} @@ -1708,20 +1720,20 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector // query all threads, so it's easier if we just don't tell them about this thread. return; } - Object* peer = t->GetPeer(); + mirror::Object* peer = t->GetPeer(); if (IsInDesiredThreadGroup(peer)) { thread_ids_.push_back(gRegistry->Add(peer)); } } private: - bool IsInDesiredThreadGroup(Object* peer) + bool IsInDesiredThreadGroup(mirror::Object* peer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // peer might be NULL if the thread is still starting up. if (peer == NULL) { // We can't tell the debugger about this thread yet. // TODO: if we identified threads to the debugger by their Thread* - // rather than their peer's Object*, we could fix this. + // rather than their peer's mirror::Object*, we could fix this. // Doing so might help us report ZOMBIE threads too. 
return false; } @@ -1729,17 +1741,17 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector if (desired_thread_group_ == NULL) { return true; } - Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer); + mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer); return (group == desired_thread_group_); } const ScopedObjectAccessUnchecked& soa_; - Object* const desired_thread_group_; + mirror::Object* const desired_thread_group_; std::vector& thread_ids_; }; ScopedObjectAccessUnchecked soa(Thread::Current()); - Object* thread_group = gRegistry->Get(thread_group_id); + mirror::Object* thread_group = gRegistry->Get(thread_group_id); ThreadListVisitor tlv(soa, thread_group, thread_ids); MutexLock mu(soa.Self(), *Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); @@ -1747,16 +1759,17 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector& child_thread_group_ids) { ScopedObjectAccess soa(Thread::Current()); - Object* thread_group = gRegistry->Get(thread_group_id); + mirror::Object* thread_group = gRegistry->Get(thread_group_id); // Get the ArrayList "groups" out of this thread group... - Field* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;"); - Object* groups_array_list = groups_field->GetObject(thread_group); + mirror::Field* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;"); + mirror::Object* groups_array_list = groups_field->GetObject(thread_group); // Get the array and size out of the ArrayList... 
- Field* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;"); - Field* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I"); - ObjectArray* groups_array = array_field->GetObject(groups_array_list)->AsObjectArray(); + mirror::Field* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;"); + mirror::Field* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I"); + mirror::ObjectArray* groups_array = + array_field->GetObject(groups_array_list)->AsObjectArray(); const int32_t size = size_field->GetInt(groups_array_list); // Copy the first 'size' elements out of the array into the result. @@ -1871,7 +1884,7 @@ JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspen ScopedLocalRef peer(Thread::Current()->GetJniEnv(), NULL); { ScopedObjectAccess soa(Thread::Current()); - peer.reset(soa.AddLocalReference(gRegistry->Get(thread_id))); + peer.reset(soa.AddLocalReference(gRegistry->Get(thread_id))); } if (peer.get() == NULL) { return JDWP::ERR_THREAD_NOT_ALIVE; @@ -1890,7 +1903,7 @@ JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspen void Dbg::ResumeThread(JDWP::ObjectId thread_id) { ScopedObjectAccessUnchecked soa(Thread::Current()); - Object* peer = gRegistry->Get(thread_id); + mirror::Object* peer = gRegistry->Get(thread_id); Thread* thread; { MutexLock mu(soa.Self(), *Locks::thread_list_lock_); @@ -1925,21 +1938,21 @@ struct GetThisVisitor : public StackVisitor { if (frame_id != GetFrameId()) { return true; // continue } - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m->IsNative() || m->IsStatic()) { this_object = NULL; } else { uint16_t reg = DemangleSlot(0, m); - this_object = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); + this_object = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); } return false; } - Object* this_object; + mirror::Object* this_object; 
JDWP::FrameId frame_id; }; -static Object* GetThis(Thread* self, AbstractMethod* m, size_t frame_id) +static mirror::Object* GetThis(Thread* self, mirror::AbstractMethod* m, size_t frame_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: should we return the 'this' we passed through to non-static native methods? if (m->IsNative() || m->IsStatic()) { @@ -1989,7 +2002,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl return true; // Not our frame, carry on. } // TODO: check that the tag is compatible with the actual type of the slot! - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); uint16_t reg = DemangleSlot(slot_, m); switch (tag_) { @@ -2037,7 +2050,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl case JDWP::JT_ARRAY: { CHECK_EQ(width_, sizeof(JDWP::ObjectId)); - Object* o = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); + mirror::Object* o = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); VLOG(jdwp) << "get array local " << reg << " = " << o; if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) { LOG(FATAL) << "Register " << reg << " expected to hold array: " << o; @@ -2053,7 +2066,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl case JDWP::JT_THREAD_GROUP: { CHECK_EQ(width_, sizeof(JDWP::ObjectId)); - Object* o = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); + mirror::Object* o = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); VLOG(jdwp) << "get object local " << reg << " = " << o; if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) { LOG(FATAL) << "Register " << reg << " expected to hold object: " << o; @@ -2128,7 +2141,7 @@ void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl return true; // Not our frame, carry on. } // TODO: check that the tag is compatible with the actual type of the slot! 
- AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); uint16_t reg = DemangleSlot(slot_, m); switch (tag_) { @@ -2155,7 +2168,7 @@ void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl case JDWP::JT_STRING: { CHECK_EQ(width_, sizeof(JDWP::ObjectId)); - Object* o = gRegistry->Get(static_cast(value_)); + mirror::Object* o = gRegistry->Get(static_cast(value_)); if (o == kInvalidObject) { UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store"; } @@ -2198,8 +2211,8 @@ void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl visitor.WalkStack(); } -void Dbg::PostLocationEvent(const AbstractMethod* m, int dex_pc, Object* this_object, int event_flags) { - Class* c = m->GetDeclaringClass(); +void Dbg::PostLocationEvent(const mirror::AbstractMethod* m, int dex_pc, mirror::Object* this_object, int event_flags) { + mirror::Class* c = m->GetDeclaringClass(); JDWP::JdwpLocation location; location.type_tag = c->IsInterface() ? 
JDWP::TT_INTERFACE : JDWP::TT_CLASS; @@ -2220,8 +2233,9 @@ void Dbg::PostLocationEvent(const AbstractMethod* m, int dex_pc, Object* this_ob } void Dbg::PostException(Thread* thread, - JDWP::FrameId throw_frame_id, AbstractMethod* throw_method, uint32_t throw_dex_pc, - AbstractMethod* catch_method, uint32_t catch_dex_pc, Throwable* exception) { + JDWP::FrameId throw_frame_id, mirror::AbstractMethod* throw_method, + uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method, + uint32_t catch_dex_pc, mirror::Throwable* exception) { if (!IsDebuggerActive()) { return; } @@ -2252,7 +2266,7 @@ void Dbg::PostException(Thread* thread, gJdwpState->PostException(&throw_location, exception_id, exception_class_id, &catch_location, this_id); } -void Dbg::PostClassPrepare(Class* c) { +void Dbg::PostClassPrepare(mirror::Class* c) { if (!IsDebuggerActive()) { return; } @@ -2271,7 +2285,7 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { } size_t frame_id; - AbstractMethod* m = self->GetCurrentMethod(NULL, &frame_id); + mirror::AbstractMethod* m = self->GetCurrentMethod(NULL, &frame_id); //LOG(INFO) << "UpdateDebugger " << PrettyMethod(m) << "@" << dex_pc << " frame " << frame_id; if (dex_pc == -1) { @@ -2375,14 +2389,14 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { void Dbg::WatchLocation(const JDWP::JdwpLocation* location) { MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); - AbstractMethod* m = FromMethodId(location->method_id); + mirror::AbstractMethod* m = FromMethodId(location->method_id); gBreakpoints.push_back(Breakpoint(m, location->dex_pc)); VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1]; } void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) { MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); - AbstractMethod* m = FromMethodId(location->method_id); + mirror::AbstractMethod* m = FromMethodId(location->method_id); for (size_t i = 0; i < gBreakpoints.size(); 
++i) { if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) { VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i]; @@ -2428,11 +2442,11 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // annotalysis. bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { Locks::breakpoint_lock_->AssertHeld(Thread::Current()); - const AbstractMethod* m = GetMethod(); + const mirror::AbstractMethod* m = GetMethod(); if (!m->IsRuntimeMethod()) { ++gSingleStepControl.stack_depth; if (gSingleStepControl.method == NULL) { - const DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache(); + const mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache(); gSingleStepControl.method = m; gSingleStepControl.line_number = -1; if (dex_cache != NULL) { @@ -2497,7 +2511,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize uint32_t last_pc; }; gSingleStepControl.dex_pcs.clear(); - const AbstractMethod* m = gSingleStepControl.method; + const mirror::AbstractMethod* m = gSingleStepControl.method; if (m->IsNative()) { gSingleStepControl.line_number = -1; } else { @@ -2618,23 +2632,23 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec } JDWP::JdwpError status; - Object* receiver = gRegistry->Get(object_id); + mirror::Object* receiver = gRegistry->Get(object_id); if (receiver == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } - Object* thread = gRegistry->Get(thread_id); + mirror::Object* thread = gRegistry->Get(thread_id); if (thread == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; } // TODO: check that 'thread' is actually a java.lang.Thread! 
- Class* c = DecodeClass(class_id, status); + mirror::Class* c = DecodeClass(class_id, status); if (c == NULL) { return status; } - AbstractMethod* m = FromMethodId(method_id); + mirror::AbstractMethod* m = FromMethodId(method_id); if (m->IsStatic() != (receiver == NULL)) { return JDWP::ERR_INVALID_METHODID; } @@ -2739,13 +2753,13 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { // We can be called while an exception is pending. We need // to preserve that across the method invocation. - SirtRef old_exception(soa.Self(), soa.Self()->GetException()); + SirtRef old_exception(soa.Self(), soa.Self()->GetException()); soa.Self()->ClearException(); // Translate the method through the vtable, unless the debugger wants to suppress it. - AbstractMethod* m = pReq->method_; + mirror::AbstractMethod* m = pReq->method_; if ((pReq->options_ & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver_ != NULL) { - AbstractMethod* actual_method = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_); + mirror::AbstractMethod* actual_method = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_); if (actual_method != m) { VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m) << " to " << PrettyMethod(actual_method); m = actual_method; @@ -2764,7 +2778,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { pReq->exception = gRegistry->Add(soa.Self()->GetException()); pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty()); if (pReq->exception != 0) { - Object* exc = soa.Self()->GetException(); + mirror::Object* exc = soa.Self()->GetException(); VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc); soa.Self()->ClearException(); pReq->result_value.SetJ(0); @@ -2801,7 +2815,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { * throwing exceptions) we really want to do the registration late. 
*/ void Dbg::RegisterObjectId(JDWP::ObjectId id) { - gRegistry->Add(reinterpret_cast(id)); + gRegistry->Add(reinterpret_cast(id)); } /* @@ -2949,7 +2963,7 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { } else { CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; ScopedObjectAccessUnchecked soa(Thread::Current()); - SirtRef name(soa.Self(), t->GetThreadName(soa)); + SirtRef name(soa.Self(), t->GetThreadName(soa)); size_t char_count = (name.get() != NULL) ? name->GetLength() : 0; const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL; @@ -3239,7 +3253,7 @@ class HeapChunkContext { Flush(); } } - const Object *obj = (const Object *)start; + const mirror::Object* obj = reinterpret_cast(start); // Determine the type of this chunk. // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. @@ -3282,7 +3296,7 @@ class HeapChunkContext { *p_++ = length - 1; } - uint8_t ExamineObject(const Object* o, bool is_native_heap) + uint8_t ExamineObject(const mirror::Object* o, bool is_native_heap) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { if (o == NULL) { return HPSG_STATE(SOLIDITY_FREE, 0); @@ -3300,7 +3314,7 @@ class HeapChunkContext { return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); } - Class* c = o->GetClass(); + mirror::Class* c = o->GetClass(); if (c == NULL) { // The object was probably just created but hasn't been initialized yet. 
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT); @@ -3416,7 +3430,7 @@ struct AllocRecordStackVisitor : public StackVisitor { if (depth >= kMaxAllocRecordStackDepth) { return false; } - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (!m->IsRuntimeMethod()) { record->stack[depth].method = m; record->stack[depth].dex_pc = GetDexPc(); @@ -3437,7 +3451,7 @@ struct AllocRecordStackVisitor : public StackVisitor { size_t depth; }; -void Dbg::RecordAllocation(Class* type, size_t byte_count) { +void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { Thread* self = Thread::Current(); CHECK(self != NULL); @@ -3499,7 +3513,7 @@ void Dbg::DumpRecentAllocations() { << PrettyClass(record->type); for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) { - const AbstractMethod* m = record->stack[stack_frame].method; + const mirror::AbstractMethod* m = record->stack[stack_frame].method; if (m == NULL) { break; } @@ -3619,7 +3633,7 @@ jbyteArray Dbg::GetRecentAllocations() { MethodHelper mh; for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) { - AbstractMethod* m = record->stack[i].method; + mirror::AbstractMethod* m = record->stack[i].method; if (m != NULL) { mh.ChangeMethod(m); class_names.Add(mh.GetDeclaringClassDescriptor()); diff --git a/src/debugger.h b/src/debugger.h index b34a401ceb..a7963496cc 100644 --- a/src/debugger.h +++ b/src/debugger.h @@ -26,10 +26,17 @@ #include #include "jdwp/jdwp.h" -#include "object.h" +#include "jni.h" +#include "jvalue.h" +#include "root_visitor.h" namespace art { - +namespace mirror { +class AbstractMethod; +class Class; +class Object; +class Throwable; +} // namespace mirror struct AllocRecord; class Thread; @@ -53,10 +60,10 @@ struct DebugInvokeReq { bool invoke_needed_; /* request */ - Object* receiver_; /* not used for ClassType.InvokeMethod */ - Object* thread_; - Class* class_; - AbstractMethod* method_; + mirror::Object* receiver_; /* not used for 
ClassType.InvokeMethod */ + mirror::Object* thread_; + mirror::Class* class_; + mirror::AbstractMethod* method_; uint32_t arg_count_; uint64_t* arg_values_; /* will be NULL if arg_count_ == 0 */ uint32_t options_; @@ -118,7 +125,7 @@ class Dbg { static void Exit(int status); - static void VisitRoots(Heap::RootVisitor* visitor, void* arg); + static void VisitRoots(RootVisitor* visitor, void* arg); /* * Class, Object, Array @@ -311,17 +318,19 @@ class Dbg { kMethodEntry = 0x04, kMethodExit = 0x08, }; - static void PostLocationEvent(const AbstractMethod* method, int pcOffset, Object* thisPtr, int eventFlags) + static void PostLocationEvent(const mirror::AbstractMethod* method, int pcOffset, + mirror::Object* thisPtr, int eventFlags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, AbstractMethod* throw_method, - uint32_t throw_dex_pc, AbstractMethod* catch_method, uint32_t catch_dex_pc, - Throwable* exception) + static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, + mirror::AbstractMethod* throw_method, + uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method, + uint32_t catch_dex_pc, mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStart(Thread* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadDeath(Thread* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void PostClassPrepare(Class* c) + static void PostClassPrepare(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UpdateDebugger(int32_t dex_pc, Thread* self) @@ -373,12 +382,11 @@ class Dbg { /* * Recent allocation tracking support. 
*/ - static void RecordAllocation(Class* type, size_t byte_count) + static void RecordAllocation(mirror::Class* type, size_t byte_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetAllocTrackingEnabled(bool enabled); static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; } - static jbyteArray GetRecentAllocations() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static jbyteArray GetRecentAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DumpRecentAllocations(); enum HpifWhen { diff --git a/src/dex2oat.cc b/src/dex2oat.cc index a2d35e48ad..bc38bdcfc9 100644 --- a/src/dex2oat.cc +++ b/src/dex2oat.cc @@ -27,10 +27,14 @@ #include "base/stringpiece.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" -#include "class_loader.h" #include "compiler.h" #include "image_writer.h" #include "leb128.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "oat_writer.h" #include "object_utils.h" #include "os.h" @@ -161,7 +165,7 @@ class Dex2Oat { continue; } std::string descriptor(DotToDescriptor(dot.c_str())); - SirtRef klass(self, class_linker->FindSystemClass(descriptor.c_str())); + SirtRef klass(self, class_linker->FindSystemClass(descriptor.c_str())); if (klass.get() == NULL) { LOG(WARNING) << "Failed to find class " << descriptor; Thread::Current()->ClearException(); @@ -173,7 +177,7 @@ class Dex2Oat { // exceptions are resolved by the verifier when there is a catch block in an interested method. // Do this here so that exception classes appear to have been specified image classes. 
std::set > unresolved_exception_types; - SirtRef java_lang_Throwable(self, + SirtRef java_lang_Throwable(self, class_linker->FindSystemClass("Ljava/lang/Throwable;")); do { unresolved_exception_types.clear(); @@ -185,10 +189,10 @@ class Dex2Oat { it != end; ++it) { uint16_t exception_type_idx = it->first; const DexFile* dex_file = it->second; - DexCache* dex_cache = class_linker->FindDexCache(*dex_file); - ClassLoader* class_loader = NULL; - SirtRef klass(self, class_linker->ResolveType(*dex_file, exception_type_idx, - dex_cache, class_loader)); + mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file); + mirror:: ClassLoader* class_loader = NULL; + SirtRef klass(self, class_linker->ResolveType(*dex_file, exception_type_idx, + dex_cache, class_loader)); if (klass.get() == NULL) { const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx); const char* descriptor = dex_file->GetTypeDescriptor(type_id); @@ -404,25 +408,25 @@ class Dex2Oat { } } - static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg) + static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set >* exceptions_to_resolve = reinterpret_cast >*>(arg); MethodHelper mh; for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { - AbstractMethod* m = c->GetVirtualMethod(i); + mirror::AbstractMethod* m = c->GetVirtualMethod(i); mh.ChangeMethod(m); ResolveExceptionsForMethod(&mh, *exceptions_to_resolve); } for (size_t i = 0; i < c->NumDirectMethods(); ++i) { - AbstractMethod* m = c->GetDirectMethod(i); + mirror::AbstractMethod* m = c->GetDirectMethod(i); mh.ChangeMethod(m); ResolveExceptionsForMethod(&mh, *exceptions_to_resolve); } return true; } - static bool RecordImageClassesVisitor(Class* klass, void* arg) + static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set* image_classes = reinterpret_cast*>(arg); if 
(klass->IsArrayClass() || klass->IsPrimitive()) { diff --git a/src/dex_cache.cc b/src/dex_cache.cc deleted file mode 100644 index 98fc93254a..0000000000 --- a/src/dex_cache.cc +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "base/logging.h" -#include "class_linker.h" -#include "dex_cache.h" -#include "heap.h" -#include "globals.h" -#include "object.h" - -namespace art { - -void DexCache::Init(const DexFile* dex_file, - String* location, - ObjectArray* strings, - ObjectArray* resolved_types, - ObjectArray* resolved_methods, - ObjectArray* resolved_fields, - ObjectArray* initialized_static_storage) { - CHECK(dex_file != NULL); - CHECK(location != NULL); - CHECK(strings != NULL); - CHECK(resolved_types != NULL); - CHECK(resolved_methods != NULL); - CHECK(resolved_fields != NULL); - CHECK(initialized_static_storage != NULL); - - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location, false); - SetFieldObject(StringsOffset(), strings, false); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types, false); - SetFieldObject(ResolvedMethodsOffset(), resolved_methods, false); - SetFieldObject(ResolvedFieldsOffset(), resolved_fields, false); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_), - 
initialized_static_storage, false); - - Runtime* runtime = Runtime::Current(); - if (runtime->HasResolutionMethod()) { - // Initialize the resolve methods array to contain trampolines for resolution. - AbstractMethod* trampoline = runtime->GetResolutionMethod(); - size_t length = resolved_methods->GetLength(); - for (size_t i = 0; i < length; i++) { - resolved_methods->SetWithoutChecks(i, trampoline); - } - } -} - -void DexCache::Fixup(AbstractMethod* trampoline) { - // Fixup the resolve methods array to contain trampoline for resolution. - CHECK(trampoline != NULL); - ObjectArray* resolved_methods = GetResolvedMethods(); - size_t length = resolved_methods->GetLength(); - for (size_t i = 0; i < length; i++) { - if (resolved_methods->GetWithoutChecks(i) == NULL) { - resolved_methods->SetWithoutChecks(i, trampoline); - } - } -} - -} // namespace art diff --git a/src/dex_cache.h b/src/dex_cache.h deleted file mode 100644 index ee44856a4d..0000000000 --- a/src/dex_cache.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_SRC_DEX_CACHE_H_ -#define ART_SRC_DEX_CACHE_H_ - -#include "base/macros.h" -#include "dex_file.h" -#include "globals.h" -#include "object.h" - -namespace art { - -class Class; -class Field; -class ImageWriter; -class AbstractMethod; -class String; -union JValue; - -class MANAGED DexCacheClass : public Class { - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(DexCacheClass); -}; - -class MANAGED DexCache : public Object { - public: - void Init(const DexFile* dex_file, - String* location, - ObjectArray* strings, - ObjectArray* types, - ObjectArray* methods, - ObjectArray* fields, - ObjectArray* initialized_static_storage) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Fixup(AbstractMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - String* GetLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), false); - } - - static MemberOffset StringsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_); - } - - static MemberOffset ResolvedFieldsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_); - } - - static MemberOffset ResolvedMethodsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_); - } - - size_t NumStrings() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetStrings()->GetLength(); - } - - size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetResolvedTypes()->GetLength(); - } - - size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetResolvedMethods()->GetLength(); - } - - size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetResolvedFields()->GetLength(); - } - - size_t NumInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetInitializedStaticStorage()->GetLength(); - } - - String* GetResolvedString(uint32_t string_idx) const - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetStrings()->Get(string_idx); - } - - void SetResolvedString(uint32_t string_idx, String* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetStrings()->Set(string_idx, resolved); - } - - Class* GetResolvedType(uint32_t type_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetResolvedTypes()->Get(type_idx); - } - - void SetResolvedType(uint32_t type_idx, Class* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetResolvedTypes()->Set(type_idx, resolved); - } - - AbstractMethod* GetResolvedMethod(uint32_t method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = GetResolvedMethods()->Get(method_idx); - // Hide resolution trampoline methods from the caller - if (method != NULL && method->GetDexMethodIndex() == DexFile::kDexNoIndex16) { - DCHECK(method == Runtime::Current()->GetResolutionMethod()); - return NULL; - } else { - return method; - } - } - - void SetResolvedMethod(uint32_t method_idx, AbstractMethod* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetResolvedMethods()->Set(method_idx, resolved); - } - - Field* GetResolvedField(uint32_t field_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetResolvedFields()->Get(field_idx); - } - - void SetResolvedField(uint32_t field_idx, Field* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetResolvedFields()->Set(field_idx, resolved); - } - - ObjectArray* GetStrings() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray* >(StringsOffset(), false); - } - - ObjectArray* GetResolvedTypes() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray* >( - OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), false); - } - - ObjectArray* GetResolvedMethods() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray* >(ResolvedMethodsOffset(), false); - 
} - - ObjectArray* GetResolvedFields() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray* >(ResolvedFieldsOffset(), false); - } - - ObjectArray* GetInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray* >( - OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_), false); - } - - const DexFile* GetDexFile() const { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), false); - } - - void SetDexFile(const DexFile* dex_file) { - return SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false); - } - - private: - ObjectArray* initialized_static_storage_; - String* location_; - ObjectArray* resolved_fields_; - ObjectArray* resolved_methods_; - ObjectArray* resolved_types_; - ObjectArray* strings_; - uint32_t dex_file_; - - friend struct DexCacheOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache); -}; - -} // namespace art - -#endif // ART_SRC_DEX_CACHE_H_ diff --git a/src/dex_cache_test.cc b/src/dex_cache_test.cc deleted file mode 100644 index b131e4c248..0000000000 --- a/src/dex_cache_test.cc +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "class_linker.h" -#include "common_test.h" -#include "dex_cache.h" -#include "heap.h" -#include "object.h" -#include "sirt_ref.h" - -#include - -namespace art { - -class DexCacheTest : public CommonTest {}; - -TEST_F(DexCacheTest, Open) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef dex_cache(soa.Self(), class_linker_->AllocDexCache(soa.Self(), - *java_lang_dex_file_)); - ASSERT_TRUE(dex_cache.get() != NULL); - - EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings()); - EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes()); - EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods()); - EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields()); - EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumInitializedStaticStorage()); - - EXPECT_LE(0, dex_cache->GetStrings()->GetLength()); - EXPECT_LE(0, dex_cache->GetResolvedTypes()->GetLength()); - EXPECT_LE(0, dex_cache->GetResolvedMethods()->GetLength()); - EXPECT_LE(0, dex_cache->GetResolvedFields()->GetLength()); - EXPECT_LE(0, dex_cache->GetInitializedStaticStorage()->GetLength()); - - EXPECT_EQ(java_lang_dex_file_->NumStringIds(), - static_cast(dex_cache->GetStrings()->GetLength())); - EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), - static_cast(dex_cache->GetResolvedTypes()->GetLength())); - EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), - static_cast(dex_cache->GetResolvedMethods()->GetLength())); - EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), - static_cast(dex_cache->GetResolvedFields()->GetLength())); - EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), - static_cast(dex_cache->GetInitializedStaticStorage()->GetLength())); -} - -} // namespace art diff --git a/src/dex_file.cc b/src/dex_file.cc index 73986162b0..e67e767dba 100644 --- a/src/dex_file.cc +++ b/src/dex_file.cc @@ -30,7 +30,10 @@ #include "dex_file_verifier.h" #include "globals.h" #include "leb128.h" -#include "object.h" +#include 
"mirror/abstract_method-inl.h" +#include "mirror/field.h" +#include "mirror/field-inl.h" +#include "mirror/string.h" #include "os.h" #include "safe_map.h" #include "thread.h" @@ -94,6 +97,14 @@ const DexFile* DexFile::Open(const std::string& filename, return DexFile::OpenFile(filename, location, true); } +int DexFile::GetPermissions() const { + if (mem_map_.get() == NULL) { + return 0; + } else { + return mem_map_->GetProtect(); + } +} + const DexFile* DexFile::OpenFile(const std::string& filename, const std::string& location, bool verify) { @@ -146,7 +157,6 @@ const DexFile* DexFile::OpenFile(const std::string& filename, const char* DexFile::kClassesDex = "classes.dex"; -// Open classes.dex from within a .zip, .jar, .apk, ... const DexFile* DexFile::OpenZip(const std::string& filename, const std::string& location) { UniquePtr zip_archive(ZipArchive::Open(filename)); @@ -157,6 +167,16 @@ const DexFile* DexFile::OpenZip(const std::string& filename, return DexFile::Open(*zip_archive.get(), location); } +const DexFile* DexFile::OpenMemory(const std::string& location, + uint32_t location_checksum, + MemMap* mem_map) { + return OpenMemory(mem_map->Begin(), + mem_map->Size(), + location, + location_checksum, + mem_map); +} + const DexFile* DexFile::Open(const ZipArchive& zip_archive, const std::string& location) { CHECK(!location.empty()); UniquePtr zip_entry(zip_archive.Find(kClassesDex)); @@ -584,7 +604,7 @@ std::string DexFile::CreateMethodSignature(uint32_t proto_idx, int32_t* unicode_ return descriptor; } -int32_t DexFile::GetLineNumFromPC(const AbstractMethod* method, uint32_t rel_pc) const { +int32_t DexFile::GetLineNumFromPC(const mirror::AbstractMethod* method, uint32_t rel_pc) const { // For native method, lineno should be -2 to indicate it is native. Note that // "line number == -2" is how libcore tells from StackTraceElement. 
if (method->GetCodeItemOffset() == 0) { @@ -601,6 +621,12 @@ int32_t DexFile::GetLineNumFromPC(const AbstractMethod* method, uint32_t rel_pc) return context.line_num_; } +const DexFile::TryItem* DexFile::GetTryItems(const CodeItem& code_item, uint32_t offset) { + const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_]; + return reinterpret_cast + (RoundUp(reinterpret_cast(insns_end_), 4)) + offset; +} + int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, int32_t tries_size, uint32_t address) { // Note: Signed type is important for max and min. @@ -900,8 +926,8 @@ static uint64_t ReadUnsignedLong(const byte* ptr, int zwidth, bool fill_on_right } EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file, - DexCache* dex_cache, - ClassLoader* class_loader, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, ClassLinker* linker, const DexFile::ClassDef& class_def) : dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker), @@ -976,7 +1002,7 @@ void EncodedStaticFieldValueIterator::Next() { ptr_ += width; } -void EncodedStaticFieldValueIterator::ReadValueToField(Field* field) const { +void EncodedStaticFieldValueIterator::ReadValueToField(mirror::Field* field) const { switch (type_) { case kBoolean: field->SetBoolean(field->GetDeclaringClass(), jval_.z); break; case kByte: field->SetByte(field->GetDeclaringClass(), jval_.b); break; @@ -988,12 +1014,12 @@ void EncodedStaticFieldValueIterator::ReadValueToField(Field* field) const { case kDouble: field->SetDouble(field->GetDeclaringClass(), jval_.d); break; case kNull: field->SetObject(field->GetDeclaringClass(), NULL); break; case kString: { - String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_); + mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_); field->SetObject(field->GetDeclaringClass(), resolved); break; } case kType: { - Class* resolved = 
linker_->ResolveType(dex_file_, jval_.i, dex_cache_, class_loader_); + mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, dex_cache_, class_loader_); field->SetObject(field->GetDeclaringClass(), resolved); break; } diff --git a/src/dex_file.h b/src/dex_file.h index 184d9507db..14b4ba0b68 100644 --- a/src/dex_file.h +++ b/src/dex_file.h @@ -29,10 +29,16 @@ #include "modifiers.h" #include "safe_map.h" #include "UniquePtr.h" -#include "utils.h" namespace art { +namespace mirror { +class AbstractMethod; +class ClassLoader; +class DexCache; +class Field; +} // namespace mirror +class ClassLinker; class ZipArchive; // TODO: move all of the macro functionality into the DexCache class. @@ -675,11 +681,7 @@ class DexFile { } } - static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset) { - const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_]; - return reinterpret_cast - (RoundUp(reinterpret_cast(insns_end_), 4)) + offset; - } + static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset); // Get the base of the encoded data for the given DexCode. static const byte* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) { @@ -775,7 +777,7 @@ class DexFile { // Returns -2 for native methods (as expected in exception traces). // // This is used by runtime; therefore use art::Method not art::DexFile::Method. 
- int32_t GetLineNumFromPC(const AbstractMethod* method, uint32_t rel_pc) const + int32_t GetLineNumFromPC(const mirror::AbstractMethod* method, uint32_t rel_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, @@ -790,13 +792,7 @@ class DexFile { } } - int GetPermissions() const { - if (mem_map_.get() == NULL) { - return 0; - } else { - return mem_map_->GetProtect(); - } - } + int GetPermissions() const; private: // Opens a .dex file @@ -811,13 +807,7 @@ class DexFile { // Opens a .dex file at the given address backed by a MemMap static const DexFile* OpenMemory(const std::string& location, uint32_t location_checksum, - MemMap* mem_map) { - return OpenMemory(mem_map->Begin(), - mem_map->Size(), - location, - location_checksum, - mem_map); - } + MemMap* mem_map); // Opens a .dex file at the given address, optionally backed by a MemMap static const DexFile* OpenMemory(const byte* dex_file, @@ -1116,19 +1106,14 @@ class ClassDataItemIterator { DISALLOW_IMPLICIT_CONSTRUCTORS(ClassDataItemIterator); }; -class ClassLinker; -class ClassLoader; -class DexCache; -class Field; - class EncodedStaticFieldValueIterator { public: - EncodedStaticFieldValueIterator(const DexFile& dex_file, DexCache* dex_cache, ClassLoader* class_loader, + EncodedStaticFieldValueIterator(const DexFile& dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, ClassLinker* linker, const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReadValueToField(Field* field) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ReadValueToField(mirror::Field* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasNext() { return pos_ < array_size_; } @@ -1158,14 +1143,14 @@ class EncodedStaticFieldValueIterator { static const byte kEncodedValueArgShift = 5; const DexFile& dex_file_; - DexCache* dex_cache_; // dex cache to resolve literal objects - 
ClassLoader* class_loader_; // ClassLoader to resolve types - ClassLinker* linker_; // linker to resolve literal objects - size_t array_size_; // size of array - size_t pos_; // current position - const byte* ptr_; // pointer into encoded data array - ValueType type_; // type of current encoded value - jvalue jval_; // value of current encoded value + mirror::DexCache* dex_cache_; // Dex cache to resolve literal objects. + mirror::ClassLoader* class_loader_; // ClassLoader to resolve types. + ClassLinker* linker_; // Linker to resolve literal objects. + size_t array_size_; // Size of array. + size_t pos_; // Current position. + const byte* ptr_; // Pointer into encoded data array. + ValueType type_; // Type of current encoded value. + jvalue jval_; // Value of current encoded value. DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedStaticFieldValueIterator); }; std::ostream& operator<<(std::ostream& os, const EncodedStaticFieldValueIterator::ValueType& code); diff --git a/src/dex_file_verifier.cc b/src/dex_file_verifier.cc index 83ef31aada..2f9054e414 100644 --- a/src/dex_file_verifier.cc +++ b/src/dex_file_verifier.cc @@ -18,9 +18,10 @@ #include "base/stringprintf.h" #include "leb128.h" -#include "object.h" #include "safe_map.h" #include "UniquePtr.h" +#include "utf.h" +#include "utils.h" #include "zip_archive.h" namespace art { diff --git a/src/dex_instruction.cc b/src/dex_instruction.cc index d3aa2389ef..55f6eca326 100644 --- a/src/dex_instruction.cc +++ b/src/dex_instruction.cc @@ -17,6 +17,7 @@ #include "dex_instruction.h" #include "dex_file.h" +#include "utils.h" #include namespace art { diff --git a/src/exception_test.cc b/src/exception_test.cc index 58e6533058..240bf957db 100644 --- a/src/exception_test.cc +++ b/src/exception_test.cc @@ -18,6 +18,8 @@ #include "common_test.h" #include "dex_file.h" #include "gtest/gtest.h" +#include "mirror/object_array-inl.h" +#include "mirror/stack_trace_element.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include 
"sirt_ref.h" @@ -32,8 +34,8 @@ class ExceptionTest : public CommonTest { CommonTest::SetUp(); ScopedObjectAccess soa(Thread::Current()); - SirtRef class_loader(soa.Self(), - soa.Decode(LoadDex("ExceptionHandle"))); + SirtRef class_loader(soa.Self(), + soa.Decode(LoadDex("ExceptionHandle"))); my_klass_ = class_linker_->FindClass("LExceptionHandle;", class_loader.get()); ASSERT_TRUE(my_klass_ != NULL); class_linker_->EnsureInitialized(my_klass_, false, true); @@ -90,11 +92,11 @@ class ExceptionTest : public CommonTest { std::vector fake_vmap_table_data_; std::vector fake_gc_map_; - AbstractMethod* method_f_; - AbstractMethod* method_g_; + mirror::AbstractMethod* method_f_; + mirror::AbstractMethod* method_g_; private: - Class* my_klass_; + mirror::Class* my_klass_; }; TEST_F(ExceptionTest, FindCatchHandler) { @@ -193,8 +195,8 @@ TEST_F(ExceptionTest, StackTraceElement) { ASSERT_TRUE(internal != NULL); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); ASSERT_TRUE(ste_array != NULL); - ObjectArray* trace_array = - soa.Decode*>(ste_array); + mirror::ObjectArray* trace_array = + soa.Decode*>(ste_array); ASSERT_TRUE(trace_array != NULL); ASSERT_TRUE(trace_array->Get(0) != NULL); diff --git a/src/gc/atomic_stack.h b/src/gc/atomic_stack.h index cd1781dcb9..0197bce992 100644 --- a/src/gc/atomic_stack.h +++ b/src/gc/atomic_stack.h @@ -101,11 +101,11 @@ class AtomicStack { } T* Begin() { - return const_cast(begin_ + front_index_); + return const_cast(begin_ + front_index_); } T* End() { - return const_cast(begin_ + back_index_); + return const_cast(begin_ + back_index_); } size_t Capacity() const { @@ -159,6 +159,8 @@ class AtomicStack { DISALLOW_COPY_AND_ASSIGN(AtomicStack); }; +typedef AtomicStack ObjectStack; + } // namespace art #endif // ART_SRC_MARK_STACK_H_ diff --git a/src/gc/card_table-inl.h b/src/gc/card_table-inl.h new file mode 100644 index 0000000000..13590b70a9 --- /dev/null +++ b/src/gc/card_table-inl.h @@ -0,0 +1,209 @@ 
+/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_GC_CARDTABLE_INL_H_ +#define ART_SRC_GC_CARDTABLE_INL_H_ + +#include "base/logging.h" +#include "card_table.h" +#include "cutils/atomic-inline.h" +#include "space_bitmap.h" +#include "utils.h" + +namespace art { + +static inline bool byte_cas(byte old_value, byte new_value, byte* address) { + // Little endian means most significant byte is on the left. + const size_t shift = reinterpret_cast(address) % sizeof(uintptr_t); + // Align the address down. + address -= shift; + int32_t* word_address = reinterpret_cast(address); + // Word with the byte we are trying to cas cleared. + const int32_t cur_word = *word_address & ~(0xFF << shift); + const int32_t old_word = cur_word | (static_cast(old_value) << shift); + const int32_t new_word = cur_word | (static_cast(new_value) << shift); + bool success = android_atomic_cas(old_word, new_word, word_address) == 0; + return success; +} + +template +inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, + const Visitor& visitor, const FingerVisitor& finger_visitor, + const byte minimum_age) const { + DCHECK(bitmap->HasAddress(scan_begin)); + DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan. 
+ byte* card_cur = CardFromAddr(scan_begin); + byte* card_end = CardFromAddr(scan_end); + CheckCardValid(card_cur); + CheckCardValid(card_end); + + // Handle any unaligned cards at the start. + while (!IsAligned(card_cur) && card_cur < card_end) { + if (*card_cur >= minimum_age) { + uintptr_t start = reinterpret_cast(AddrFromCard(card_cur)); + uintptr_t end = start + kCardSize; + bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); + } + ++card_cur; + } + + byte* aligned_end = card_end - + (reinterpret_cast(card_end) & (sizeof(uintptr_t) - 1)); + + // Now we have the words, we can send these to be processed in parallel. + uintptr_t* word_cur = reinterpret_cast(card_cur); + uintptr_t* word_end = reinterpret_cast(aligned_end); + + // TODO: Parallelize + while (word_cur < word_end) { + // Find the first dirty card. + while (*word_cur == 0 && word_cur < word_end) { + word_cur++; + } + if (word_cur >= word_end) { + break; + } + uintptr_t start_word = *word_cur; + for (size_t i = 0; i < sizeof(uintptr_t); ++i) { + if ((start_word & 0xFF) >= minimum_age) { + byte* card = reinterpret_cast(word_cur) + i; + const byte card_byte = *card; + DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty) + << "card " << static_cast(card_byte) << " word " << (start_word & 0xFF); + uintptr_t start = reinterpret_cast(AddrFromCard(card)); + uintptr_t end = start + kCardSize; + bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); + } + start_word >>= 8; + } + ++word_cur; + } + + // Handle any unaligned cards at the end. + card_cur = reinterpret_cast(word_end); + while (card_cur < card_end) { + if (*card_cur >= minimum_age) { + uintptr_t start = reinterpret_cast(AddrFromCard(card_cur)); + uintptr_t end = start + kCardSize; + bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); + } + ++card_cur; + } +} + +/* + * Visitor is expected to take in a card and return the new value. When a value is modified, the + * modify visitor is called. 
+ * visitor: The visitor which modifies the cards. Returns the new value for a card given an old + * value. + * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables + * us to know which cards got cleared. + */ +template +inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor, + const ModifiedVisitor& modified) { + byte* card_cur = CardFromAddr(scan_begin); + byte* card_end = CardFromAddr(scan_end); + CheckCardValid(card_cur); + CheckCardValid(card_end); + + // Handle any unaligned cards at the start. + while (!IsAligned(card_cur) && card_cur < card_end) { + byte expected, new_value; + do { + expected = *card_cur; + new_value = visitor(expected); + } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_cur))); + if (expected != new_value) { + modified(card_cur, expected, new_value); + } + ++card_cur; + } + + // Handle unaligned cards at the end. + while (!IsAligned(card_end) && card_end > card_cur) { + --card_end; + byte expected, new_value; + do { + expected = *card_end; + new_value = visitor(expected); + } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_end))); + if (expected != new_value) { + modified(card_cur, expected, new_value); + } + } + + // Now we have the words, we can process words in parallel. + uintptr_t* word_cur = reinterpret_cast(card_cur); + uintptr_t* word_end = reinterpret_cast(card_end); + uintptr_t expected_word; + uintptr_t new_word; + + // TODO: Parallelize. + while (word_cur < word_end) { + while ((expected_word = *word_cur) != 0) { + new_word = + (visitor((expected_word >> 0) & 0xFF) << 0) | + (visitor((expected_word >> 8) & 0xFF) << 8) | + (visitor((expected_word >> 16) & 0xFF) << 16) | + (visitor((expected_word >> 24) & 0xFF) << 24); + if (new_word == expected_word) { + // No need to do a cas. 
+ break; + } + if (LIKELY(android_atomic_cas(expected_word, new_word, + reinterpret_cast(word_cur)) == 0)) { + for (size_t i = 0; i < sizeof(uintptr_t); ++i) { + const byte expected_byte = (expected_word >> (8 * i)) & 0xFF; + const byte new_byte = (new_word >> (8 * i)) & 0xFF; + if (expected_byte != new_byte) { + modified(reinterpret_cast(word_cur) + i, expected_byte, new_byte); + } + } + break; + } + } + ++word_cur; + } +} + +inline void* CardTable::AddrFromCard(const byte *card_addr) const { + DCHECK(IsValidCard(card_addr)) + << " card_addr: " << reinterpret_cast(card_addr) + << " begin: " << reinterpret_cast(mem_map_->Begin() + offset_) + << " end: " << reinterpret_cast(mem_map_->End()); + uintptr_t offset = card_addr - biased_begin_; + return reinterpret_cast(offset << kCardShift); +} + +inline byte* CardTable::CardFromAddr(const void *addr) const { + byte *card_addr = biased_begin_ + (reinterpret_cast(addr) >> kCardShift); + // Sanity check the caller was asking for address covered by the card table + DCHECK(IsValidCard(card_addr)) << "addr: " << addr + << " card_addr: " << reinterpret_cast(card_addr); + return card_addr; +} + +inline void CardTable::CheckCardValid(byte* card) const { + DCHECK(IsValidCard(card)) + << " card_addr: " << reinterpret_cast(card) + << " begin: " << reinterpret_cast(mem_map_->Begin() + offset_) + << " end: " << reinterpret_cast(mem_map_->End()); +} + +} // namespace art + +#endif // ART_SRC_GC_CARDTABLE_INL_H_ diff --git a/src/gc/card_table.cc b/src/gc/card_table.cc index f27777bfd8..4331270131 100644 --- a/src/gc/card_table.cc +++ b/src/gc/card_table.cc @@ -19,6 +19,7 @@ #include #include "base/logging.h" +#include "gc/card_table-inl.h" #include "heap.h" #include "heap_bitmap.h" #include "runtime.h" diff --git a/src/gc/card_table.h b/src/gc/card_table.h index 8f1bc922f6..842fcc3aa2 100644 --- a/src/gc/card_table.h +++ b/src/gc/card_table.h @@ -17,19 +17,18 @@ #ifndef ART_SRC_GC_CARDTABLE_H_ #define ART_SRC_GC_CARDTABLE_H_ -#include 
"base/logging.h" #include "globals.h" +#include "locks.h" #include "mem_map.h" -#include "space_bitmap.h" #include "UniquePtr.h" -#include "utils.h" namespace art { - +namespace mirror { +class Object; +} // namespace mirror class Heap; class ContinuousSpace; class SpaceBitmap; -class Object; // Maintain a card table from the the write barrier. All writes of // non-NULL values to heap addresses should go through an entry in @@ -50,12 +49,12 @@ class CardTable { } // Is the object on a dirty card? - bool IsDirty(const Object* obj) const { + bool IsDirty(const mirror::Object* obj) const { return GetCard(obj) == kCardDirty; } // Return the state of the card at an address. - byte GetCard(const Object* obj) const { + byte GetCard(const mirror::Object* obj) const { return *CardFromAddr(obj); } @@ -88,71 +87,7 @@ class CardTable { */ template void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor, - const ModifiedVisitor& modified = VoidFunctor()) { - byte* card_cur = CardFromAddr(scan_begin); - byte* card_end = CardFromAddr(scan_end); - CheckCardValid(card_cur); - CheckCardValid(card_end); - - // Handle any unaligned cards at the start. - while (!IsAligned(card_cur) && card_cur < card_end) { - byte expected, new_value; - do { - expected = *card_cur; - new_value = visitor(expected); - } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_cur) != 0)); - if (expected != new_value) { - modified(card_cur, expected, new_value); - } - ++card_cur; - } - - // Handle unaligned cards at the end. - while (!IsAligned(card_end) && card_end > card_cur) { - --card_end; - byte expected, new_value; - do { - expected = *card_end; - new_value = visitor(expected); - } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_end) != 0)); - if (expected != new_value) { - modified(card_cur, expected, new_value); - } - } - - // Now we have the words, we can process words in parallel. 
- uintptr_t* word_cur = reinterpret_cast(card_cur); - uintptr_t* word_end = reinterpret_cast(card_end); - uintptr_t expected_word; - uintptr_t new_word; - - // TODO: Parallelize. - while (word_cur < word_end) { - while ((expected_word = *word_cur) != 0) { - new_word = - (visitor((expected_word >> 0) & 0xFF) << 0) | - (visitor((expected_word >> 8) & 0xFF) << 8) | - (visitor((expected_word >> 16) & 0xFF) << 16) | - (visitor((expected_word >> 24) & 0xFF) << 24); - if (new_word == expected_word) { - // No need to do a cas. - break; - } - if (LIKELY(android_atomic_cas(expected_word, new_word, - reinterpret_cast(word_cur)) == 0)) { - for (size_t i = 0; i < sizeof(uintptr_t); ++i) { - const byte expected_byte = (expected_word >> (8 * i)) & 0xFF; - const byte new_byte = (new_word >> (8 * i)) & 0xFF; - if (expected_byte != new_byte) { - modified(reinterpret_cast(word_cur) + i, expected_byte, new_byte); - } - } - break; - } - } - ++word_cur; - } - } + const ModifiedVisitor& modified); // For every dirty at least minumum age between begin and end invoke the visitor with the // specified argument. @@ -161,67 +96,7 @@ class CardTable { const Visitor& visitor, const FingerVisitor& finger_visitor, const byte minimum_age = kCardDirty) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(bitmap->HasAddress(scan_begin)); - DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan. - byte* card_cur = CardFromAddr(scan_begin); - byte* card_end = CardFromAddr(scan_end); - CheckCardValid(card_cur); - CheckCardValid(card_end); - - // Handle any unaligned cards at the start. 
- while (!IsAligned(card_cur) && card_cur < card_end) { - if (*card_cur >= minimum_age) { - uintptr_t start = reinterpret_cast(AddrFromCard(card_cur)); - uintptr_t end = start + kCardSize; - bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); - } - ++card_cur; - } - - byte* aligned_end = card_end - - (reinterpret_cast(card_end) & (sizeof(uintptr_t) - 1)); - - // Now we have the words, we can send these to be processed in parallel. - uintptr_t* word_cur = reinterpret_cast(card_cur); - uintptr_t* word_end = reinterpret_cast(aligned_end); - - // TODO: Parallelize - while (word_cur < word_end) { - // Find the first dirty card. - while (*word_cur == 0 && word_cur < word_end) { - word_cur++; - } - if (word_cur >= word_end) { - break; - } - uintptr_t start_word = *word_cur; - for (size_t i = 0; i < sizeof(uintptr_t); ++i) { - if ((start_word & 0xFF) >= minimum_age) { - byte* card = reinterpret_cast(word_cur) + i; - const byte card_byte = *card; - DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty) - << "card " << static_cast(card_byte) << " word " << (start_word & 0xFF); - uintptr_t start = reinterpret_cast(AddrFromCard(card)); - uintptr_t end = start + kCardSize; - bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); - } - start_word >>= 8; - } - ++word_cur; - } - - // Handle any unaligned cards at the end. - card_cur = reinterpret_cast(word_end); - while (card_cur < card_end) { - if (*card_cur >= minimum_age) { - uintptr_t start = reinterpret_cast(AddrFromCard(card_cur)); - uintptr_t end = start + kCardSize; - bitmap->VisitMarkedRange(start, end, visitor, finger_visitor); - } - ++card_cur; - } - } + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Assertion used to check the given address is covered by the card table void CheckAddrIsInCardTable(const byte* addr) const; @@ -233,40 +108,14 @@ class CardTable { void ClearSpaceCards(ContinuousSpace* space); // Returns the first address in the heap which maps to this card. 
- void* AddrFromCard(const byte *card_addr) const { - DCHECK(IsValidCard(card_addr)) - << " card_addr: " << reinterpret_cast(card_addr) - << " begin: " << reinterpret_cast(mem_map_->Begin() + offset_) - << " end: " << reinterpret_cast(mem_map_->End()); - uintptr_t offset = card_addr - biased_begin_; - return reinterpret_cast(offset << kCardShift); - } + void* AddrFromCard(const byte *card_addr) const; // Returns the address of the relevant byte in the card table, given an address on the heap. - byte* CardFromAddr(const void *addr) const { - byte *card_addr = biased_begin_ + (reinterpret_cast(addr) >> kCardShift); - // Sanity check the caller was asking for address covered by the card table - DCHECK(IsValidCard(card_addr)) << "addr: " << addr - << " card_addr: " << reinterpret_cast(card_addr); - return card_addr; - } + byte* CardFromAddr(const void *addr) const; bool AddrIsInCardTable(const void* addr) const; private: - static int byte_cas(byte old_value, byte new_value, byte* address) { - // Little endian means most significant byte is on the left. - const size_t shift = reinterpret_cast(address) % sizeof(uintptr_t); - // Align the address down. - address -= shift; - int32_t* word_address = reinterpret_cast(address); - // Word with the byte we are trying to cas cleared. - const int32_t cur_word = *word_address & ~(0xFF << shift); - const int32_t old_word = cur_word | (static_cast(old_value) << shift); - const int32_t new_word = cur_word | (static_cast(new_value) << shift); - return android_atomic_cas(old_word, new_word, word_address); - } - CardTable(MemMap* begin, byte* biased_begin, size_t offset); // Returns true iff the card table address is within the bounds of the card table. 
@@ -276,12 +125,7 @@ class CardTable { return card_addr >= begin && card_addr < end; } - void CheckCardValid(byte* card) const { - DCHECK(IsValidCard(card)) - << " card_addr: " << reinterpret_cast(card) - << " begin: " << reinterpret_cast(mem_map_->Begin() + offset_) - << " end: " << reinterpret_cast(mem_map_->End()); - } + void CheckCardValid(byte* card) const; // Verifies that all gray objects are on a dirty card. void VerifyCardTable(); diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc index bcc7b63e73..fbcdbafd0a 100644 --- a/src/gc/garbage_collector.cc +++ b/src/gc/garbage_collector.cc @@ -15,6 +15,7 @@ */ #include "garbage_collector.h" +#include "thread.h" #include "thread_list.h" namespace art { diff --git a/src/gc/garbage_collector.h b/src/gc/garbage_collector.h index 9ddf45f646..a1014c2c7f 100644 --- a/src/gc/garbage_collector.h +++ b/src/gc/garbage_collector.h @@ -14,11 +14,13 @@ * limitations under the License. */ -#ifndef ART_SRC_GC_GARBAGE_COLLECTR_H_ -#define ART_SRC_GC_GARBAGE_COLLECTR_H_ +#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_ +#define ART_SRC_GC_GARBAGE_COLLECTOR_H_ #include "locks.h" -#include "utils.h" + +#include +#include namespace art { @@ -56,7 +58,7 @@ class GarbageCollector { void RegisterPause(uint64_t nano_length); protected: - // The initial phase. Done with mutators upaused. + // The initial phase. Done without mutators paused. virtual void InitializePhase() = 0; // Mark all reachable objects, done concurrently. @@ -68,7 +70,7 @@ class GarbageCollector { // Called with mutators running. virtual void ReclaimPhase() = 0; - // Called after the GC is finished. Done with mutators upaused. + // Called after the GC is finished. Done without mutators paused. 
virtual void FinishPhase() = 0; Heap* heap_; @@ -78,4 +80,4 @@ class GarbageCollector { } // namespace art -#endif // ART_SRC_GC_GARBAGE_COLLECTR_H_ +#endif // ART_SRC_GC_GARBAGE_COLLECTOR_H_ diff --git a/src/gc/gc_type.h b/src/gc/gc_type.h new file mode 100644 index 0000000000..908f038ff2 --- /dev/null +++ b/src/gc/gc_type.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_GC_GC_TYPE_H_ +#define ART_SRC_GC_GC_TYPE_H_ + +namespace art { + +// The ordering of the enum matters, it is used to determine which GCs are run first. +enum GcType { + // No Gc + kGcTypeNone, + // Sticky mark bits "generational" GC. + kGcTypeSticky, + // Partial GC, over only the alloc space. + kGcTypePartial, + // Full GC + kGcTypeFull, + // Number of different Gc types. + kGcTypeMax, +}; +std::ostream& operator<<(std::ostream& os, const GcType& policy); + +} // namespace art + +#endif // ART_SRC_GC_GC_TYPE_H_ diff --git a/src/gc/heap_bitmap-inl.h b/src/gc/heap_bitmap-inl.h new file mode 100644 index 0000000000..281118359b --- /dev/null +++ b/src/gc/heap_bitmap-inl.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_GC_HEAP_BITMAP_INL_H_ +#define ART_SRC_GC_HEAP_BITMAP_INL_H_ + +#include "heap_bitmap.h" + +namespace art { + +template +inline void HeapBitmap::Visit(const Visitor& visitor) { + // TODO: C++0x auto + for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { + SpaceBitmap* bitmap = *it; + bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor()); + } + large_objects_->Visit(visitor); +} + +} // namespace art + +#endif // ART_SRC_GC_HEAP_BITMAP_INL_H_ diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h index 42c4166ba0..87e08483f5 100644 --- a/src/gc/heap_bitmap.h +++ b/src/gc/heap_bitmap.h @@ -14,96 +14,91 @@ * limitations under the License. 
*/ -#ifndef ART_SRC_HEAP_BITMAP_H_ -#define ART_SRC_HEAP_BITMAP_H_ +#ifndef ART_SRC_GC_HEAP_BITMAP_H_ +#define ART_SRC_GC_HEAP_BITMAP_H_ +#include "locks.h" #include "space_bitmap.h" namespace art { - class Heap; - class SpaceBitmap; - - class HeapBitmap { - public: - bool Test(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { - SpaceBitmap* bitmap = GetSpaceBitmap(obj); - if (LIKELY(bitmap != NULL)) { - return bitmap->Test(obj); - } else { - return large_objects_->Test(obj); - } +class Heap; + +class HeapBitmap { + public: + bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SpaceBitmap* bitmap = GetSpaceBitmap(obj); + if (LIKELY(bitmap != NULL)) { + return bitmap->Test(obj); + } else { + return large_objects_->Test(obj); } - - void Clear(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { - SpaceBitmap* bitmap = GetSpaceBitmap(obj); - if (LIKELY(bitmap != NULL)) { - bitmap->Clear(obj); - } else { - large_objects_->Clear(obj); - } + } + + void Clear(const mirror::Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SpaceBitmap* bitmap = GetSpaceBitmap(obj); + if (LIKELY(bitmap != NULL)) { + bitmap->Clear(obj); + } else { + large_objects_->Clear(obj); } - - void Set(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { - SpaceBitmap* bitmap = GetSpaceBitmap(obj); - if (LIKELY(bitmap != NULL)) { - bitmap->Set(obj); - } else { - large_objects_->Set(obj); - } + } + + void Set(const mirror::Object* obj) + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SpaceBitmap* bitmap = GetSpaceBitmap(obj); + if (LIKELY(bitmap != NULL)) { + bitmap->Set(obj); + } else { + large_objects_->Set(obj); } + } - SpaceBitmap* GetSpaceBitmap(const Object* obj) { - // TODO: C++0x auto - for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { - if ((*it)->HasAddress(obj)) { - return *it; - } + SpaceBitmap* GetSpaceBitmap(const mirror::Object* obj) { + 
// TODO: C++0x auto + for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { + if ((*it)->HasAddress(obj)) { + return *it; } - return NULL; } + return NULL; + } - void Walk(SpaceBitmap::Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void Walk(SpaceBitmap::Callback* callback, void* arg) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - template - void Visit(const Visitor& visitor) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { - // TODO: C++0x auto - for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { - SpaceBitmap* bitmap = *it; - bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor()); - } - large_objects_->Visit(visitor); - } + template + void Visit(const Visitor& visitor) + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. - void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. + void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - HeapBitmap(Heap* heap); + HeapBitmap(Heap* heap); - inline SpaceSetMap* GetLargeObjects() const { - return large_objects_; - } + inline SpaceSetMap* GetLargeObjects() const { + return large_objects_; + } + + void SetLargeObjects(SpaceSetMap* large_objects); - void SetLargeObjects(SpaceSetMap* large_objects); + private: - private: + const Heap* const heap_; - const Heap* const heap_; + void AddSpaceBitmap(SpaceBitmap* bitmap); - void AddSpaceBitmap(SpaceBitmap* bitmap); + typedef std::vector Bitmaps; + Bitmaps bitmaps_; - typedef std::vector Bitmaps; - Bitmaps bitmaps_; + // Large object sets. 
+ SpaceSetMap* large_objects_; - // Large object sets. - SpaceSetMap* large_objects_; + friend class Heap; +}; - friend class Heap; - }; } // namespace art -#endif // ART_SRC_HEAP_BITMAP_H_ +#endif // ART_SRC_GC_HEAP_BITMAP_H_ diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc index 1b93e5d570..69320faffd 100644 --- a/src/gc/large_object_space.cc +++ b/src/gc/large_object_space.cc @@ -22,6 +22,7 @@ #include "image.h" #include "os.h" #include "space_bitmap.h" +#include "thread.h" #include "utils.h" namespace art { @@ -58,13 +59,13 @@ LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) { return new LargeObjectMapSpace(name); } -Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) { +mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) { MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE); if (mem_map == NULL) { return NULL; } MutexLock mu(self, lock_); - Object* obj = reinterpret_cast(mem_map->Begin()); + mirror::Object* obj = reinterpret_cast(mem_map->Begin()); large_objects_.push_back(obj); mem_maps_.Put(obj, mem_map); size_t allocation_size = mem_map->Size(); @@ -75,7 +76,7 @@ Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) { return obj; } -size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) { +size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) { MutexLock mu(self, lock_); MemMaps::iterator found = mem_maps_.find(ptr); CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live"; @@ -88,14 +89,14 @@ size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) { return allocation_size; } -size_t LargeObjectMapSpace::AllocationSize(const Object* obj) { +size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) { MutexLock mu(Thread::Current(), lock_); - MemMaps::iterator found = mem_maps_.find(const_cast(obj)); + MemMaps::iterator found = 
mem_maps_.find(const_cast(obj)); CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live"; return found->second->Size(); } -size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) { +size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { size_t total = 0; for (size_t i = 0; i < num_ptrs; ++i) { if (kDebugSpaces) { @@ -115,9 +116,9 @@ void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) } } -bool LargeObjectMapSpace::Contains(const Object* obj) const { +bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const { MutexLock mu(Thread::Current(), lock_); - return mem_maps_.find(const_cast(obj)) != mem_maps_.end(); + return mem_maps_.find(const_cast(obj)) != mem_maps_.end(); } FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) { @@ -191,7 +192,7 @@ void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) { } } -size_t FreeListSpace::Free(Thread* self, Object* obj) { +size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) { MutexLock mu(self, lock_); CHECK(Contains(obj)); // Check adjacent chunks to see if we need to combine. 
@@ -220,7 +221,7 @@ size_t FreeListSpace::Free(Thread* self, Object* obj) { return allocation_size; } -bool FreeListSpace::Contains(const Object* obj) const { +bool FreeListSpace::Contains(const mirror::Object* obj) const { return mem_map_->HasAddress(obj); } @@ -228,13 +229,13 @@ FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) { return chunk + chunk->GetSize() / kAlignment; } -size_t FreeListSpace::AllocationSize(const Object* obj) { - Chunk* chunk = ChunkFromAddr(const_cast(obj)); +size_t FreeListSpace::AllocationSize(const mirror::Object* obj) { + Chunk* chunk = ChunkFromAddr(const_cast(obj)); CHECK(!chunk->IsFree()); return chunk->GetSize(); } -Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) { +mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) { MutexLock mu(self, lock_); num_bytes = RoundUp(num_bytes, kAlignment); Chunk temp; @@ -261,7 +262,7 @@ Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) { total_objects_allocated_++; num_bytes_allocated_ += num_bytes; total_bytes_allocated_ += num_bytes; - return reinterpret_cast(addr); + return reinterpret_cast(addr); } void FreeListSpace::Dump(std::ostream& os) const{ diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h index 979fce6078..c34dbcc30c 100644 --- a/src/gc/large_object_space.h +++ b/src/gc/large_object_space.h @@ -18,8 +18,13 @@ #define ART_SRC_GC_LARGE_OBJECT_SPACE_H_ #include "space.h" +#include "safe_map.h" + +#include +#include namespace art { +class SpaceSetMap; class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { public: @@ -64,7 +69,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { return total_objects_allocated_; } - size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs); + size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs); protected: @@ -90,19 +95,19 @@ class LargeObjectMapSpace : public LargeObjectSpace { static LargeObjectMapSpace* Create(const 
std::string& name); // Return the storage space required by obj. - virtual size_t AllocationSize(const Object* obj); - virtual Object* Alloc(Thread* self, size_t num_bytes); - size_t Free(Thread* self, Object* ptr); + virtual size_t AllocationSize(const mirror::Object* obj); + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes); + size_t Free(Thread* self, mirror::Object* ptr); virtual void Walk(DlMallocSpace::WalkCallback, void* arg); - virtual bool Contains(const Object* obj) const; + virtual bool Contains(const mirror::Object* obj) const; private: LargeObjectMapSpace(const std::string& name); virtual ~LargeObjectMapSpace() {} // Used to ensure mutual exclusion when the allocation spaces data structures are being modified. mutable Mutex lock_; - std::vector large_objects_; - typedef SafeMap MemMaps; + std::vector large_objects_; + typedef SafeMap MemMaps; MemMaps mem_maps_; }; @@ -111,10 +116,10 @@ class FreeListSpace : public LargeObjectSpace { virtual ~FreeListSpace(); static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity); - size_t AllocationSize(const Object* obj); - Object* Alloc(Thread* self, size_t num_bytes); - size_t Free(Thread* self, Object* obj); - bool Contains(const Object* obj) const; + size_t AllocationSize(const mirror::Object* obj); + mirror::Object* Alloc(Thread* self, size_t num_bytes); + size_t Free(Thread* self, mirror::Object* obj); + bool Contains(const mirror::Object* obj) const; void Walk(DlMallocSpace::WalkCallback callback, void* arg); // Address at which the space begins diff --git a/src/gc/mark_sweep-inl.h b/src/gc/mark_sweep-inl.h new file mode 100644 index 0000000000..726502330b --- /dev/null +++ b/src/gc/mark_sweep-inl.h @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_GC_MARK_SWEEP_INL_H_ +#define ART_SRC_GC_MARK_SWEEP_INL_H_ + +#include "heap.h" +#include "mirror/class.h" +#include "mirror/field.h" +#include "mirror/object_array.h" + +namespace art { + +template +inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) { + DCHECK(obj != NULL); + if (kIsDebugBuild && !IsMarked(obj)) { + heap_->DumpSpaces(); + LOG(FATAL) << "Scanning unmarked object " << obj; + } + mirror::Class* klass = obj->GetClass(); + DCHECK(klass != NULL); + if (klass == java_lang_Class_) { + DCHECK_EQ(klass->GetClass(), java_lang_Class_); + if (kCountScannedTypes) { + ++class_count_; + } + VisitClassReferences(klass, obj, visitor); + } else if (klass->IsArrayClass()) { + if (kCountScannedTypes) { + ++array_count_; + } + visitor(obj, klass, mirror::Object::ClassOffset(), false); + if (klass->IsObjectArrayClass()) { + VisitObjectArrayReferences(obj->AsObjectArray(), visitor); + } + } else { + if (kCountScannedTypes) { + ++other_count_; + } + VisitOtherReferences(klass, obj, visitor); + if (UNLIKELY(klass->IsReferenceClass())) { + DelayReferenceReferent(const_cast(obj)); + } + } +} + +template +inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { + DCHECK(obj != NULL); + DCHECK(obj->GetClass() != NULL); + + mirror::Class* klass = obj->GetClass(); + DCHECK(klass != NULL); + if (klass == mirror::Class::GetJavaLangClass()) { + DCHECK_EQ(klass->GetClass(), 
mirror::Class::GetJavaLangClass()); + VisitClassReferences(klass, obj, visitor); + } else { + if (klass->IsArrayClass()) { + visitor(obj, klass, mirror::Object::ClassOffset(), false); + if (klass->IsObjectArrayClass()) { + VisitObjectArrayReferences(obj->AsObjectArray(), visitor); + } + } else { + VisitOtherReferences(klass, obj, visitor); + } + } +} + +template +inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass, + const mirror::Object* obj, + const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + DCHECK(obj != NULL); + DCHECK(klass != NULL); + VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor); +} + +template +inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj, + const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + VisitInstanceFieldsReferences(klass, obj, visitor); + VisitStaticFieldsReferences(obj->AsClass(), visitor); +} + +template +inline void MarkSweep::VisitStaticFieldsReferences(const mirror::Class* klass, + const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + DCHECK(klass != NULL); + VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor); +} + +template +inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, + bool is_static, const Visitor& visitor) { + if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) { + // Found a reference offset bitmap. Mark the specified offsets. + while (ref_offsets != 0) { + size_t right_shift = CLZ(ref_offsets); + MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift); + const mirror::Object* ref = obj->GetFieldObject(field_offset, false); + visitor(obj, ref, field_offset, is_static); + ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift); + } + } else { + // There is no reference offset bitmap. 
In the non-static case, + // walk up the class inheritance hierarchy and find reference + // offsets the hard way. In the static case, just consider this + // class. + for (const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass(); + klass != NULL; + klass = is_static ? NULL : klass->GetSuperClass()) { + size_t num_reference_fields = (is_static + ? klass->NumReferenceStaticFields() + : klass->NumReferenceInstanceFields()); + for (size_t i = 0; i < num_reference_fields; ++i) { + mirror::Field* field = (is_static ? klass->GetStaticField(i) + : klass->GetInstanceField(i)); + MemberOffset field_offset = field->GetOffset(); + const mirror::Object* ref = obj->GetFieldObject(field_offset, false); + visitor(obj, ref, field_offset, is_static); + } + } + } +} + +template +inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray* array, + const Visitor& visitor) { + const int32_t length = array->GetLength(); + for (int32_t i = 0; i < length; ++i) { + const mirror::Object* element = array->GetWithoutChecks(i); + const size_t width = sizeof(mirror::Object*); + MemberOffset offset = MemberOffset(i * width + mirror::Array::DataOffset(width).Int32Value()); + visitor(array, element, offset, false); + } +} + +} // namespace art + +#endif // ART_SRC_GC_MARK_SWEEP_INL_H_ diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc index 7c52c83292..40102b2036 100644 --- a/src/gc/mark_sweep.cc +++ b/src/gc/mark_sweep.cc @@ -25,22 +25,32 @@ #include "base/logging.h" #include "base/macros.h" #include "card_table.h" -#include "class_loader.h" -#include "dex_cache.h" +#include "card_table-inl.h" #include "heap.h" #include "indirect_reference_table.h" #include "intern_table.h" #include "jni_internal.h" #include "large_object_space.h" #include "monitor.h" -#include "object.h" +#include "mark_sweep-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache.h" +#include "mirror/field.h" +#include "mirror/field-inl.h" 
+#include "mirror/object-inl.h" +#include "mirror/object_array.h" +#include "mirror/object_array-inl.h" #include "runtime.h" #include "space.h" +#include "space_bitmap-inl.h" #include "timing_logger.h" #include "thread.h" #include "thread_list.h" #include "verifier/method_verifier.h" +using namespace art::mirror; + namespace art { // Performance options. @@ -186,7 +196,7 @@ bool MarkSweep::HandleDirtyObjectsPhase() { timings_.AddSplit("ReMarkRoots"); // Scan dirty objects, this is only required if we are not doing concurrent GC. - RecursiveMarkDirtyObjects(); + RecursiveMarkDirtyObjects(CardTable::kCardDirty); } ProcessReferences(self); @@ -700,7 +710,7 @@ void MarkSweep::ReMarkRoots() { Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this); } -void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) { +void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) { JavaVMExt* vm = Runtime::Current()->GetJavaVM(); MutexLock mu(Thread::Current(), vm->weak_globals_lock); IndirectReferenceTable* table = &vm->weak_globals; diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h index 3581d98772..0d43bee795 100644 --- a/src/gc/mark_sweep.h +++ b/src/gc/mark_sweep.h @@ -14,28 +14,38 @@ * limitations under the License. 
*/ -#ifndef ART_SRC_MARK_SWEEP_H_ -#define ART_SRC_MARK_SWEEP_H_ +#ifndef ART_SRC_GC_MARK_SWEEP_H_ +#define ART_SRC_GC_MARK_SWEEP_H_ -#include "atomic_stack.h" +#include "atomic_integer.h" #include "base/macros.h" +#include "base/mutex.h" #include "garbage_collector.h" -#include "heap_bitmap.h" -#include "object.h" +#include "gc_type.h" #include "offsets.h" +#include "root_visitor.h" +#include "timing_logger.h" +#include "UniquePtr.h" namespace art { - +namespace mirror { +class Class; +class Object; +template class ObjectArray; +} +template class AtomicStack; class Barrier; class CheckObjectVisitor; -class Class; +class ContinuousSpace; class Heap; class MarkIfReachesAllocspaceVisitor; class ModUnionClearCardVisitor; class ModUnionVisitor; class ModUnionTableBitmap; -class Object; -class TimingLogger; +typedef AtomicStack ObjectStack; +class SpaceBitmap; +class StackVisitor; +class Thread; class MarkStackChunk; class MarkSweep : public GarbageCollector { @@ -79,7 +89,9 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Verify that image roots point to only marked objects within the alloc space. - void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void VerifyImageRoots() + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Builds a mark stack and recursively mark until it empties. void RecursiveMark() @@ -88,8 +100,8 @@ class MarkSweep : public GarbageCollector { // Make a space immune, immune spaces are assumed to have all live objects marked. void ImmuneSpace(ContinuousSpace* space) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);; + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps based on the gc type. 
virtual void BindBitmaps() @@ -102,7 +114,7 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Builds a mark stack with objects on dirty cards and recursively mark until it empties. - void RecursiveMarkDirtyObjects(byte minimum_age = CardTable::kCardDirty) + void RecursiveMarkDirtyObjects(byte minimum_age) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -130,61 +142,31 @@ class MarkSweep : public GarbageCollector { virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - Object* GetClearedReferences() { + mirror::Object* GetClearedReferences() { return cleared_reference_list_; } // Proxy for external access to ScanObject. - void ScanRoot(const Object* obj) + void ScanRoot(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Blackens an object. - void ScanObject(const Object* obj) + void ScanObject(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // TODO: enable thread safety analysis when in use by multiple worker threads. 
template - void ScanObjectVisit(const Object* obj, const MarkVisitor& visitor) - NO_THREAD_SAFETY_ANALYSIS { - DCHECK(obj != NULL); - if (kIsDebugBuild && !IsMarked(obj)) { - heap_->DumpSpaces(); - LOG(FATAL) << "Scanning unmarked object " << obj; - } - Class* klass = obj->GetClass(); - DCHECK(klass != NULL); - if (klass == java_lang_Class_) { - DCHECK_EQ(klass->GetClass(), java_lang_Class_); - if (kCountScannedTypes) { - ++class_count_; - } - VisitClassReferences(klass, obj, visitor); - } else if (klass->IsArrayClass()) { - if (kCountScannedTypes) { - ++array_count_; - } - visitor(obj, klass, Object::ClassOffset(), false); - if (klass->IsObjectArrayClass()) { - VisitObjectArrayReferences(obj->AsObjectArray(), visitor); - } - } else { - if (kCountScannedTypes) { - ++other_count_; - } - VisitOtherReferences(klass, obj, visitor); - if (UNLIKELY(klass->IsReferenceClass())) { - DelayReferenceReferent(const_cast(obj)); - } - } - } + void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) + NO_THREAD_SAFETY_ANALYSIS; - void SetFinger(Object* new_finger) { + void SetFinger(mirror::Object* new_finger) { finger_ = new_finger; } void DisableFinger() { - SetFinger(reinterpret_cast(~static_cast(0))); + SetFinger(reinterpret_cast(~static_cast(0))); } size_t GetFreedBytes() const { @@ -212,7 +194,7 @@ class MarkSweep : public GarbageCollector { } // Everything inside the immune range is assumed to be marked. 
- void SetImmuneRange(Object* begin, Object* end); + void SetImmuneRange(mirror::Object* begin, mirror::Object* end); void SweepSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); @@ -221,52 +203,33 @@ class MarkSweep : public GarbageCollector { void SweepSystemWeaksArray(ObjectStack* allocations) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static bool VerifyIsLiveCallback(const Object* obj, void* arg) + static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void VerifySystemWeaks() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Verify that an object is live, either in a live bitmap or in the allocation stack. - void VerifyIsLive(const Object* obj) + void VerifyIsLive(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); template - static void VisitObjectReferences(const Object* obj, const Visitor& visitor) + static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, - Locks::mutator_lock_) { - DCHECK(obj != NULL); - DCHECK(obj->GetClass() != NULL); - - Class* klass = obj->GetClass(); - DCHECK(klass != NULL); - if (klass == Class::GetJavaLangClass()) { - DCHECK_EQ(klass->GetClass(), Class::GetJavaLangClass()); - VisitClassReferences(klass, obj, visitor); - } else { - if (klass->IsArrayClass()) { - visitor(obj, klass, Object::ClassOffset(), false); - if (klass->IsObjectArrayClass()) { - VisitObjectArrayReferences(obj->AsObjectArray(), visitor); - } - } else { - VisitOtherReferences(klass, obj, visitor); - } - } - } + Locks::mutator_lock_); - static void MarkObjectCallback(const Object* root, void* arg) + static void MarkObjectCallback(const mirror::Object* root, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static void MarkRootParallelCallback(const Object* root, void* arg); + static void MarkRootParallelCallback(const 
mirror::Object* root, void* arg); // Marks an object. - void MarkObject(const Object* obj) + void MarkObject(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void MarkRoot(const Object* obj) + void MarkRoot(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); @@ -277,45 +240,46 @@ class MarkSweep : public GarbageCollector { protected: // Returns true if the object has its bit set in the mark bitmap. - bool IsMarked(const Object* object) const; + bool IsMarked(const mirror::Object* object) const; - static bool IsMarkedCallback(const Object* object, void* arg) + static bool IsMarkedCallback(const mirror::Object* object, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static bool IsMarkedArrayCallback(const Object* object, void* arg) + static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static void ReMarkObjectVisitor(const Object* root, void* arg) + static void ReMarkObjectVisitor(const mirror::Object* root, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static void VerifyImageRootVisitor(Object* root, void* arg) + static void VerifyImageRootVisitor(mirror::Object* root, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - void MarkObjectNonNull(const Object* obj, bool check_finger) + void MarkObjectNonNull(const mirror::Object* obj, bool check_finger) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void MarkObjectNonNullParallel(const Object* obj, bool check_finger); + void MarkObjectNonNullParallel(const mirror::Object* obj, bool check_finger); - bool MarkLargeObject(const Object* obj) + bool MarkLargeObject(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // 
Returns true if we need to add obj to a mark stack. - bool MarkObjectParallel(const Object* obj) NO_THREAD_SAFETY_ANALYSIS; + bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; - static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) + static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Special sweep for zygote that just marks objects / dirties cards. - static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) + static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) + void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset, + bool is_static) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - void CheckObject(const Object* obj) + void CheckObject(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Verify the roots of the heap and print out information related to any invalid roots. @@ -326,90 +290,41 @@ class MarkSweep : public GarbageCollector { // Expand mark stack to 2x its current size. Thread safe. 
void ExpandMarkStack(); - static void VerifyRootCallback(const Object* root, void* arg, size_t vreg, + static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg, const StackVisitor *visitor); - void VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) + void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor) NO_THREAD_SAFETY_ANALYSIS; template - static void VisitInstanceFieldsReferences(const Class* klass, const Object* obj, + static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - DCHECK(obj != NULL); - DCHECK(klass != NULL); - VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor); - } + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Visit the header, static field references, and interface pointers of a class object. template - static void VisitClassReferences(const Class* klass, const Object* obj, + static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - VisitInstanceFieldsReferences(klass, obj, visitor); - VisitStaticFieldsReferences(obj->AsClass(), visitor); - } + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); template - static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - DCHECK(klass != NULL); - VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor); - } + static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); template - static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool 
is_static, - const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) { - // Found a reference offset bitmap. Mark the specified offsets. - while (ref_offsets != 0) { - size_t right_shift = CLZ(ref_offsets); - MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift); - const Object* ref = obj->GetFieldObject(field_offset, false); - visitor(obj, ref, field_offset, is_static); - ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift); - } - } else { - // There is no reference offset bitmap. In the non-static case, - // walk up the class inheritance hierarchy and find reference - // offsets the hard way. In the static case, just consider this - // class. - for (const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); - klass != NULL; - klass = is_static ? NULL : klass->GetSuperClass()) { - size_t num_reference_fields = (is_static - ? klass->NumReferenceStaticFields() - : klass->NumReferenceInstanceFields()); - for (size_t i = 0; i < num_reference_fields; ++i) { - Field* field = (is_static - ? klass->GetStaticField(i) - : klass->GetInstanceField(i)); - MemberOffset field_offset = field->GetOffset(); - const Object* ref = obj->GetFieldObject(field_offset, false); - visitor(obj, ref, field_offset, is_static); - } - } - } - } + static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static, + const Visitor& visitor) + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Visit all of the references in an object array. 
template - static void VisitObjectArrayReferences(const ObjectArray* array, + static void VisitObjectArrayReferences(const mirror::ObjectArray* array, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - const int32_t length = array->GetLength(); - for (int32_t i = 0; i < length; ++i) { - const Object* element = array->GetWithoutChecks(i); - const size_t width = sizeof(Object*); - MemberOffset offset = MemberOffset(i * width + Array::DataOffset(width).Int32Value()); - visitor(array, element, offset, false); - } - } + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Visits the header and field references of a data object. template - static void VisitOtherReferences(const Class* klass, const Object* obj, + static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj, const Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { return VisitInstanceFieldsReferences(klass, obj, visitor); @@ -421,7 +336,7 @@ class MarkSweep : public GarbageCollector { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Schedules an unmarked object for reference processing. - void DelayReferenceReferent(Object* reference) + void DelayReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Recursively blackens objects on the mark stack. 
@@ -433,25 +348,25 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnqueueFinalizerReferences(Object** ref) + void EnqueueFinalizerReferences(mirror::Object** ref) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PreserveSomeSoftReferences(Object** ref) + void PreserveSomeSoftReferences(mirror::Object** ref) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ClearWhiteReferences(Object** list) + void ClearWhiteReferences(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - void ProcessReferences(Object** soft_references, bool clear_soft_references, - Object** weak_references, - Object** finalizer_references, - Object** phantom_references) + void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references, + mirror::Object** weak_references, + mirror::Object** finalizer_references, + mirror::Object** phantom_references) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) + void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Whether or not we count how many of each type of object were scanned. @@ -461,21 +376,21 @@ class MarkSweep : public GarbageCollector { SpaceBitmap* current_mark_bitmap_; // Cache java.lang.Class for optimization. - Class* java_lang_Class_; + mirror::Class* java_lang_Class_; ObjectStack* mark_stack_; - Object* finger_; + mirror::Object* finger_; // Immune range, every object inside the immune range is assumed to be marked. 
- Object* immune_begin_; - Object* immune_end_; + mirror::Object* immune_begin_; + mirror::Object* immune_end_; - Object* soft_reference_list_; - Object* weak_reference_list_; - Object* finalizer_reference_list_; - Object* phantom_reference_list_; - Object* cleared_reference_list_; + mirror::Object* soft_reference_list_; + mirror::Object* weak_reference_list_; + mirror::Object* finalizer_reference_list_; + mirror::Object* phantom_reference_list_; + mirror::Object* cleared_reference_list_; AtomicInteger freed_bytes_; AtomicInteger freed_objects_; @@ -529,4 +444,4 @@ class MarkSweep : public GarbageCollector { } // namespace art -#endif // ART_SRC_MARK_SWEEP_H_ +#endif // ART_SRC_GC_MARK_SWEEP_H_ diff --git a/src/gc/mod_union_table-inl.h b/src/gc/mod_union_table-inl.h new file mode 100644 index 0000000000..c1c69fb379 --- /dev/null +++ b/src/gc/mod_union_table-inl.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_ +#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_ + +#include "mod_union_table.h" + +namespace art { + +template +class ModUnionTableToZygoteAllocspace : public Implementation { +public: + ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) { + } + + bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) { + const Spaces& spaces = Implementation::GetHeap()->GetSpaces(); + for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) { + if ((*it)->Contains(ref)) { + return (*it)->IsAllocSpace(); + } + } + // Assume it points to a large object. + // TODO: Check. + return true; + } +}; + +template +class ModUnionTableToAllocspace : public Implementation { +public: + ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) { + } + + bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) { + const Spaces& spaces = Implementation::GetHeap()->GetSpaces(); + for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) { + if ((*it)->Contains(ref)) { + return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect; + } + } + // Assume it points to a large object. + // TODO: Check. 
+ return true; + } +}; + +} // namespace art + +#endif // ART_SRC_GC_MOD_UNION_TABLE_INL_H_ diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc index 8953c5ac61..da950bb9b8 100644 --- a/src/gc/mod_union_table.cc +++ b/src/gc/mod_union_table.cc @@ -17,12 +17,22 @@ #include "mod_union_table.h" #include "base/stl_util.h" +#include "card_table-inl.h" #include "heap.h" #include "heap_bitmap.h" #include "mark_sweep.h" +#include "mark_sweep-inl.h" +#include "mirror/object-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object_array-inl.h" #include "space.h" +#include "space_bitmap-inl.h" +#include "thread.h" #include "UniquePtr.h" +using namespace art::mirror; + namespace art { class MarkIfReachesAllocspaceVisitor { @@ -260,7 +270,7 @@ class CheckReferenceVisitor { // TODO: Fixme when anotatalysis works with visitors. void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { Heap* heap = mod_union_table_->GetHeap(); if (ref != NULL && mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) { diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h index f3da41c841..23c0a516e3 100644 --- a/src/gc/mod_union_table.h +++ b/src/gc/mod_union_table.h @@ -14,23 +14,30 @@ * limitations under the License. 
*/ -#ifndef ART_SRC_MOD_UNION_TABLE_H_ -#define ART_SRC_MOD_UNION_TABLE_H_ +#ifndef ART_SRC_GC_MOD_UNION_TABLE_H_ +#define ART_SRC_GC_MOD_UNION_TABLE_H_ -#include "heap.h" +#include "globals.h" #include "safe_map.h" -#include "space.h" -namespace art { +#include +#include +namespace art { +namespace mirror { +class Object; +} +class ContinuousSpace; class Heap; class HeapBitmap; +class MarkSweep; class Space; +class SpaceBitmap; // Base class class ModUnionTable { public: - typedef std::vector ReferenceArray; + typedef std::vector ReferenceArray; typedef std::set ClearedCards; ModUnionTable(Heap* heap) : heap_(heap) { @@ -118,7 +125,7 @@ class ModUnionTableReferenceCache : public ModUnionTable { void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Function that tells whether or not to add a reference to the table. - virtual bool AddReference(const Object* obj, const Object* ref) = 0; + virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0; protected: // Cleared card array, used to update the mod-union table. @@ -155,44 +162,6 @@ class ModUnionTableCardCache : public ModUnionTable { ClearedCards cleared_cards_; }; -template -class ModUnionTableToZygoteAllocspace : public Implementation { -public: - ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) { - } - - bool AddReference(const Object* /* obj */, const Object* ref) { - const Spaces& spaces = Implementation::GetHeap()->GetSpaces(); - for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) { - if ((*it)->Contains(ref)) { - return (*it)->IsAllocSpace(); - } - } - // Assume it points to a large object. - // TODO: Check. 
- return true; - } -}; - -template -class ModUnionTableToAllocspace : public Implementation { -public: - ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) { - } - - bool AddReference(const Object* /* obj */, const Object* ref) { - const Spaces& spaces = Implementation::GetHeap()->GetSpaces(); - for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) { - if ((*it)->Contains(ref)) { - return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect; - } - } - // Assume it points to a large object. - // TODO: Check. - return true; - } -}; - } // namespace art -#endif // ART_SRC_MOD_UNION_TABLE_H_ +#endif // ART_SRC_GC_MOD_UNION_TABLE_H_ diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/partial_mark_sweep.cc index 64f09ff91e..f9c1787045 100644 --- a/src/gc/partial_mark_sweep.cc +++ b/src/gc/partial_mark_sweep.cc @@ -14,32 +14,38 @@ * limitations under the License. */ +#include "partial_mark_sweep.h" + +#include "heap.h" #include "large_object_space.h" #include "partial_mark_sweep.h" #include "space.h" +#include "thread.h" namespace art { - PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent) - : MarkSweep(heap, is_concurrent) { - cumulative_timings_.SetName(GetName()); - } - PartialMarkSweep::~PartialMarkSweep() { +PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent) + : MarkSweep(heap, is_concurrent) { + cumulative_timings_.SetName(GetName()); +} - } +PartialMarkSweep::~PartialMarkSweep() { - void PartialMarkSweep::BindBitmaps() { - MarkSweep::BindBitmaps(); - - Spaces& spaces = GetHeap()->GetSpaces(); - WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); - // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the - // zygote space are viewed as marked. 
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) { - ContinuousSpace* space = *it; - if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) { - ImmuneSpace(space); - } +} + +void PartialMarkSweep::BindBitmaps() { + MarkSweep::BindBitmaps(); + + Spaces& spaces = GetHeap()->GetSpaces(); + WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); + // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the + // zygote space are viewed as marked. + for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) { + ContinuousSpace* space = *it; + if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) { + ImmuneSpace(space); } } +} + } // namespace art diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h index 80a15630ae..64c0bcd8f8 100644 --- a/src/gc/partial_mark_sweep.h +++ b/src/gc/partial_mark_sweep.h @@ -14,27 +14,14 @@ * limitations under the License. */ -#ifndef ART_SRC_PARTIAL_MARK_SWEEP_H_ -#define ART_SRC_PARTIAL_MARK_SWEEP_H_ +#ifndef ART_SRC_GC_PARTIAL_MARK_SWEEP_H_ +#define ART_SRC_GC_PARTIAL_MARK_SWEEP_H_ #include "locks.h" #include "mark_sweep.h" -#include "utils.h" namespace art { -class Barrier; -class CheckObjectVisitor; -class Class; -class Heap; -class MarkIfReachesAllocspaceVisitor; -class ModUnionClearCardVisitor; -class ModUnionVisitor; -class ModUnionTableBitmap; -class Object; -class TimingLogger; -class MarkStackChunk; - class PartialMarkSweep : public MarkSweep { public: virtual GcType GetGcType() const { @@ -53,4 +40,4 @@ protected: } // namespace art -#endif // ART_SRC_PARTIAL_MARK_SWEEP_H_ +#endif // ART_SRC_GC_PARTIAL_MARK_SWEEP_H_ diff --git a/src/gc/space.cc b/src/gc/space.cc index 04f932dc9b..9db84f24ba 100644 --- a/src/gc/space.cc +++ b/src/gc/space.cc @@ -19,10 +19,16 @@ #include "base/logging.h" #include "base/stl_util.h" #include "base/unix_file/fd_file.h" +#include "card_table.h" #include "dlmalloc.h" #include "image.h" 
+#include "mirror/array.h" +#include "mirror/abstract_method.h" #include "os.h" +#include "runtime.h" #include "space_bitmap.h" +#include "space_bitmap-inl.h" +#include "thread.h" #include "UniquePtr.h" #include "utils.h" @@ -204,12 +210,12 @@ void DlMallocSpace::SwapBitmaps() { mark_bitmap_->SetName(temp_name); } -Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) { +mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) { if (kDebugSpaces) { num_bytes += sizeof(word); } - Object* result = reinterpret_cast(mspace_calloc(mspace_, 1, num_bytes)); + mirror::Object* result = reinterpret_cast(mspace_calloc(mspace_, 1, num_bytes)); if (kDebugSpaces && result != NULL) { CHECK(Contains(result)) << "Allocation (" << reinterpret_cast(result) << ") not in bounds of allocation space " << *this; @@ -225,18 +231,18 @@ Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) { return result; } -Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) { +mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) { MutexLock mu(self, lock_); return AllocWithoutGrowthLocked(num_bytes); } -Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) { +mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) { MutexLock mu(self, lock_); // Grow as much as possible within the mspace. size_t max_allowed = Capacity(); mspace_set_footprint_limit(mspace_, max_allowed); // Try the allocation. - Object* result = AllocWithoutGrowthLocked(num_bytes); + mirror::Object* result = AllocWithoutGrowthLocked(num_bytes); // Shrink back down as small as possible. 
size_t footprint = mspace_footprint(mspace_); mspace_set_footprint_limit(mspace_, footprint); @@ -301,7 +307,7 @@ DlMallocSpace* DlMallocSpace::CreateZygoteSpace() { return alloc_space; } -size_t DlMallocSpace::Free(Thread* self, Object* ptr) { +size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) { MutexLock mu(self, lock_); if (kDebugSpaces) { CHECK(ptr != NULL); @@ -317,13 +323,13 @@ size_t DlMallocSpace::Free(Thread* self, Object* ptr) { return bytes_freed; } -size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) { +size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { DCHECK(ptrs != NULL); // Don't need the lock to calculate the size of the freed pointers. size_t bytes_freed = 0; for (size_t i = 0; i < num_ptrs; i++) { - Object* ptr = ptrs[i]; + mirror::Object* ptr = ptrs[i]; const size_t look_ahead = 8; if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) { // The head of chunk for the allocation is sizeof(size_t) behind the allocation. @@ -397,12 +403,12 @@ void* DlMallocSpace::MoreCore(intptr_t increment) { } // Virtual functions can't get inlined. 
-inline size_t DlMallocSpace::InternalAllocationSize(const Object* obj) { +inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) { return mspace_usable_size(const_cast(reinterpret_cast(obj))) + kChunkOverhead; } -size_t DlMallocSpace::AllocationSize(const Object* obj) { +size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) { return InternalAllocationSize(obj); } @@ -504,29 +510,29 @@ ImageSpace* ImageSpace::Create(const std::string& image_file_name) { DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader))); Runtime* runtime = Runtime::Current(); - Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray); - runtime->SetJniDlsymLookupStub(down_cast(jni_stub_array)); + mirror::Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray); + runtime->SetJniDlsymLookupStub(down_cast(jni_stub_array)); - Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray); - runtime->SetAbstractMethodErrorStubArray(down_cast(ame_stub_array)); + mirror::Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray); + runtime->SetAbstractMethodErrorStubArray(down_cast(ame_stub_array)); - Object* resolution_stub_array = + mirror::Object* resolution_stub_array = image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray); runtime->SetResolutionStubArray( - down_cast(resolution_stub_array), Runtime::kStaticMethod); + down_cast(resolution_stub_array), Runtime::kStaticMethod); resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray); runtime->SetResolutionStubArray( - down_cast(resolution_stub_array), Runtime::kUnknownMethod); + down_cast(resolution_stub_array), Runtime::kUnknownMethod); - Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod); - runtime->SetResolutionMethod(down_cast(resolution_method)); + mirror::Object* resolution_method = 
image_header.GetImageRoot(ImageHeader::kResolutionMethod); + runtime->SetResolutionMethod(down_cast(resolution_method)); - Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod); - runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kSaveAll); + mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod); + runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kSaveAll); callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod); - runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kRefsOnly); + runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kRefsOnly); callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod); - runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kRefsAndArgs); + runtime->SetCalleeSaveMethod(down_cast(callee_save_method), Runtime::kRefsAndArgs); ImageSpace* space = new ImageSpace(image_file_name, map.release()); if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { @@ -548,7 +554,7 @@ void ImageSpace::RecordImageAllocations(SpaceBitmap* live_bitmap) const { byte* end = End(); while (current < end) { DCHECK_ALIGNED(current, kObjectAlignment); - const Object* obj = reinterpret_cast(current); + const mirror::Object* obj = reinterpret_cast(current); live_bitmap->Set(obj); current += RoundUp(obj->SizeOf(), kObjectAlignment); } diff --git a/src/gc/space.h b/src/gc/space.h index 2ed4988f75..d2bcd53725 100644 --- a/src/gc/space.h +++ b/src/gc/space.h @@ -31,10 +31,12 @@ namespace art { static const bool kDebugSpaces = kIsDebugBuild; +namespace mirror { +class Object; +} // namespace mirror class DlMallocSpace; class ImageSpace; class LargeObjectSpace; -class Object; class SpaceBitmap; enum GcRetentionPolicy { @@ -57,7 +59,7 @@ class Space { public: virtual bool CanAllocateInto() const = 0; virtual bool IsCompactible() const = 0; - virtual bool Contains(const 
Object* obj) const = 0; + virtual bool Contains(const mirror::Object* obj) const = 0; virtual SpaceType GetType() const = 0; virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0; virtual std::string GetName() const = 0; @@ -108,16 +110,16 @@ class AllocSpace { virtual uint64_t GetTotalObjectsAllocated() const = 0; // Allocate num_bytes without allowing growth. - virtual Object* Alloc(Thread* self, size_t num_bytes) = 0; + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0; // Return the storage space required by obj. - virtual size_t AllocationSize(const Object* obj) = 0; + virtual size_t AllocationSize(const mirror::Object* obj) = 0; // Returns how many bytes were freed. - virtual size_t Free(Thread* self, Object* ptr) = 0; + virtual size_t Free(Thread* self, mirror::Object* ptr) = 0; // Returns how many bytes were freed. - virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs) = 0; + virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0; protected: AllocSpace() {} @@ -149,12 +151,12 @@ class ContinuousSpace : public Space { virtual SpaceBitmap* GetMarkBitmap() const = 0; // Is object within this space? - bool HasAddress(const Object* obj) const { + bool HasAddress(const mirror::Object* obj) const { const byte* byte_ptr = reinterpret_cast(obj); return Begin() <= byte_ptr && byte_ptr < End(); } - virtual bool Contains(const Object* obj) const { + virtual bool Contains(const mirror::Object* obj) const { return HasAddress(obj); } @@ -188,7 +190,7 @@ class ContinuousSpace : public Space { class DiscontinuousSpace : public virtual Space { public: // Is object within this space? 
- virtual bool Contains(const Object* obj) const = 0; + virtual bool Contains(const mirror::Object* obj) const = 0; virtual std::string GetName() const { return name_; @@ -267,15 +269,15 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace { size_t capacity, byte* requested_begin); // Allocate num_bytes without allowing the underlying mspace to grow. - virtual Object* AllocWithGrowth(Thread* self, size_t num_bytes); + virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes); // Allocate num_bytes allowing the underlying mspace to grow. - virtual Object* Alloc(Thread* self, size_t num_bytes); + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes); // Return the storage space required by obj. - virtual size_t AllocationSize(const Object* obj); - virtual size_t Free(Thread* self, Object* ptr); - virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs); + virtual size_t AllocationSize(const mirror::Object* obj); + virtual size_t Free(Thread* self, mirror::Object* ptr); + virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs); void* MoreCore(intptr_t increment); @@ -353,8 +355,8 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace { } private: - size_t InternalAllocationSize(const Object* obj); - Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_); + size_t InternalAllocationSize(const mirror::Object* obj); + mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_); UniquePtr live_bitmap_; UniquePtr mark_bitmap_; diff --git a/src/gc/space_bitmap-inl.h b/src/gc/space_bitmap-inl.h new file mode 100644 index 0000000000..e1fdd29d8d --- /dev/null +++ b/src/gc/space_bitmap-inl.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_GC_SPACE_BITMAP_INL_H_ +#define ART_SRC_GC_SPACE_BITMAP_INL_H_ + +#include "base/logging.h" +#include "cutils/atomic.h" + +namespace art { + +inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) { + uintptr_t addr = reinterpret_cast(obj); + DCHECK_GE(addr, heap_begin_); + const uintptr_t offset = addr - heap_begin_; + const size_t index = OffsetToIndex(offset); + const word mask = OffsetToMask(offset); + word* const address = &bitmap_begin_[index]; + DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_; + word old_word; + do { + old_word = *address; + // Fast path: The bit is already set. + if ((old_word & mask) != 0) { + return true; + } + } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0)); + return false; +} + +inline bool SpaceBitmap::Test(const mirror::Object* obj) const { + uintptr_t addr = reinterpret_cast(obj); + DCHECK(HasAddress(obj)) << obj; + DCHECK(bitmap_begin_ != NULL); + DCHECK_GE(addr, heap_begin_); + const uintptr_t offset = addr - heap_begin_; + return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0; +} + +template +void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, + const Visitor& visitor, + const FingerVisitor& finger_visitor) const { + DCHECK_LT(visit_begin, visit_end); + + const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1). 
+ const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment; + const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment; + + size_t word_start = bit_index_start / kBitsPerWord; + size_t word_end = bit_index_end / kBitsPerWord; + DCHECK_LT(word_end * kWordSize, Size()); + + // Trim off left_bits of left bits. + size_t edge_word = bitmap_begin_[word_start]; + + // Handle bits on the left first as a special case + size_t left_bits = bit_index_start & (kBitsPerWord - 1); + if (left_bits != 0) { + edge_word &= (1 << (kBitsPerWord - left_bits)) - 1; + } + + // If word_start == word_end then handle this case at the same place we handle the right edge. + if (edge_word != 0 && word_start < word_end) { + uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_; + finger_visitor(reinterpret_cast(ptr_base + word_span)); + do { + const size_t shift = CLZ(edge_word); + mirror::Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); + visitor(obj); + edge_word ^= static_cast(kWordHighBitMask) >> shift; + } while (edge_word != 0); + } + word_start++; + + for (size_t i = word_start; i < word_end; i++) { + size_t w = bitmap_begin_[i]; + if (w != 0) { + uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; + finger_visitor(reinterpret_cast(ptr_base + word_span)); + do { + const size_t shift = CLZ(w); + mirror::Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); + visitor(obj); + w ^= static_cast(kWordHighBitMask) >> shift; + } while (w != 0); + } + } + + // Handle the right edge, and also the left edge if both edges are on the same word. + size_t right_bits = bit_index_end & (kBitsPerWord - 1); + + // If word_start == word_end then we need to use the word which we removed the left bits. + if (word_start <= word_end) { + edge_word = bitmap_begin_[word_end]; + } + + // Bits that we trim off the right. 
+ edge_word &= ~((static_cast(kWordHighBitMask) >> right_bits) - 1); + uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_; + finger_visitor(reinterpret_cast(ptr_base + word_span)); + while (edge_word != 0) { + const size_t shift = CLZ(edge_word); + mirror::Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); + visitor(obj); + edge_word ^= static_cast(kWordHighBitMask) >> shift; + } +} + +inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) { + uintptr_t addr = reinterpret_cast(obj); + DCHECK_GE(addr, heap_begin_); + const uintptr_t offset = addr - heap_begin_; + const size_t index = OffsetToIndex(offset); + const word mask = OffsetToMask(offset); + DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_; + word* address = &bitmap_begin_[index]; + word old_word = *address; + if (do_set) { + *address = old_word | mask; + } else { + *address = old_word & ~mask; + } + return (old_word & mask) != 0; +} +} // namespace art + +#endif // ART_SRC_GC_SPACE_BITMAP_INL_H_ diff --git a/src/gc/space_bitmap.cc b/src/gc/space_bitmap.cc index 25fa672ee1..d90c090187 100644 --- a/src/gc/space_bitmap.cc +++ b/src/gc/space_bitmap.cc @@ -17,6 +17,11 @@ #include "heap_bitmap.h" #include "base/logging.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "space_bitmap-inl.h" #include "UniquePtr.h" #include "utils.h" @@ -32,7 +37,7 @@ void SpaceBitmap::SetName(const std::string& name) { void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) { for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) { - callback(const_cast(*it), arg); + callback(const_cast(*it), arg); } } @@ -98,7 +103,7 @@ void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) { uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; do { const size_t shift = CLZ(w); - Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); 
+ mirror::Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); (*callback)(obj, arg); w ^= static_cast(kWordHighBitMask) >> shift; } while (w != 0); @@ -127,10 +132,10 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap, return; } - // TODO: rewrite the callbacks to accept a std::vector rather than a Object**? + // TODO: rewrite the callbacks to accept a std::vector rather than a mirror::Object**? const size_t buffer_size = kWordSize * kBitsPerWord; - Object* pointer_buf[buffer_size]; - Object** pb = &pointer_buf[0]; + mirror::Object* pointer_buf[buffer_size]; + mirror::Object** pb = &pointer_buf[0]; size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_); size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1); CHECK_LT(end, live_bitmap.Size() / kWordSize); @@ -143,7 +148,7 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap, do { const size_t shift = CLZ(garbage); garbage ^= static_cast(kWordHighBitMask) >> shift; - *pb++ = reinterpret_cast(ptr_base + shift * kAlignment); + *pb++ = reinterpret_cast(ptr_base + shift * kAlignment); } while (garbage != 0); // Make sure that there are always enough slots available for an // entire word of one bits. @@ -161,32 +166,32 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap, } // namespace art // Support needed for in order traversal -#include "object.h" +#include "mirror/object.h" #include "object_utils.h" namespace art { -static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, +static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj, void* arg); // Walk instance fields of the given Class. Separate function to allow recursion on the super // class. 
-static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, - Class* klass, void* arg) +static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj, + mirror::Class* klass, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Visit fields of parent classes first. - Class* super = klass->GetSuperClass(); + mirror::Class* super = klass->GetSuperClass(); if (super != NULL) { WalkInstanceFields(visited, callback, obj, super, arg); } // Walk instance fields - ObjectArray* fields = klass->GetIFields(); + mirror::ObjectArray* fields = klass->GetIFields(); if (fields != NULL) { for (int32_t i = 0; i < fields->GetLength(); i++) { - Field* field = fields->Get(i); + mirror::Field* field = fields->Get(i); FieldHelper fh(field); if (!fh.IsPrimitiveType()) { - Object* value = field->GetObj(obj); + mirror::Object* value = field->GetObj(obj); if (value != NULL) { WalkFieldsInOrder(visited, callback, value, arg); } @@ -196,7 +201,7 @@ static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* call } // For an unvisited object, visit it then all its children found via fields. 
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, +static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (visited->Test(obj)) { @@ -206,17 +211,17 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callb (*callback)(obj, arg); visited->Set(obj); // Walk instance fields of all objects - Class* klass = obj->GetClass(); + mirror::Class* klass = obj->GetClass(); WalkInstanceFields(visited, callback, obj, klass, arg); // Walk static fields of a Class if (obj->IsClass()) { - ObjectArray* fields = klass->GetSFields(); + mirror::ObjectArray* fields = klass->GetSFields(); if (fields != NULL) { for (int32_t i = 0; i < fields->GetLength(); i++) { - Field* field = fields->Get(i); + mirror::Field* field = fields->Get(i); FieldHelper fh(field); if (!fh.IsPrimitiveType()) { - Object* value = field->GetObj(NULL); + mirror::Object* value = field->GetObj(NULL); if (value != NULL) { WalkFieldsInOrder(visited, callback, value, arg); } @@ -225,10 +230,10 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callb } } else if (obj->IsObjectArray()) { // Walk elements of an object array - ObjectArray* obj_array = obj->AsObjectArray(); + mirror::ObjectArray* obj_array = obj->AsObjectArray(); int32_t length = obj_array->GetLength(); for (int32_t i = 0; i < length; i++) { - Object* value = obj_array->Get(i); + mirror::Object* value = obj_array->Get(i); if (value != NULL) { WalkFieldsInOrder(visited, callback, value, arg); } @@ -251,7 +256,7 @@ void SpaceBitmap::InOrderWalk(SpaceBitmap::Callback* callback, void* arg) { uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; while (w != 0) { const size_t shift = CLZ(w); - Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); + mirror::Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); WalkFieldsInOrder(visited.get(), 
callback, obj, arg); w ^= static_cast(kWordHighBitMask) >> shift; } diff --git a/src/gc/space_bitmap.h b/src/gc/space_bitmap.h index dd2f47d8f8..6bc06d600d 100644 --- a/src/gc/space_bitmap.h +++ b/src/gc/space_bitmap.h @@ -14,35 +14,33 @@ * limitations under the License. */ -#ifndef ART_SRC_SPACE_BITMAP_H_ -#define ART_SRC_SPACE_BITMAP_H_ +#ifndef ART_SRC_GC_SPACE_BITMAP_H_ +#define ART_SRC_GC_SPACE_BITMAP_H_ + +#include "locks.h" +#include "globals.h" +#include "mem_map.h" +#include "UniquePtr.h" #include #include #include #include -#include "base/logging.h" -#include "cutils/atomic.h" -#include "cutils/atomic-inline.h" -#include "UniquePtr.h" -#include "globals.h" -#include "mem_map.h" -#include "utils.h" - namespace art { - +namespace mirror { class Object; +} // namespace mirror class SpaceBitmap { public: static const size_t kAlignment = 8; - typedef void Callback(Object* obj, void* arg); + typedef void Callback(mirror::Object* obj, void* arg); - typedef void ScanCallback(Object* obj, void* finger, void* arg); + typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg); - typedef void SweepCallback(size_t ptr_count, Object** ptrs, void* arg); + typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg); // Initialize a HeapBitmap so that it points to a bitmap large enough to cover a heap at // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned. @@ -66,44 +64,20 @@ class SpaceBitmap { return static_cast(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord); } - inline bool Set(const Object* obj) { + inline bool Set(const mirror::Object* obj) { return Modify(obj, true); } - inline bool Clear(const Object* obj) { + inline bool Clear(const mirror::Object* obj) { return Modify(obj, false); } // Returns true if the object was previously marked. 
- inline bool AtomicTestAndSet(const Object* obj) { - uintptr_t addr = reinterpret_cast(obj); - DCHECK_GE(addr, heap_begin_); - const uintptr_t offset = addr - heap_begin_; - const size_t index = OffsetToIndex(offset); - const word mask = OffsetToMask(offset); - word* const address = &bitmap_begin_[index]; - DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_; - word old_word; - do { - old_word = *address; - // Fast path: The bit is already set. - if ((old_word & mask) != 0) { - return true; - } - } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0)); - return false; - } + bool AtomicTestAndSet(const mirror::Object* obj); void Clear(); - inline bool Test(const Object* obj) const { - uintptr_t addr = reinterpret_cast(obj); - DCHECK(HasAddress(obj)) << obj; - DCHECK(bitmap_begin_ != NULL); - DCHECK_GE(addr, heap_begin_); - const uintptr_t offset = addr - heap_begin_; - return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0; - } + bool Test(const mirror::Object* obj) const; // Return true iff is within the range of pointers that this bitmap could potentially cover, // even if a bit has not been set for it. @@ -123,7 +97,7 @@ class SpaceBitmap { : bitmap_(bitmap) { } - void operator ()(Object* obj) const { + void operator ()(mirror::Object* obj) const { bitmap_->Clear(obj); } private: @@ -133,86 +107,21 @@ class SpaceBitmap { template void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const { for (; visit_begin < visit_end; visit_begin += kAlignment ) { - visitor(reinterpret_cast(visit_begin)); + visitor(reinterpret_cast(visit_begin)); } } template void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor, const FingerVisitor& finger_visitor) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { - DCHECK_LT(visit_begin, visit_end); - - const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1). 
- const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment; - const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment; - - size_t word_start = bit_index_start / kBitsPerWord; - size_t word_end = bit_index_end / kBitsPerWord; - DCHECK_LT(word_end * kWordSize, Size()); - - // Trim off left_bits of left bits. - size_t edge_word = bitmap_begin_[word_start]; - - // Handle bits on the left first as a special case - size_t left_bits = bit_index_start & (kBitsPerWord - 1); - if (left_bits != 0) { - edge_word &= (1 << (kBitsPerWord - left_bits)) - 1; - } - - // If word_start == word_end then handle this case at the same place we handle the right edge. - if (edge_word != 0 && word_start < word_end) { - uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_; - finger_visitor(reinterpret_cast(ptr_base + word_span)); - do { - const size_t shift = CLZ(edge_word); - Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); - visitor(obj); - edge_word ^= static_cast(kWordHighBitMask) >> shift; - } while (edge_word != 0); - } - word_start++; - - for (size_t i = word_start; i < word_end; i++) { - size_t w = bitmap_begin_[i]; - if (w != 0) { - uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; - finger_visitor(reinterpret_cast(ptr_base + word_span)); - do { - const size_t shift = CLZ(w); - Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); - visitor(obj); - w ^= static_cast(kWordHighBitMask) >> shift; - } while (w != 0); - } - } - - // Handle the right edge, and also the left edge if both edges are on the same word. - size_t right_bits = bit_index_end & (kBitsPerWord - 1); - - // If word_start == word_end then we need to use the word which we removed the left bits. - if (word_start <= word_end) { - edge_word = bitmap_begin_[word_end]; - } - - // Bits that we trim off the right. 
- edge_word &= ~((static_cast(kWordHighBitMask) >> right_bits) - 1); - uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_; - finger_visitor(reinterpret_cast(ptr_base + word_span)); - while (edge_word != 0) { - const size_t shift = CLZ(edge_word); - Object* obj = reinterpret_cast(ptr_base + shift * kAlignment); - visitor(obj); - edge_word ^= static_cast(kWordHighBitMask) >> shift; - } - } + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Walk(Callback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void InOrderWalk(Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, @@ -251,7 +160,7 @@ class SpaceBitmap { std::string GetName() const; void SetName(const std::string& name); - const void* GetObjectWordAddress(const Object* obj) const { + const void* GetObjectWordAddress(const mirror::Object* obj) const { uintptr_t addr = reinterpret_cast(obj); const uintptr_t offset = addr - heap_begin_; const size_t index = OffsetToIndex(offset); @@ -265,22 +174,7 @@ class SpaceBitmap { heap_begin_(reinterpret_cast(heap_begin)), name_(name) {} - inline bool Modify(const Object* obj, bool do_set) { - uintptr_t addr = reinterpret_cast(obj); - DCHECK_GE(addr, heap_begin_); - const uintptr_t offset = addr - heap_begin_; - const size_t index = OffsetToIndex(offset); - const word mask = OffsetToMask(offset); - DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_; - word* address = &bitmap_begin_[index]; - word old_word = *address; - if (do_set) { - *address = old_word | mask; - } else { - *address = old_word & ~mask; - } - return (old_word & mask) != 0; - } + bool Modify(const mirror::Object* obj, bool do_set); // Backing storage for bitmap. 
UniquePtr mem_map_; @@ -302,17 +196,17 @@ class SpaceBitmap { // Like a bitmap except it keeps track of objects using sets. class SpaceSetMap { public: - typedef std::set Objects; + typedef std::set Objects; bool IsEmpty() const { return contained_.empty(); } - inline void Set(const Object* obj) { + inline void Set(const mirror::Object* obj) { contained_.insert(obj); } - inline void Clear(const Object* obj) { + inline void Clear(const mirror::Object* obj) { Objects::iterator found = contained_.find(obj); if (found != contained_.end()) { contained_.erase(found); @@ -323,7 +217,7 @@ class SpaceSetMap { contained_.clear(); } - inline bool Test(const Object* obj) const { + inline bool Test(const mirror::Object* obj) const { return contained_.find(obj) != contained_.end(); } @@ -357,4 +251,4 @@ std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap); } // namespace art -#endif // ART_SRC_SPACE_BITMAP_H_ +#endif // ART_SRC_GC_SPACE_BITMAP_H_ diff --git a/src/gc/space_bitmap_test.cc b/src/gc/space_bitmap_test.cc index a2f1afc304..5a829e4f66 100644 --- a/src/gc/space_bitmap_test.cc +++ b/src/gc/space_bitmap_test.cc @@ -19,6 +19,7 @@ #include "common_test.h" #include "dlmalloc.h" #include "globals.h" +#include "space_bitmap-inl.h" #include "UniquePtr.h" #include @@ -39,20 +40,20 @@ TEST_F(SpaceBitmapTest, Init) { class BitmapVerify { public: - BitmapVerify(SpaceBitmap* bitmap, const Object* begin, const Object* end) + BitmapVerify(SpaceBitmap* bitmap, const mirror::Object* begin, const mirror::Object* end) : bitmap_(bitmap), begin_(begin), end_(end) {} - void operator ()(const Object* obj) { + void operator ()(const mirror::Object* obj) { EXPECT_TRUE(obj >= begin_); EXPECT_TRUE(obj <= end_); EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast(obj) & 0xF) != 0)); } SpaceBitmap* bitmap_; - const Object* begin_; - const Object* end_; + const mirror::Object* begin_; + const mirror::Object* end_; }; TEST_F(SpaceBitmapTest, ScanRange) { @@ -65,7 +66,8 @@ 
TEST_F(SpaceBitmapTest, ScanRange) { // Set all the odd bits in the first BitsPerWord * 3 to one. for (size_t j = 0;j < kBitsPerWord * 3; ++j) { - const Object* obj = reinterpret_cast(heap_begin + j * SpaceBitmap::kAlignment); + const mirror::Object* obj = + reinterpret_cast(heap_begin + j * SpaceBitmap::kAlignment); if (reinterpret_cast(obj) & 0xF) { space_bitmap->Set(obj); } @@ -75,9 +77,11 @@ TEST_F(SpaceBitmapTest, ScanRange) { // This handles all the cases, having runs which start and end on the same word, and different // words. for (size_t i = 0; i < static_cast(kBitsPerWord); ++i) { - Object* start = reinterpret_cast(heap_begin + i * SpaceBitmap::kAlignment); + mirror::Object* start = + reinterpret_cast(heap_begin + i * SpaceBitmap::kAlignment); for (size_t j = 0; j < static_cast(kBitsPerWord * 2); ++j) { - Object* end = reinterpret_cast(heap_begin + (i + j) * SpaceBitmap::kAlignment); + mirror::Object* end = + reinterpret_cast(heap_begin + (i + j) * SpaceBitmap::kAlignment); BitmapVerify(space_bitmap.get(), start, end); } } diff --git a/src/gc/space_test.cc b/src/gc/space_test.cc index 2e03eae0cc..372ec7710c 100644 --- a/src/gc/space_test.cc +++ b/src/gc/space_test.cc @@ -83,23 +83,23 @@ TEST_F(SpaceTest, ZygoteSpace) { Thread* self = Thread::Current(); // Succeeds, fits without adjusting the footprint limit. - Object* ptr1 = space->Alloc(self, 1 * MB); + mirror::Object* ptr1 = space->Alloc(self, 1 * MB); EXPECT_TRUE(ptr1 != NULL); // Fails, requires a higher footprint limit. - Object* ptr2 = space->Alloc(self, 8 * MB); + mirror::Object* ptr2 = space->Alloc(self, 8 * MB); EXPECT_TRUE(ptr2 == NULL); // Succeeds, adjusts the footprint. - Object* ptr3 = space->AllocWithGrowth(self, 8 * MB); + mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB); EXPECT_TRUE(ptr3 != NULL); // Fails, requires a higher footprint limit. 
- Object* ptr4 = space->Alloc(self, 8 * MB); + mirror::Object* ptr4 = space->Alloc(self, 8 * MB); EXPECT_TRUE(ptr4 == NULL); // Also fails, requires a higher allowed footprint. - Object* ptr5 = space->AllocWithGrowth(self, 8 * MB); + mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB); EXPECT_TRUE(ptr5 == NULL); // Release some memory. @@ -151,23 +151,23 @@ TEST_F(SpaceTest, AllocAndFree) { Runtime::Current()->GetHeap()->AddSpace(space); // Succeeds, fits without adjusting the footprint limit. - Object* ptr1 = space->Alloc(self, 1 * MB); + mirror::Object* ptr1 = space->Alloc(self, 1 * MB); EXPECT_TRUE(ptr1 != NULL); // Fails, requires a higher footprint limit. - Object* ptr2 = space->Alloc(self, 8 * MB); + mirror::Object* ptr2 = space->Alloc(self, 8 * MB); EXPECT_TRUE(ptr2 == NULL); // Succeeds, adjusts the footprint. - Object* ptr3 = space->AllocWithGrowth(self, 8 * MB); + mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB); EXPECT_TRUE(ptr3 != NULL); // Fails, requires a higher footprint limit. - Object* ptr4 = space->Alloc(self, 8 * MB); + mirror::Object* ptr4 = space->Alloc(self, 8 * MB); EXPECT_TRUE(ptr4 == NULL); // Also fails, requires a higher allowed footprint. - Object* ptr5 = space->AllocWithGrowth(self, 8 * MB); + mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB); EXPECT_TRUE(ptr5 == NULL); // Release some memory. @@ -194,7 +194,7 @@ TEST_F(SpaceTest, AllocAndFreeList) { Thread* self = Thread::Current(); // Succeeds, fits without adjusting the max allowed footprint. - Object* lots_of_objects[1024]; + mirror::Object* lots_of_objects[1024]; for (size_t i = 0; i < arraysize(lots_of_objects); i++) { lots_of_objects[i] = space->Alloc(self, 16); EXPECT_TRUE(lots_of_objects[i] != NULL); @@ -252,7 +252,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr // Fill the space with lots of small objects up to the growth limit size_t max_objects = (growth_limit / (object_size > 0 ? 
object_size : 8)) + 1; - UniquePtr lots_of_objects(new Object*[max_objects]); + UniquePtr lots_of_objects(new mirror::Object*[max_objects]); size_t last_object = 0; // last object for which allocation succeeded size_t amount_allocated = 0; // amount of space allocated Thread* self = Thread::Current(); @@ -269,7 +269,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr alloc_size = 8; } } - Object* object; + mirror::Object* object; if (round <= 1) { object = space->Alloc(self, alloc_size); } else { @@ -326,7 +326,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr // Free some objects for (size_t i = 0; i < last_object; i += free_increment) { - Object* object = lots_of_objects.get()[i]; + mirror::Object* object = lots_of_objects.get()[i]; if (object == NULL) { continue; } @@ -347,7 +347,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr } // All memory was released, try a large allocation to check freed memory is being coalesced - Object* large_object; + mirror::Object* large_object; size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4); if (round <= 1) { large_object = space->Alloc(self, three_quarters_space); diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc index 23196fda25..988d4e79e7 100644 --- a/src/gc/sticky_mark_sweep.cc +++ b/src/gc/sticky_mark_sweep.cc @@ -14,47 +14,51 @@ * limitations under the License. 
*/ +#include "heap.h" #include "large_object_space.h" #include "space.h" #include "sticky_mark_sweep.h" +#include "thread.h" namespace art { - StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent) - : PartialMarkSweep(heap, is_concurrent) { - cumulative_timings_.SetName(GetName()); - } - StickyMarkSweep::~StickyMarkSweep() { +StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent) + : PartialMarkSweep(heap, is_concurrent) { + cumulative_timings_.SetName(GetName()); +} - } +StickyMarkSweep::~StickyMarkSweep() { - void StickyMarkSweep::BindBitmaps() { - PartialMarkSweep::BindBitmaps(); - - Spaces& spaces = GetHeap()->GetSpaces(); - WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); - // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space. - // This lets us start with the mark bitmap of the previous garbage collection as the current - // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps, - // making it so that the live bitmap of the alloc space is contains the newly marked objects - // from the sticky GC. - for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) { - if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) { - BindLiveToMarkBitmap(*it); - } - } +} - GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); - } +void StickyMarkSweep::BindBitmaps() { + PartialMarkSweep::BindBitmaps(); - void StickyMarkSweep::MarkReachableObjects() { - DisableFinger(); - RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1); + Spaces& spaces = GetHeap()->GetSpaces(); + WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); + // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space. + // This lets us start with the mark bitmap of the previous garbage collection as the current + // mark bitmap of the alloc space. 
After the sticky GC finishes, we then unbind the bitmaps, + // making it so that the live bitmap of the alloc space is contains the newly marked objects + // from the sticky GC. + for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) { + if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) { + BindLiveToMarkBitmap(*it); + } } - void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) { - ObjectStack* live_stack = GetHeap()->GetLiveStack(); - SweepArray(timings_, live_stack, false); - timings_.AddSplit("SweepArray"); - } + GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); +} + +void StickyMarkSweep::MarkReachableObjects() { + DisableFinger(); + RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1); +} + +void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) { + ObjectStack* live_stack = GetHeap()->GetLiveStack(); + SweepArray(timings_, live_stack, false); + timings_.AddSplit("SweepArray"); +} + } // namespace art diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/sticky_mark_sweep.h index 8396bbe09a..41ab0cc807 100644 --- a/src/gc/sticky_mark_sweep.h +++ b/src/gc/sticky_mark_sweep.h @@ -14,28 +14,15 @@ * limitations under the License. 
*/ -#ifndef ART_SRC_STICKY_MARK_SWEEP_H_ -#define ART_SRC_STICKY_MARK_SWEEP_H_ +#ifndef ART_SRC_GC_STICKY_MARK_SWEEP_H_ +#define ART_SRC_GC_STICKY_MARK_SWEEP_H_ #include "base/macros.h" #include "locks.h" #include "partial_mark_sweep.h" -#include "utils.h" namespace art { -class Barrier; -class CheckObjectVisitor; -class Class; -class Heap; -class MarkIfReachesAllocspaceVisitor; -class ModUnionClearCardVisitor; -class ModUnionVisitor; -class ModUnionTableBitmap; -class Object; -class TimingLogger; -class MarkStackChunk; - class StickyMarkSweep : public PartialMarkSweep { public: virtual GcType GetGcType() const { @@ -60,4 +47,4 @@ protected: } // namespace art -#endif // ART_SRC_STICKY_MARK_SWEEP_H_ +#endif // ART_SRC_GC_STICKY_MARK_SWEEP_H_ diff --git a/src/heap.cc b/src/heap.cc index 805d63c576..5c96decdd9 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -27,15 +27,24 @@ #include "debugger.h" #include "gc/atomic_stack.h" #include "gc/card_table.h" +#include "gc/card_table-inl.h" #include "gc/heap_bitmap.h" +#include "gc/heap_bitmap-inl.h" #include "gc/large_object_space.h" #include "gc/mark_sweep.h" +#include "gc/mark_sweep-inl.h" #include "gc/partial_mark_sweep.h" +#include "gc/space_bitmap-inl.h" #include "gc/sticky_mark_sweep.h" #include "gc/mod_union_table.h" +#include "gc/mod_union_table-inl.h" #include "gc/space.h" #include "image.h" -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "os.h" #include "ScopedLocalRef.h" @@ -431,7 +440,7 @@ Heap::~Heap() { delete gc_complete_lock_; } -ContinuousSpace* Heap::FindSpaceFromObject(const Object* obj) const { +ContinuousSpace* Heap::FindSpaceFromObject(const mirror::Object* obj) const { // TODO: C++0x auto for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) { if ((*it)->Contains(obj)) { @@ -465,13 +474,13 @@ static void 
MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* } } -Object* Heap::AllocObject(Thread* self, Class* c, size_t byte_count) { - DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) || +mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) { + DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) || (c->IsVariableSize() || c->GetObjectSize() == byte_count) || strlen(ClassHelper(c).GetDescriptor()) == 0); - DCHECK_GE(byte_count, sizeof(Object)); + DCHECK_GE(byte_count, sizeof(mirror::Object)); - Object* obj = NULL; + mirror::Object* obj = NULL; size_t size = 0; uint64_t allocation_start = 0; if (measure_allocation_time_) { @@ -513,7 +522,7 @@ Object* Heap::AllocObject(Thread* self, Class* c, size_t byte_count) { // concurrent_start_bytes_. concurrent_start_bytes_ = std::numeric_limits::max(); // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint. - SirtRef ref(self, obj); + SirtRef ref(self, obj); RequestConcurrentGC(self); } VerifyObject(obj); @@ -547,7 +556,7 @@ Object* Heap::AllocObject(Thread* self, Class* c, size_t byte_count) { return NULL; } -bool Heap::IsHeapAddress(const Object* obj) { +bool Heap::IsHeapAddress(const mirror::Object* obj) { // Note: we deliberately don't take the lock here, and mustn't test anything that would // require taking the lock. 
if (obj == NULL) { @@ -566,7 +575,7 @@ bool Heap::IsHeapAddress(const Object* obj) { return large_object_space_->Contains(obj); } -bool Heap::IsLiveObjectLocked(const Object* obj) { +bool Heap::IsLiveObjectLocked(const mirror::Object* obj) { Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current()); return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj); } @@ -596,7 +605,7 @@ void Heap::DumpSpaces() { } } -void Heap::VerifyObjectBody(const Object* obj) { +void Heap::VerifyObjectBody(const mirror::Object* obj) { if (!IsAligned(obj)) { LOG(FATAL) << "Object isn't aligned: " << obj; } @@ -618,8 +627,8 @@ void Heap::VerifyObjectBody(const Object* obj) { // Ignore early dawn of the universe verifications if (!VERIFY_OBJECT_FAST && GetObjectsAllocated() > 10) { const byte* raw_addr = reinterpret_cast(obj) + - Object::ClassOffset().Int32Value(); - const Class* c = *reinterpret_cast(raw_addr); + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c = *reinterpret_cast(raw_addr); if (c == NULL) { LOG(FATAL) << "Null class in object: " << obj; } else if (!IsAligned(c)) { @@ -630,15 +639,15 @@ void Heap::VerifyObjectBody(const Object* obj) { // Check obj.getClass().getClass() == obj.getClass().getClass().getClass() // Note: we don't use the accessors here as they have internal sanity checks // that we don't want to run - raw_addr = reinterpret_cast(c) + Object::ClassOffset().Int32Value(); - const Class* c_c = *reinterpret_cast(raw_addr); - raw_addr = reinterpret_cast(c_c) + Object::ClassOffset().Int32Value(); - const Class* c_c_c = *reinterpret_cast(raw_addr); + raw_addr = reinterpret_cast(c) + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c_c = *reinterpret_cast(raw_addr); + raw_addr = reinterpret_cast(c_c) + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c_c_c = *reinterpret_cast(raw_addr); CHECK_EQ(c_c, c_c_c); } } -void Heap::VerificationCallback(Object* obj, void* arg) { +void 
Heap::VerificationCallback(mirror::Object* obj, void* arg) { DCHECK(obj != NULL); reinterpret_cast(arg)->VerifyObjectBody(obj); } @@ -648,7 +657,7 @@ void Heap::VerifyHeap() { GetLiveBitmap()->Walk(Heap::VerificationCallback, this); } -void Heap::RecordAllocation(size_t size, Object* obj) { +void Heap::RecordAllocation(size_t size, mirror::Object* obj) { DCHECK(obj != NULL); DCHECK_GT(size, 0u); num_bytes_allocated_ += size; @@ -687,7 +696,7 @@ void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) { } } -Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) { +mirror::Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) { // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize? if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) { // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here. @@ -711,13 +720,13 @@ Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, return space->Alloc(self, alloc_size); } -Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) { +mirror::Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) { // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are // done in the runnable state where suspension is expected. 
DCHECK_EQ(self->GetState(), kRunnable); self->AssertThreadSuspensionIsAllowable(); - Object* ptr = TryToAllocate(self, space, alloc_size, false); + mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false); if (ptr != NULL) { return ptr; } @@ -838,14 +847,14 @@ size_t Heap::GetTotalBytesAllocated() const { class InstanceCounter { public: - InstanceCounter(const std::vector& classes, bool use_is_assignable_from, uint64_t* counts) + InstanceCounter(const std::vector& classes, bool use_is_assignable_from, uint64_t* counts) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) { } - void operator()(const Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 0; i < classes_.size(); ++i) { - const Class* instance_class = o->GetClass(); + const mirror::Class* instance_class = o->GetClass(); if (use_is_assignable_from_) { if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) { ++counts_[i]; @@ -859,14 +868,14 @@ class InstanceCounter { } private: - const std::vector& classes_; + const std::vector& classes_; bool use_is_assignable_from_; uint64_t* const counts_; DISALLOW_COPY_AND_ASSIGN(InstanceCounter); }; -void Heap::CountInstances(const std::vector& classes, bool use_is_assignable_from, +void Heap::CountInstances(const std::vector& classes, bool use_is_assignable_from, uint64_t* counts) { // We only want reachable instances, so do a GC. This also ensures that the alloc stack // is empty, so the live bitmap is the only place we need to look. 
@@ -882,29 +891,30 @@ void Heap::CountInstances(const std::vector& classes, bool use_is_assign class InstanceCollector { public: - InstanceCollector(Class* c, int32_t max_count, std::vector& instances) + InstanceCollector(mirror::Class* c, int32_t max_count, std::vector& instances) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_(c), max_count_(max_count), instances_(instances) { } - void operator()(const Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const Class* instance_class = o->GetClass(); + void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const mirror::Class* instance_class = o->GetClass(); if (instance_class == class_) { if (max_count_ == 0 || instances_.size() < max_count_) { - instances_.push_back(const_cast(o)); + instances_.push_back(const_cast(o)); } } } private: - Class* class_; + mirror::Class* class_; uint32_t max_count_; - std::vector& instances_; + std::vector& instances_; DISALLOW_COPY_AND_ASSIGN(InstanceCollector); }; -void Heap::GetInstances(Class* c, int32_t max_count, std::vector& instances) { +void Heap::GetInstances(mirror::Class* c, int32_t max_count, + std::vector& instances) { // We only want reachable instances, so do a GC. This also ensures that the alloc stack // is empty, so the live bitmap is the only place we need to look. Thread* self = Thread::Current(); @@ -919,7 +929,8 @@ void Heap::GetInstances(Class* c, int32_t max_count, std::vector& insta class ReferringObjectsFinder { public: - ReferringObjectsFinder(Object* object, int32_t max_count, std::vector& referring_objects) + ReferringObjectsFinder(mirror::Object* object, int32_t max_count, + std::vector& referring_objects) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : object_(object), max_count_(max_count), referring_objects_(referring_objects) { } @@ -927,27 +938,28 @@ class ReferringObjectsFinder { // For bitmap Visit. 
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. - void operator()(const Object* o) const NO_THREAD_SAFETY_ANALYSIS { + void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS { MarkSweep::VisitObjectReferences(o, *this); } // For MarkSweep::VisitObjectReferences. - void operator ()(const Object* referrer, const Object* object, const MemberOffset&, bool) const { + void operator ()(const mirror::Object* referrer, const mirror::Object* object, + const MemberOffset&, bool) const { if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { - referring_objects_.push_back(const_cast(referrer)); + referring_objects_.push_back(const_cast(referrer)); } } private: - Object* object_; + mirror::Object* object_; uint32_t max_count_; - std::vector& referring_objects_; + std::vector& referring_objects_; DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder); }; -void Heap::GetReferringObjects(Object* o, int32_t max_count, - std::vector& referring_objects) { +void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count, + std::vector& referring_objects) { // We only want reachable instances, so do a GC. This also ensures that the alloc stack // is empty, so the live bitmap is the only place we need to look. 
Thread* self = Thread::Current(); @@ -1026,9 +1038,9 @@ size_t Heap::GetUsedMemorySize() const { } void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) { - Object** limit = stack->End(); - for (Object** it = stack->Begin(); it != limit; ++it) { - const Object* obj = *it; + mirror::Object** limit = stack->End(); + for (mirror::Object** it = stack->Begin(); it != limit; ++it) { + const mirror::Object* obj = *it; DCHECK(obj != NULL); if (LIKELY(bitmap->HasAddress(obj))) { bitmap->Set(obj); @@ -1039,9 +1051,9 @@ void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, Objec } void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) { - Object** limit = stack->End(); - for (Object** it = stack->Begin(); it != limit; ++it) { - const Object* obj = *it; + mirror::Object** limit = stack->End(); + for (mirror::Object** it = stack->Begin(); it != limit; ++it) { + const mirror::Object* obj = *it; DCHECK(obj != NULL); if (LIKELY(bitmap->HasAddress(obj))) { bitmap->Clear(obj); @@ -1187,8 +1199,8 @@ void Heap::UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, G timings.AddSplit("MarkImageToAllocSpaceReferences"); } -void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) { - Object* obj = reinterpret_cast(arg); +void Heap::RootMatchesObjectVisitor(const mirror::Object* root, void* arg) { + mirror::Object* obj = reinterpret_cast(arg); if (root == obj) { LOG(INFO) << "Object " << obj << " is a root"; } @@ -1196,7 +1208,7 @@ void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) { class ScanVisitor { public: - void operator ()(const Object* obj) const { + void operator ()(const mirror::Object* obj) const { LOG(INFO) << "Would have rescanned object " << obj; } }; @@ -1212,8 +1224,9 @@ class VerifyReferenceVisitor { // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter // analysis. 
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, - bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS { + void operator ()(const mirror::Object* obj, const mirror::Object* ref, + const MemberOffset& /* offset */, bool /* is_static */) const + NO_THREAD_SAFETY_ANALYSIS { // Verify that the reference is live. if (ref != NULL && !IsLive(ref)) { CardTable* card_table = heap_->GetCardTable(); @@ -1260,7 +1273,7 @@ class VerifyReferenceVisitor { } } - bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { + bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (heap_->GetLiveBitmap()->Test(obj)) { return true; } @@ -1284,7 +1297,7 @@ class VerifyObjectVisitor { } - void operator ()(const Object* obj) const + void operator ()(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceVisitor visitor(heap_, const_cast(&failed_)); MarkSweep::VisitObjectReferences(obj, visitor); @@ -1328,8 +1341,8 @@ class VerifyReferenceCardVisitor { // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. - void operator ()(const Object* obj, const Object* ref, const MemberOffset& offset, - bool is_static) const NO_THREAD_SAFETY_ANALYSIS { + void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, + bool is_static) const NO_THREAD_SAFETY_ANALYSIS { // Filter out class references since changing an object's class does not mark the card as dirty. // Also handles large objects, since the only reference they hold is a class reference. if (ref != NULL && !ref->IsClass()) { @@ -1355,12 +1368,13 @@ class VerifyReferenceCardVisitor { // Print which field of the object is dead. if (!obj->IsObjectArray()) { - const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); + const mirror::Class* klass = is_static ? 
obj->AsClass() : obj->GetClass(); CHECK(klass != NULL); - const ObjectArray* fields = is_static ? klass->GetSFields() : klass->GetIFields(); + const mirror::ObjectArray* fields = is_static ? klass->GetSFields() + : klass->GetIFields(); CHECK(fields != NULL); for (int32_t i = 0; i < fields->GetLength(); ++i) { - const Field* cur = fields->Get(i); + const mirror::Field* cur = fields->Get(i); if (cur->GetOffset().Int32Value() == offset.Int32Value()) { LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " << PrettyField(cur); @@ -1368,7 +1382,8 @@ class VerifyReferenceCardVisitor { } } } else { - const ObjectArray* object_array = obj->AsObjectArray(); + const mirror::ObjectArray* object_array = + obj->AsObjectArray(); for (int32_t i = 0; i < object_array->GetLength(); ++i) { if (object_array->Get(i) == ref) { LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref"; @@ -1395,7 +1410,7 @@ class VerifyLiveStackReferences { } - void operator ()(const Object* obj) const + void operator ()(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceCardVisitor visitor(heap_, const_cast(&failed_)); MarkSweep::VisitObjectReferences(obj, visitor); @@ -1419,7 +1434,7 @@ bool Heap::VerifyMissingCardMarks() { GetLiveBitmap()->Visit(visitor); // We can verify objects in the live stack since none of these should reference dead objects. 
- for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { + for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { visitor(*it); } @@ -1636,34 +1651,36 @@ void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset, CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U); } -Object* Heap::GetReferenceReferent(Object* reference) { +mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) { DCHECK(reference != NULL); DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); - return reference->GetFieldObject(reference_referent_offset_, true); + return reference->GetFieldObject(reference_referent_offset_, true); } -void Heap::ClearReferenceReferent(Object* reference) { +void Heap::ClearReferenceReferent(mirror::Object* reference) { DCHECK(reference != NULL); DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); reference->SetFieldObject(reference_referent_offset_, NULL, true); } // Returns true if the reference object has not yet been enqueued. 
-bool Heap::IsEnqueuable(const Object* ref) { +bool Heap::IsEnqueuable(const mirror::Object* ref) { DCHECK(ref != NULL); - const Object* queue = ref->GetFieldObject(reference_queue_offset_, false); - const Object* queue_next = ref->GetFieldObject(reference_queueNext_offset_, false); + const mirror::Object* queue = + ref->GetFieldObject(reference_queue_offset_, false); + const mirror::Object* queue_next = + ref->GetFieldObject(reference_queueNext_offset_, false); return (queue != NULL) && (queue_next == NULL); } -void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) { +void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) { DCHECK(ref != NULL); - CHECK(ref->GetFieldObject(reference_queue_offset_, false) != NULL); - CHECK(ref->GetFieldObject(reference_queueNext_offset_, false) == NULL); + CHECK(ref->GetFieldObject(reference_queue_offset_, false) != NULL); + CHECK(ref->GetFieldObject(reference_queueNext_offset_, false) == NULL); EnqueuePendingReference(ref, cleared_reference_list); } -void Heap::EnqueuePendingReference(Object* ref, Object** list) { +void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) { DCHECK(ref != NULL); DCHECK(list != NULL); @@ -1673,17 +1690,19 @@ void Heap::EnqueuePendingReference(Object* ref, Object** list) { ref->SetFieldObject(reference_pendingNext_offset_, ref, false); *list = ref; } else { - Object* head = (*list)->GetFieldObject(reference_pendingNext_offset_, false); + mirror::Object* head = + (*list)->GetFieldObject(reference_pendingNext_offset_, false); ref->SetFieldObject(reference_pendingNext_offset_, head, false); (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false); } } -Object* Heap::DequeuePendingReference(Object** list) { +mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) { DCHECK(list != NULL); DCHECK(*list != NULL); - Object* head = (*list)->GetFieldObject(reference_pendingNext_offset_, false); - Object* ref; + 
mirror::Object* head = (*list)->GetFieldObject(reference_pendingNext_offset_, + false); + mirror::Object* ref; // Note: the following code is thread-safe because it is only called from ProcessReferences which // is single threaded. @@ -1691,7 +1710,8 @@ Object* Heap::DequeuePendingReference(Object** list) { ref = *list; *list = NULL; } else { - Object* next = head->GetFieldObject(reference_pendingNext_offset_, false); + mirror::Object* next = head->GetFieldObject(reference_pendingNext_offset_, + false); (*list)->SetFieldObject(reference_pendingNext_offset_, next, false); ref = head; } @@ -1699,7 +1719,7 @@ Object* Heap::DequeuePendingReference(Object** list) { return ref; } -void Heap::AddFinalizerReference(Thread* self, Object* object) { +void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { ScopedObjectAccess soa(self); JValue args[1]; args[0].SetL(object); @@ -1731,7 +1751,7 @@ size_t Heap::GetConcurrentMinFree() const { return concurrent_min_free_; } -void Heap::EnqueueClearedReferences(Object** cleared) { +void Heap::EnqueueClearedReferences(mirror::Object** cleared) { DCHECK(cleared != NULL); if (*cleared != NULL) { // When a runtime isn't started there are no reference queues to care about so ignore. 
diff --git a/src/heap.h b/src/heap.h index b7fc34d8ff..9981f835c8 100644 --- a/src/heap.h +++ b/src/heap.h @@ -24,6 +24,7 @@ #include "atomic_integer.h" #include "gc/atomic_stack.h" #include "gc/card_table.h" +#include "gc/gc_type.h" #include "gc/heap_bitmap.h" #include "globals.h" #include "gtest/gtest.h" @@ -39,9 +40,11 @@ #define VERIFY_OBJECT_FAST 1 namespace art { - -class AllocSpace; +namespace mirror { class Class; +class Object; +} // namespace mirror +class AllocSpace; class ConditionVariable; class DlMallocSpace; class GarbageCollector; @@ -51,14 +54,12 @@ class LargeObjectSpace; class MarkSweep; class ModUnionTable; class Mutex; -class Object; class Space; class SpaceTest; class StackVisitor; class Thread; class TimingLogger; -typedef AtomicStack ObjectStack; typedef std::vector Spaces; class AgeCardVisitor { @@ -72,21 +73,6 @@ class AgeCardVisitor { } }; -// The ordering of the enum matters, it is used to determine which GCs are run first. -enum GcType { - // No Gc - kGcTypeNone, - // Sticky mark bits "generational" GC. - kGcTypeSticky, - // Partial GC, over only the alloc space. - kGcTypePartial, - // Full GC - kGcTypeFull, - // Number of different Gc types. - kGcTypeMax, -}; -std::ostream& operator<<(std::ostream& os, const GcType& policy); - enum GcCause { kGcCauseForAlloc, kGcCauseBackground, @@ -107,11 +93,6 @@ class Heap { // Used so that we don't overflow the allocation time atomic integer. static const size_t kTimeAdjust = 1024; - typedef void (RootVisitor)(const Object* root, void* arg); - typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg, - const StackVisitor* visitor); - typedef bool (IsMarkedTester)(const Object* object, void* arg); - // Create a heap with the requested sizes. The possible empty // image_file_names names specify Spaces to load based on // ImageWriter output. @@ -122,19 +103,19 @@ class Heap { ~Heap(); // Allocates and initializes storage for an object instance. 
- Object* AllocObject(Thread* self, Class* klass, size_t num_bytes) + mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Check sanity of given reference. Requires the heap lock. #if VERIFY_OBJECT_ENABLED - void VerifyObject(const Object* o); + void VerifyObject(const mirror::Object* o); #else - void VerifyObject(const Object*) {} + void VerifyObject(const mirror::Object*) {} #endif // Check sanity of all live references. Requires the heap lock. void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - static void RootMatchesObjectVisitor(const Object* root, void* arg); + static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg); bool VerifyHeapReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -145,11 +126,11 @@ class Heap { // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, // and doesn't abort on error, allowing the caller to report more // meaningful diagnostics. - bool IsHeapAddress(const Object* obj); + bool IsHeapAddress(const mirror::Object* obj); // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. - bool IsLiveObjectLocked(const Object* obj) + bool IsLiveObjectLocked(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Initiates an explicit garbage collection. @@ -169,16 +150,16 @@ class Heap { // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount. // The boolean decides whether to use IsAssignableFrom or == when comparing classes. - void CountInstances(const std::vector& classes, bool use_is_assignable_from, + void CountInstances(const std::vector& classes, bool use_is_assignable_from, uint64_t* counts) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implements JDWP RT_Instances. 
- void GetInstances(Class* c, int32_t max_count, std::vector& instances) + void GetInstances(mirror::Class* c, int32_t max_count, std::vector& instances) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implements JDWP OR_ReferringObjects. - void GetReferringObjects(Object* o, int32_t max_count, std::vector& referring_objects) + void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector& referring_objects) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -218,15 +199,15 @@ class Heap { MemberOffset reference_pendingNext_offset, MemberOffset finalizer_reference_zombie_offset); - Object* GetReferenceReferent(Object* reference); - void ClearReferenceReferent(Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetReferenceReferent(mirror::Object* reference); + void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if the reference object has not yet been enqueued. - bool IsEnqueuable(const Object* ref); - void EnqueueReference(Object* ref, Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnqueuePendingReference(Object* ref, Object** list) + bool IsEnqueuable(const mirror::Object* ref); + void EnqueueReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* DequeuePendingReference(Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* DequeuePendingReference(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); MemberOffset GetReferencePendingNextOffset() { DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U); @@ -257,12 +238,12 @@ class Heap { // Must be called if a field of an Object in the heap changes, and before any GC safe-point. 
// The call is not needed if NULL is stored in the field. - void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) { + void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/, const mirror::Object* /*new_value*/) { card_table_->MarkCard(dst); } // Write barrier for array operations that update many field positions - void WriteBarrierArray(const Object* dst, int /*start_offset*/, + void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/, size_t /*length TODO: element_count or byte_count?*/) { card_table_->MarkCard(dst); } @@ -271,7 +252,7 @@ class Heap { return card_table_.get(); } - void AddFinalizerReference(Thread* self, Object* object); + void AddFinalizerReference(Thread* self, mirror::Object* object); size_t GetBytesAllocated() const; size_t GetObjectsAllocated() const; @@ -293,7 +274,7 @@ class Heap { // Functions for getting the bitmap which corresponds to an object's address. // This is probably slow, TODO: use better data structure like binary tree . - ContinuousSpace* FindSpaceFromObject(const Object*) const; + ContinuousSpace* FindSpaceFromObject(const mirror::Object*) const; void DumpForSigQuit(std::ostream& os); @@ -354,22 +335,22 @@ class Heap { private: // Allocates uninitialized storage. Passing in a null space tries to place the object in the // large object space. - Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes) + mirror::Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Try to allocate a number of bytes, this function never does any GCs. 
- Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) + mirror::Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Pushes a list of cleared references out to the managed heap. - void EnqueueClearedReferences(Object** cleared_references); + void EnqueueClearedReferences(mirror::Object** cleared_references); void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); - void RecordAllocation(size_t size, Object* object) + void RecordAllocation(size_t size, mirror::Object* object) LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -395,9 +376,9 @@ class Heap { // No thread saftey analysis since we call this everywhere and it is impossible to find a proper // lock ordering for it. - void VerifyObjectBody(const Object *obj) NO_THREAD_SAFETY_ANALYSIS; + void VerifyObjectBody(const mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS; - static void VerificationCallback(Object* obj, void* arg) + static void VerificationCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_); // Swap the allocation stack with the live stack. 
diff --git a/src/heap_test.cc b/src/heap_test.cc index 6db7416d06..79cc835471 100644 --- a/src/heap_test.cc +++ b/src/heap_test.cc @@ -15,6 +15,11 @@ */ #include "common_test.h" +#include "gc/card_table-inl.h" +#include "gc/space_bitmap-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "sirt_ref.h" namespace art { @@ -37,12 +42,12 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) { ScopedObjectAccess soa(Thread::Current()); // garbage is created during ClassLinker::Init - Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); + mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); for (size_t i = 0; i < 1024; ++i) { - SirtRef > array(soa.Self(), - ObjectArray::Alloc(soa.Self(), c, 2048)); + SirtRef > array(soa.Self(), + mirror::ObjectArray::Alloc(soa.Self(), c, 2048)); for (size_t j = 0; j < 2048; ++j) { - array->Set(j, String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")); + array->Set(j, mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")); } } } @@ -53,7 +58,7 @@ TEST_F(HeapTest, HeapBitmapCapacityTest) { byte* heap_begin = reinterpret_cast(0x1000); const size_t heap_capacity = SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1); UniquePtr bitmap(SpaceBitmap::Create("test-bitmap", heap_begin, heap_capacity)); - bitmap->Set(reinterpret_cast(&heap_begin[heap_capacity - SpaceBitmap::kAlignment])); + bitmap->Set(reinterpret_cast(&heap_begin[heap_capacity - SpaceBitmap::kAlignment])); } } // namespace art diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc index c0e73bce0e..e0a4c05402 100644 --- a/src/hprof/hprof.cc +++ b/src/hprof/hprof.cc @@ -44,7 +44,11 @@ #include "debugger.h" #include "globals.h" #include "heap.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/field.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" #include "object_utils.h" #include "os.h" #include "safe_map.h" 
@@ -165,8 +169,8 @@ typedef uint32_t HprofId; typedef HprofId HprofStringId; typedef HprofId HprofObjectId; typedef HprofId HprofClassObjectId; -typedef std::set ClassSet; -typedef std::set::iterator ClassSetIterator; +typedef std::set ClassSet; +typedef std::set::iterator ClassSetIterator; typedef SafeMap StringMap; typedef SafeMap::iterator StringMapIterator; @@ -480,14 +484,14 @@ class Hprof { } private: - static void RootVisitor(const Object* obj, void* arg) + static void RootVisitor(const mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(arg != NULL); Hprof* hprof = reinterpret_cast(arg); hprof->VisitRoot(obj); } - static void HeapBitmapCallback(Object* obj, void* arg) + static void HeapBitmapCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(obj != NULL); CHECK(arg != NULL); @@ -495,9 +499,9 @@ class Hprof { hprof->DumpHeapObject(obj); } - void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoot(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int DumpHeapObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Finish() { } @@ -507,7 +511,7 @@ class Hprof { uint32_t nextSerialNumber = 1; for (ClassSetIterator it = classes_.begin(); it != classes_.end(); ++it) { - const Class* c = *it; + const mirror::Class* c = *it; CHECK(c != NULL); int err = current_record_.StartNewRecord(header_fp_, HPROF_TAG_LOAD_CLASS, HPROF_TIME); @@ -567,9 +571,9 @@ class Hprof { current_heap_ = HPROF_HEAP_DEFAULT; } - int MarkRootObject(const Object* obj, jobject jniObj); + int MarkRootObject(const mirror::Object* obj, jobject jniObj); - HprofClassObjectId LookupClassId(Class* c) + HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (c == NULL) { // c is the superclass of java.lang.Object or a 
primitive @@ -577,7 +581,7 @@ class Hprof { } std::pair result = classes_.insert(c); - const Class* present = *result.first; + const mirror::Class* present = *result.first; // Make sure that we've assigned a string ID for this class' name LookupClassNameId(c); @@ -586,7 +590,7 @@ class Hprof { return (HprofStringId) present; } - HprofStringId LookupStringId(String* string) { + HprofStringId LookupStringId(mirror::String* string) { return LookupStringId(string->ToModifiedUtf8()); } @@ -604,7 +608,7 @@ class Hprof { return id; } - HprofStringId LookupClassNameId(const Class* c) + HprofStringId LookupClassNameId(const mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } @@ -740,7 +744,7 @@ static HprofBasicType PrimitiveToBasicTypeAndSize(Primitive::Type prim, size_t* // something when ctx->gc_scan_state_ is non-zero, which is usually // only true when marking the root set or unreachable // objects. Used to add rootset references to obj. -int Hprof::MarkRootObject(const Object* obj, jobject jniObj) { +int Hprof::MarkRootObject(const mirror::Object* obj, jobject jniObj) { HprofRecord* rec = ¤t_record_; HprofHeapTag heapTag = (HprofHeapTag)gc_scan_state_; @@ -823,11 +827,11 @@ int Hprof::MarkRootObject(const Object* obj, jobject jniObj) { return 0; } -static int StackTraceSerialNumber(const Object* /*obj*/) { +static int StackTraceSerialNumber(const mirror::Object* /*obj*/) { return HPROF_NULL_STACK_TRACE; } -int Hprof::DumpHeapObject(Object* obj) { +int Hprof::DumpHeapObject(mirror::Object* obj) { HprofRecord* rec = ¤t_record_; HprofHeapId desiredHeap = false ? HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP; // TODO: zygote objects? @@ -859,7 +863,7 @@ int Hprof::DumpHeapObject(Object* obj) { current_heap_ = desiredHeap; } - Class* c = obj->GetClass(); + mirror::Class* c = obj->GetClass(); if (c == NULL) { // This object will bother HprofReader, because it has a NULL // class, so just don't dump it. 
It could be @@ -867,7 +871,7 @@ int Hprof::DumpHeapObject(Object* obj) { // allocated which hasn't been initialized yet. } else { if (obj->IsClass()) { - Class* thisClass = obj->AsClass(); + mirror::Class* thisClass = obj->AsClass(); // obj is a ClassObject. size_t sFieldCount = thisClass->NumStaticFields(); if (sFieldCount != 0) { @@ -896,7 +900,7 @@ int Hprof::DumpHeapObject(Object* obj) { if (thisClass->IsClassClass()) { // ClassObjects have their static fields appended, so aren't all the same size. // But they're at least this size. - rec->AddU4(sizeof(Class)); // instance size + rec->AddU4(sizeof(mirror::Class)); // instance size } else if (thisClass->IsArrayClass() || thisClass->IsPrimitive()) { rec->AddU4(0); } else { @@ -917,7 +921,7 @@ int Hprof::DumpHeapObject(Object* obj) { rec->AddId(CLASS_STATICS_ID(obj)); for (size_t i = 0; i < sFieldCount; ++i) { - Field* f = thisClass->GetStaticField(i); + mirror::Field* f = thisClass->GetStaticField(i); fh.ChangeField(f); size_t size; @@ -942,14 +946,14 @@ int Hprof::DumpHeapObject(Object* obj) { int iFieldCount = thisClass->IsObjectClass() ? 0 : thisClass->NumInstanceFields(); rec->AddU2((uint16_t)iFieldCount); for (int i = 0; i < iFieldCount; ++i) { - Field* f = thisClass->GetInstanceField(i); + mirror::Field* f = thisClass->GetInstanceField(i); fh.ChangeField(f); HprofBasicType t = SignatureToBasicTypeAndSize(fh.GetTypeDescriptor(), NULL); rec->AddId(LookupStringId(fh.GetName())); rec->AddU1(t); } } else if (c->IsArrayClass()) { - const Array* aobj = obj->AsArray(); + const mirror::Array* aobj = obj->AsArray(); uint32_t length = aobj->GetLength(); if (obj->IsObjectArray()) { @@ -962,7 +966,7 @@ int Hprof::DumpHeapObject(Object* obj) { rec->AddId(LookupClassId(c)); // Dump the elements, which are always objects or NULL. 
- rec->AddIdList((const HprofObjectId*)aobj->GetRawData(sizeof(Object*)), length); + rec->AddIdList((const HprofObjectId*)aobj->GetRawData(sizeof(mirror::Object*)), length); } else { size_t size; HprofBasicType t = PrimitiveToBasicTypeAndSize(c->GetComponentType()->GetPrimitiveType(), &size); @@ -1000,12 +1004,12 @@ int Hprof::DumpHeapObject(Object* obj) { // Write the instance data; fields for this class, followed by super class fields, // and so on. Don't write the klass or monitor fields of Object.class. - const Class* sclass = c; + const mirror::Class* sclass = c; FieldHelper fh; while (!sclass->IsObjectClass()) { int ifieldCount = sclass->NumInstanceFields(); for (int i = 0; i < ifieldCount; ++i) { - Field* f = sclass->GetInstanceField(i); + mirror::Field* f = sclass->GetInstanceField(i); fh.ChangeField(f); size_t size; SignatureToBasicTypeAndSize(fh.GetTypeDescriptor(), &size); @@ -1034,7 +1038,7 @@ int Hprof::DumpHeapObject(Object* obj) { return 0; } -void Hprof::VisitRoot(const Object* obj) { +void Hprof::VisitRoot(const mirror::Object* obj) { uint32_t threadId = 0; // TODO /*RootType*/ size_t type = 0; // TODO diff --git a/src/image.cc b/src/image.cc index a190f1015e..8eeb772a5d 100644 --- a/src/image.cc +++ b/src/image.cc @@ -16,9 +16,47 @@ #include "image.h" +#include "mirror/object_array.h" +#include "mirror/object_array-inl.h" +#include "utils.h" + namespace art { const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; const byte ImageHeader::kImageVersion[] = { '0', '0', '2', '\0' }; +ImageHeader::ImageHeader(uint32_t image_begin, + uint32_t image_roots, + uint32_t oat_checksum, + uint32_t oat_file_begin, + uint32_t oat_data_begin, + uint32_t oat_data_end, + uint32_t oat_file_end) + : image_begin_(image_begin), + oat_checksum_(oat_checksum), + oat_file_begin_(oat_file_begin), + oat_data_begin_(oat_data_begin), + oat_data_end_(oat_data_end), + oat_file_end_(oat_file_end), + image_roots_(image_roots) { + CHECK_EQ(image_begin, 
RoundUp(image_begin, kPageSize)); + CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize)); + CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize)); + CHECK_LT(image_begin, image_roots); + CHECK_LT(image_roots, oat_file_begin); + CHECK_LE(oat_file_begin, oat_data_begin); + CHECK_LT(oat_data_begin, oat_data_end); + CHECK_LE(oat_data_end, oat_file_end); + memcpy(magic_, kImageMagic, sizeof(kImageMagic)); + memcpy(version_, kImageVersion, sizeof(kImageVersion)); +} + +mirror::Object* ImageHeader::GetImageRoot(ImageRoot image_root) const { + return GetImageRoots()->Get(image_root); +} + +mirror::ObjectArray* ImageHeader::GetImageRoots() const { + return reinterpret_cast*>(image_roots_); +} + } // namespace art diff --git a/src/image.h b/src/image.h index f38f04be8b..6501328300 100644 --- a/src/image.h +++ b/src/image.h @@ -20,7 +20,7 @@ #include #include "globals.h" -#include "object.h" +#include "mirror/object.h" namespace art { @@ -35,25 +35,7 @@ class PACKED(4) ImageHeader { uint32_t oat_file_begin, uint32_t oat_data_begin, uint32_t oat_data_end, - uint32_t oat_file_end) - : image_begin_(image_begin), - oat_checksum_(oat_checksum), - oat_file_begin_(oat_file_begin), - oat_data_begin_(oat_data_begin), - oat_data_end_(oat_data_end), - oat_file_end_(oat_file_end), - image_roots_(image_roots) { - CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize)); - CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize)); - CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize)); - CHECK_LT(image_begin, image_roots); - CHECK_LT(image_roots, oat_file_begin); - CHECK_LE(oat_file_begin, oat_data_begin); - CHECK_LT(oat_data_begin, oat_data_end); - CHECK_LE(oat_data_end, oat_file_end); - memcpy(magic_, kImageMagic, sizeof(kImageMagic)); - memcpy(version_, kImageVersion, sizeof(kImageVersion)); - } + uint32_t oat_file_end); bool IsValid() const { if (memcmp(magic_, kImageMagic, sizeof(kImageMagic) != 0)) { @@ -113,15 +95,11 @@ class PACKED(4) ImageHeader { 
kImageRootsMax, }; - Object* GetImageRoot(ImageRoot image_root) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetImageRoots()->Get(image_root); - } + mirror::Object* GetImageRoot(ImageRoot image_root) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - ObjectArray* GetImageRoots() const { - return reinterpret_cast*>(image_roots_); - } + mirror::ObjectArray* GetImageRoots() const; static const byte kImageMagic[4]; static const byte kImageVersion[4]; diff --git a/src/image_test.cc b/src/image_test.cc index 89e3a05041..ed6426b7b1 100644 --- a/src/image_test.cc +++ b/src/image_test.cc @@ -54,7 +54,7 @@ TEST_F(ImageTest, WriteRead) { for (size_t i = 0; i < java_lang_dex_file_->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = java_lang_dex_file_->GetClassDef(i); const char* descriptor = java_lang_dex_file_->GetClassDescriptor(class_def); - Class* klass = class_linker_->FindSystemClass(descriptor); + mirror::Class* klass = class_linker_->FindSystemClass(descriptor); EXPECT_TRUE(klass != NULL) << descriptor; } } @@ -139,7 +139,7 @@ TEST_F(ImageTest, WriteRead) { for (size_t i = 0; i < dex->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = dex->GetClassDef(i); const char* descriptor = dex->GetClassDescriptor(class_def); - Class* klass = class_linker_->FindSystemClass(descriptor); + mirror::Class* klass = class_linker_->FindSystemClass(descriptor); EXPECT_TRUE(klass != NULL) << descriptor; EXPECT_LT(image_begin, reinterpret_cast(klass)) << descriptor; EXPECT_LT(reinterpret_cast(klass), image_end) << descriptor; diff --git a/src/image_writer.cc b/src/image_writer.cc index fc88cbbfc3..dc19d72cca 100644 --- a/src/image_writer.cc +++ b/src/image_writer.cc @@ -23,19 +23,25 @@ #include "base/logging.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" -#include "class_loader.h" #include "compiled_method.h" #include "compiler.h" -#include "dex_cache.h" +#include "gc/card_table-inl.h" #include "gc/large_object_space.h" 
#include "gc/space.h" #include "globals.h" #include "heap.h" #include "image.h" #include "intern_table.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "oat.h" #include "oat_file.h" -#include "object.h" #include "object_utils.h" #include "runtime.h" #include "scoped_thread_state_change.h" @@ -43,6 +49,8 @@ #include "UniquePtr.h" #include "utils.h" +using namespace art::mirror; + namespace art { bool ImageWriter::Write(const std::string& image_filename, @@ -442,7 +450,7 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* object, void* arg) { DCHECK_LT(offset + n, image_writer->image_->Size()); memcpy(dst, src, n); Object* copy = reinterpret_cast(dst); - copy->monitor_ = 0; // We may have inflated the lock during compilation. + copy->SetField32(Object::MonitorOffset(), 0, false); // We may have inflated the lock during compilation. 
image_writer->FixupObject(obj, copy); } @@ -476,13 +484,13 @@ void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) // Every type of method can have an invoke stub uint32_t invoke_stub_offset = orig->GetOatInvokeStubOffset(); const byte* invoke_stub = GetOatAddress(invoke_stub_offset); - copy->invoke_stub_ = reinterpret_cast(const_cast(invoke_stub)); + copy->SetInvokeStub(reinterpret_cast(const_cast(invoke_stub))); if (orig->IsAbstract()) { // Abstract methods are pointed to a stub that will throw AbstractMethodError if they are called ByteArray* orig_ame_stub_array_ = Runtime::Current()->GetAbstractMethodErrorStubArray(); ByteArray* copy_ame_stub_array_ = down_cast(GetImageAddress(orig_ame_stub_array_)); - copy->code_ = copy_ame_stub_array_->GetData(); + copy->SetCode(copy_ame_stub_array_->GetData()); return; } @@ -492,7 +500,7 @@ void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) Runtime::Current()->GetResolutionStubArray(Runtime::kUnknownMethod); CHECK(orig->GetCode() == orig_res_stub_array_->GetData()); ByteArray* copy_res_stub_array_ = down_cast(GetImageAddress(orig_res_stub_array_)); - copy->code_ = copy_res_stub_array_->GetData(); + copy->SetCode(copy_res_stub_array_->GetData()); return; } @@ -511,27 +519,27 @@ void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) if (code == NULL) { code = GetOatAddress(code_offset); } - copy->code_ = code; + copy->SetCode(code); if (orig->IsNative()) { // The native method's pointer is directed to a stub to lookup via dlsym. // Note this is not the code_ pointer, that is handled above. 
ByteArray* orig_jni_stub_array_ = Runtime::Current()->GetJniDlsymLookupStub(); ByteArray* copy_jni_stub_array_ = down_cast(GetImageAddress(orig_jni_stub_array_)); - copy->native_method_ = copy_jni_stub_array_->GetData(); + copy->SetNativeMethod(copy_jni_stub_array_->GetData()); } else { // normal (non-abstract non-native) methods have mapping tables to relocate uint32_t mapping_table_off = orig->GetOatMappingTableOffset(); const byte* mapping_table = GetOatAddress(mapping_table_off); - copy->mapping_table_ = reinterpret_cast(mapping_table); + copy->SetMappingTable(reinterpret_cast(mapping_table)); uint32_t vmap_table_offset = orig->GetOatVmapTableOffset(); const byte* vmap_table = GetOatAddress(vmap_table_offset); - copy->vmap_table_ = reinterpret_cast(vmap_table); + copy->SetVmapTable(reinterpret_cast(vmap_table)); uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset(); const byte* native_gc_map = GetOatAddress(native_gc_map_offset); - copy->native_gc_map_ = reinterpret_cast(native_gc_map); + copy->SetNativeGcMap(reinterpret_cast(native_gc_map)); } } diff --git a/src/image_writer.h b/src/image_writer.h index 64bac2ed1d..eff9ffb3de 100644 --- a/src/image_writer.h +++ b/src/image_writer.h @@ -24,10 +24,9 @@ #include #include "compiler.h" -#include "dex_cache.h" #include "mem_map.h" #include "oat_file.h" -#include "object.h" +#include "mirror/dex_cache.h" #include "os.h" #include "safe_map.h" #include "gc/space.h" @@ -59,7 +58,7 @@ class ImageWriter { bool AllocMemory(); // we use the lock word to store the offset of the object in the image - void AssignImageOffset(Object* object) + void AssignImageOffset(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(object != NULL); SetImageOffset(object, image_end_); @@ -67,35 +66,35 @@ class ImageWriter { DCHECK_LT(image_end_, image_->Size()); } - void SetImageOffset(Object* object, size_t offset) { + void SetImageOffset(mirror::Object* object, size_t offset) { DCHECK(object != NULL); 
DCHECK_NE(offset, 0U); DCHECK(!IsImageOffsetAssigned(object)); offsets_.Put(object, offset); } - size_t IsImageOffsetAssigned(const Object* object) const { + size_t IsImageOffsetAssigned(const mirror::Object* object) const { DCHECK(object != NULL); return offsets_.find(object) != offsets_.end(); } - size_t GetImageOffset(const Object* object) const { + size_t GetImageOffset(const mirror::Object* object) const { DCHECK(object != NULL); DCHECK(IsImageOffsetAssigned(object)); return offsets_.find(object)->second; } - Object* GetImageAddress(const Object* object) const { + mirror::Object* GetImageAddress(const mirror::Object* object) const { if (object == NULL) { return NULL; } - return reinterpret_cast(image_begin_ + GetImageOffset(object)); + return reinterpret_cast(image_begin_ + GetImageOffset(object)); } - Object* GetLocalAddress(const Object* object) const { + mirror::Object* GetLocalAddress(const mirror::Object* object) const { size_t offset = GetImageOffset(object); byte* dst = image_->Begin() + offset; - return reinterpret_cast(dst); + return reinterpret_cast(dst); } const byte* GetOatAddress(uint32_t offset) const { @@ -106,50 +105,52 @@ class ImageWriter { return oat_data_begin_ + offset; } - bool IsImageClass(const Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImageClass(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpImageClasses(); void ComputeLazyFieldsForImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg) + static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution void ComputeEagerResolvedStrings(); - static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg) + static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool NonImageClassesVisitor(Class* c, void* arg) + static bool NonImageClassesVisitor(mirror::Class* c, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CheckNonImageClassesRemoved(); - static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg) + static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray* CreateImageRoots() const + mirror::ObjectArray* CreateImageRoots() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg) + static void CalculateNewObjectOffsetsCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CopyAndFixupObjects(); - static void CopyAndFixupObjectsCallback(Object* obj, void* arg) + static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupClass(const Class* orig, Class* copy) + void FixupClass(const mirror::Class* orig, mirror::Class* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) + void FixupMethod(const mirror::AbstractMethod* orig, mirror::AbstractMethod* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupObject(const Object* orig, Object* copy) + void FixupObject(const mirror::Object* orig, mirror::Object* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupObjectArray(const ObjectArray* orig, ObjectArray* copy) + void FixupObjectArray(const mirror::ObjectArray* orig, + mirror::ObjectArray* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupInstanceFields(const Object* orig, Object* copy) + void 
FixupInstanceFields(const mirror::Object* orig, mirror::Object* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupStaticFields(const Class* orig, Class* copy) + void FixupStaticFields(const mirror::Class* orig, mirror::Class* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static) + void FixupFields(const mirror::Object* orig, mirror::Object* copy, uint32_t ref_offsets, + bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PatchOatCodeAndMethods(const Compiler& compiler) @@ -157,7 +158,7 @@ class ImageWriter { void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - SafeMap offsets_; + SafeMap offsets_; // oat file with code for this image OatFile* oat_file_; @@ -178,7 +179,7 @@ class ImageWriter { const byte* oat_data_begin_; // DexCaches seen while scanning for fixing up CodeAndDirectMethods - typedef std::set Set; + typedef std::set Set; Set dex_caches_; }; diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc index 9bb6edc9cb..720380a2eb 100644 --- a/src/indirect_reference_table.cc +++ b/src/indirect_reference_table.cc @@ -40,9 +40,9 @@ IndirectReferenceTable::IndirectReferenceTable(size_t initialCount, CHECK_LE(initialCount, maxCount); CHECK_NE(desiredKind, kSirtOrInvalid); - table_ = reinterpret_cast(malloc(initialCount * sizeof(const Object*))); + table_ = reinterpret_cast(malloc(initialCount * sizeof(const mirror::Object*))); CHECK(table_ != NULL); - memset(table_, 0xd1, initialCount * sizeof(const Object*)); + memset(table_, 0xd1, initialCount * sizeof(const mirror::Object*)); slot_data_ = reinterpret_cast(calloc(initialCount, sizeof(IndirectRefSlot))); CHECK(slot_data_ != NULL); @@ -63,7 +63,7 @@ IndirectReferenceTable::~IndirectReferenceTable() { // Make sure that the entry at "idx" is correctly paired with "iref". 
bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const { - const Object* obj = table_[idx]; + const mirror::Object* obj = table_[idx]; IndirectRef checkRef = ToIndirectRef(obj, idx); if (checkRef != iref) { LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what @@ -75,7 +75,7 @@ bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int return true; } -IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { +IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* obj) { IRTSegmentState prevState; prevState.all = cookie; size_t topIndex = segment_state_.parts.topIndex; @@ -101,7 +101,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { } DCHECK_GT(newSize, alloc_entries_); - table_ = reinterpret_cast(realloc(table_, newSize * sizeof(const Object*))); + table_ = reinterpret_cast(realloc(table_, newSize * sizeof(const mirror::Object*))); slot_data_ = reinterpret_cast(realloc(slot_data_, newSize * sizeof(IndirectRefSlot))); if (table_ == NULL || slot_data_ == NULL) { @@ -126,7 +126,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) { if (numHoles > 0) { DCHECK_GT(topIndex, 1U); // Find the first hole; likely to be near the end of the list. 
- const Object** pScan = &table_[topIndex - 1]; + const mirror::Object** pScan = &table_[topIndex - 1]; DCHECK(*pScan != NULL); while (*--pScan != NULL) { DCHECK_GE(pScan, table_ + prevState.parts.topIndex); @@ -194,7 +194,7 @@ bool IndirectReferenceTable::GetChecked(IndirectRef iref) const { return true; } -static int Find(Object* direct_pointer, int bottomIndex, int topIndex, const Object** table) { +static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex, const mirror::Object** table) { for (int i = bottomIndex; i < topIndex; ++i) { if (table[i] == direct_pointer) { return i; @@ -203,7 +203,7 @@ static int Find(Object* direct_pointer, int bottomIndex, int topIndex, const Obj return -1; } -bool IndirectReferenceTable::ContainsDirectPointer(Object* direct_pointer) const { +bool IndirectReferenceTable::ContainsDirectPointer(mirror::Object* direct_pointer) const { return Find(direct_pointer, 0, segment_state_.parts.topIndex, table_) != -1; } @@ -234,7 +234,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) { return true; } if (GetIndirectRefKind(iref) == kSirtOrInvalid && vm->work_around_app_jni_bugs) { - Object* direct_pointer = reinterpret_cast(iref); + mirror::Object* direct_pointer = reinterpret_cast(iref); idx = Find(direct_pointer, bottomIndex, topIndex, table_); if (idx == -1) { LOG(WARNING) << "trying to work around app JNI bugs, but didn't find " << iref << " in table!"; @@ -308,7 +308,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) { return true; } -void IndirectReferenceTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) { typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto for (It it = begin(), end = this->end(); it != end; ++it) { visitor(**it, arg); @@ -317,7 +317,7 @@ void IndirectReferenceTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) { void 
IndirectReferenceTable::Dump(std::ostream& os) const { os << kind_ << " table dump:\n"; - std::vector entries(table_, table_ + Capacity()); + std::vector entries(table_, table_ + Capacity()); // Remove NULLs. for (int i = entries.size() - 1; i >= 0; --i) { if (entries[i] == NULL) { diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h index cd358e93df..e09043dba7 100644 --- a/src/indirect_reference_table.h +++ b/src/indirect_reference_table.h @@ -23,11 +23,13 @@ #include #include "base/logging.h" -#include "heap.h" +#include "offsets.h" +#include "root_visitor.h" namespace art { - +namespace mirror { class Object; +} // namespace mirror /* * Maintain a table of indirect references. Used for local/global JNI @@ -98,8 +100,8 @@ class Object; typedef void* IndirectRef; // Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress(). -static Object* const kInvalidIndirectRefObject = reinterpret_cast(0xdead4321); -static Object* const kClearedJniWeakGlobal = reinterpret_cast(0xdead1234); +static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast(0xdead4321); +static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast(0xdead1234); /* * Indirect reference kind, used as the two low bits of IndirectRef. 
@@ -128,7 +130,7 @@ static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) { static const size_t kIRTPrevCount = 4; struct IndirectRefSlot { uint32_t serial; - const Object* previous[kIRTPrevCount]; + const mirror::Object* previous[kIRTPrevCount]; }; /* use as initial value for "cookie", and when table has only one segment */ @@ -204,7 +206,7 @@ union IRTSegmentState { class IrtIterator { public: - explicit IrtIterator(const Object** table, size_t i, size_t capacity) + explicit IrtIterator(const mirror::Object** table, size_t i, size_t capacity) : table_(table), i_(i), capacity_(capacity) { SkipNullsAndTombstones(); } @@ -215,7 +217,7 @@ class IrtIterator { return *this; } - const Object** operator*() { + const mirror::Object** operator*() { return &table_[i_]; } @@ -231,7 +233,7 @@ class IrtIterator { } } - const Object** table_; + const mirror::Object** table_; size_t i_; size_t capacity_; }; @@ -258,7 +260,7 @@ class IndirectReferenceTable { * Returns NULL if the table is full (max entries reached, or alloc * failed during expansion). */ - IndirectRef Add(uint32_t cookie, const Object* obj) + IndirectRef Add(uint32_t cookie, const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* @@ -266,7 +268,7 @@ class IndirectReferenceTable { * * Returns kInvalidIndirectRefObject if iref is invalid. */ - const Object* Get(IndirectRef iref) const { + const mirror::Object* Get(IndirectRef iref) const { if (!GetChecked(iref)) { return kInvalidIndirectRefObject; } @@ -274,7 +276,7 @@ class IndirectReferenceTable { } // TODO: remove when we remove work_around_app_jni_bugs support. - bool ContainsDirectPointer(Object* direct_pointer) const; + bool ContainsDirectPointer(mirror::Object* direct_pointer) const; /* * Remove an existing entry. 
@@ -307,7 +309,7 @@ class IndirectReferenceTable { return iterator(table_, Capacity(), Capacity()); } - void VisitRoots(Heap::RootVisitor* visitor, void* arg); + void VisitRoots(RootVisitor* visitor, void* arg); uint32_t GetSegmentState() const { return segment_state_.all; @@ -334,7 +336,7 @@ class IndirectReferenceTable { * The object pointer itself is subject to relocation in some GC * implementations, so we shouldn't really be using it here. */ - IndirectRef ToIndirectRef(const Object* /*o*/, uint32_t tableIndex) const { + IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const { DCHECK_LT(tableIndex, 65536U); uint32_t serialChunk = slot_data_[tableIndex].serial; uint32_t uref = serialChunk << 20 | (tableIndex << 2) | kind_; @@ -347,7 +349,7 @@ class IndirectReferenceTable { * We advance the serial number, invalidating any outstanding references to * this slot. */ - void UpdateSlotAdd(const Object* obj, int slot) { + void UpdateSlotAdd(const mirror::Object* obj, int slot) { if (slot_data_ != NULL) { IndirectRefSlot* pSlot = &slot_data_[slot]; pSlot->serial++; @@ -363,7 +365,7 @@ class IndirectReferenceTable { IRTSegmentState segment_state_; /* bottom of the stack */ - const Object** table_; + const mirror::Object** table_; /* bit mask, ORed into all irefs */ IndirectRefKind kind_; /* extended debugging info */ diff --git a/src/indirect_reference_table_test.cc b/src/indirect_reference_table_test.cc index b5a05ece2b..bd2890c497 100644 --- a/src/indirect_reference_table_test.cc +++ b/src/indirect_reference_table_test.cc @@ -47,15 +47,15 @@ TEST_F(IndirectReferenceTableTest, BasicTest) { static const size_t kTableMax = 20; IndirectReferenceTable irt(kTableInitial, kTableMax, kGlobal); - Class* c = class_linker_->FindSystemClass("Ljava/lang/Object;"); + mirror::Class* c = class_linker_->FindSystemClass("Ljava/lang/Object;"); ASSERT_TRUE(c != NULL); - Object* obj0 = c->AllocObject(soa.Self()); + mirror::Object* obj0 = 
c->AllocObject(soa.Self()); ASSERT_TRUE(obj0 != NULL); - Object* obj1 = c->AllocObject(soa.Self()); + mirror::Object* obj1 = c->AllocObject(soa.Self()); ASSERT_TRUE(obj1 != NULL); - Object* obj2 = c->AllocObject(soa.Self()); + mirror::Object* obj2 = c->AllocObject(soa.Self()); ASSERT_TRUE(obj2 != NULL); - Object* obj3 = c->AllocObject(soa.Self()); + mirror::Object* obj3 = c->AllocObject(soa.Self()); ASSERT_TRUE(obj3 != NULL); const uint32_t cookie = IRT_FIRST_SEGMENT; diff --git a/src/instrumentation.cc b/src/instrumentation.cc index 065758d16e..e3d4d28d86 100644 --- a/src/instrumentation.cc +++ b/src/instrumentation.cc @@ -21,7 +21,10 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "debugger.h" -#include "dex_cache.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #if !defined(ART_USE_LLVM_COMPILER) #include "oat/runtime/oat_support_entrypoints.h" #endif @@ -34,18 +37,18 @@ namespace art { -static bool InstallStubsClassVisitor(Class* klass, void*) +static bool InstallStubsClassVisitor(mirror::Class* klass, void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - AbstractMethod* method = klass->GetDirectMethod(i); + mirror::AbstractMethod* method = klass->GetDirectMethod(i); if (instrumentation->GetSavedCodeFromMap(method) == NULL) { instrumentation->SaveAndUpdateCode(method); } } for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - AbstractMethod* method = klass->GetVirtualMethod(i); + mirror::AbstractMethod* method = klass->GetVirtualMethod(i); if (instrumentation->GetSavedCodeFromMap(method) == NULL) { instrumentation->SaveAndUpdateCode(method); } @@ -53,18 +56,18 @@ static bool InstallStubsClassVisitor(Class* klass, void*) return true; } -static bool UninstallStubsClassVisitor(Class* klass, void*) +static 
bool UninstallStubsClassVisitor(mirror::Class* klass, void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - AbstractMethod* method = klass->GetDirectMethod(i); + mirror::AbstractMethod* method = klass->GetDirectMethod(i); if (instrumentation->GetSavedCodeFromMap(method) != NULL) { instrumentation->ResetSavedCode(method); } } for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - AbstractMethod* method = klass->GetVirtualMethod(i); + mirror::AbstractMethod* method = klass->GetVirtualMethod(i); if (instrumentation->GetSavedCodeFromMap(method) != NULL) { instrumentation->ResetSavedCode(method); } @@ -83,7 +86,7 @@ void InstrumentationInstallStack(Thread* self, void* arg) if (GetCurrentQuickFrame() == NULL) { return true; // Ignore shadow frames. } - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m == NULL) { return true; // Ignore upcalls. } @@ -125,7 +128,7 @@ static void InstrumentationRestoreStack(Thread* self, void*) if (self_->IsInstrumentationStackEmpty()) { return false; // Stop. } - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m == NULL) { return true; // Ignore upcalls. 
} @@ -171,16 +174,16 @@ void Instrumentation::UninstallStubs() { Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, NULL); } -void Instrumentation::AddSavedCodeToMap(const AbstractMethod* method, const void* code) { +void Instrumentation::AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code) { saved_code_map_.Put(method, code); } -void Instrumentation::RemoveSavedCodeFromMap(const AbstractMethod* method) { +void Instrumentation::RemoveSavedCodeFromMap(const mirror::AbstractMethod* method) { saved_code_map_.erase(method); } -const void* Instrumentation::GetSavedCodeFromMap(const AbstractMethod* method) { - typedef SafeMap::const_iterator It; // TODO: C++0x auto +const void* Instrumentation::GetSavedCodeFromMap(const mirror::AbstractMethod* method) { + typedef SafeMap::const_iterator It; // TODO: C++0x auto It it = saved_code_map_.find(method); if (it == saved_code_map_.end()) { return NULL; @@ -189,7 +192,7 @@ const void* Instrumentation::GetSavedCodeFromMap(const AbstractMethod* method) { } } -void Instrumentation::SaveAndUpdateCode(AbstractMethod* method) { +void Instrumentation::SaveAndUpdateCode(mirror::AbstractMethod* method) { #if defined(ART_USE_LLVM_COMPILER) UNUSED(method); UNIMPLEMENTED(FATAL); @@ -201,7 +204,7 @@ void Instrumentation::SaveAndUpdateCode(AbstractMethod* method) { #endif } -void Instrumentation::ResetSavedCode(AbstractMethod* method) { +void Instrumentation::ResetSavedCode(mirror::AbstractMethod* method) { CHECK(GetSavedCodeFromMap(method) != NULL); method->SetCode(GetSavedCodeFromMap(method)); RemoveSavedCodeFromMap(method); @@ -223,7 +226,7 @@ void Instrumentation::RemoveTrace() { uint32_t InstrumentationMethodUnwindFromCode(Thread* self) { Trace* trace = Runtime::Current()->GetInstrumentation()->GetTrace(); InstrumentationStackFrame instrumentation_frame = self->PopInstrumentationStackFrame(); - AbstractMethod* method = instrumentation_frame.method_; + mirror::AbstractMethod* method = 
instrumentation_frame.method_; uint32_t lr = instrumentation_frame.return_pc_; trace->LogMethodTraceEvent(self, method, Trace::kMethodTraceUnwind); diff --git a/src/instrumentation.h b/src/instrumentation.h index 00060ce168..fb49bf8208 100644 --- a/src/instrumentation.h +++ b/src/instrumentation.h @@ -17,29 +17,27 @@ #ifndef ART_SRC_INSTRUMENTATION_H_ #define ART_SRC_INSTRUMENTATION_H_ -#include -#include -#include - #include "base/macros.h" -#include "globals.h" #include "safe_map.h" -#include "trace.h" -#include "UniquePtr.h" + +#include namespace art { +namespace mirror { class AbstractMethod; +} class Thread; +class Trace; uint32_t InstrumentationMethodUnwindFromCode(Thread* self); struct InstrumentationStackFrame { InstrumentationStackFrame() : method_(NULL), return_pc_(0), frame_id_(0) {} - InstrumentationStackFrame(AbstractMethod* method, uintptr_t return_pc, size_t frame_id) + InstrumentationStackFrame(mirror::AbstractMethod* method, uintptr_t return_pc, size_t frame_id) : method_(method), return_pc_(return_pc), frame_id_(frame_id) { } - AbstractMethod* method_; + mirror::AbstractMethod* method_; uintptr_t return_pc_; size_t frame_id_; }; @@ -55,20 +53,20 @@ class Instrumentation { // Restores original code for each method and fixes the return values of each thread's stack. 
void UninstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_); - const void* GetSavedCodeFromMap(const AbstractMethod* method); - void SaveAndUpdateCode(AbstractMethod* method); - void ResetSavedCode(AbstractMethod* method); + const void* GetSavedCodeFromMap(const mirror::AbstractMethod* method); + void SaveAndUpdateCode(mirror::AbstractMethod* method); + void ResetSavedCode(mirror::AbstractMethod* method); Trace* GetTrace() const; void SetTrace(Trace* trace); void RemoveTrace(); private: - void AddSavedCodeToMap(const AbstractMethod* method, const void* code); - void RemoveSavedCodeFromMap(const AbstractMethod* method); + void AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code); + void RemoveSavedCodeFromMap(const mirror::AbstractMethod* method); // Maps a method to its original code pointer. - SafeMap saved_code_map_; + SafeMap saved_code_map_; Trace* trace_; diff --git a/src/intern_table.cc b/src/intern_table.cc index 817ce1e5e1..fa3c0753a9 100644 --- a/src/intern_table.cc +++ b/src/intern_table.cc @@ -16,6 +16,8 @@ #include "intern_table.h" +#include "mirror/string.h" +#include "thread.h" #include "UniquePtr.h" #include "utf.h" @@ -36,7 +38,7 @@ void InternTable::DumpForSigQuit(std::ostream& os) const { << image_strong_interns_.size() << " image strong\n"; } -void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void InternTable::VisitRoots(RootVisitor* visitor, void* arg) { MutexLock mu(Thread::Current(), intern_table_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) { @@ -46,11 +48,11 @@ void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) { // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots. 
} -String* InternTable::Lookup(Table& table, String* s, uint32_t hash_code) { +mirror::String* InternTable::Lookup(Table& table, mirror::String* s, uint32_t hash_code) { intern_table_lock_.AssertHeld(Thread::Current()); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = table.find(hash_code), end = table.end(); it != end; ++it) { - String* existing_string = it->second; + mirror::String* existing_string = it->second; if (existing_string->Equals(s)) { return existing_string; } @@ -58,18 +60,18 @@ String* InternTable::Lookup(Table& table, String* s, uint32_t hash_code) { return NULL; } -String* InternTable::Insert(Table& table, String* s, uint32_t hash_code) { +mirror::String* InternTable::Insert(Table& table, mirror::String* s, uint32_t hash_code) { intern_table_lock_.AssertHeld(Thread::Current()); table.insert(std::make_pair(hash_code, s)); return s; } -void InternTable::RegisterStrong(String* s) { +void InternTable::RegisterStrong(mirror::String* s) { MutexLock mu(Thread::Current(), intern_table_lock_); Insert(image_strong_interns_, s, s->GetHashCode()); } -void InternTable::Remove(Table& table, const String* s, uint32_t hash_code) { +void InternTable::Remove(Table& table, const mirror::String* s, uint32_t hash_code) { intern_table_lock_.AssertHeld(Thread::Current()); typedef Table::iterator It; // TODO: C++0x auto for (It it = table.find(hash_code), end = table.end(); it != end; ++it) { @@ -80,7 +82,7 @@ void InternTable::Remove(Table& table, const String* s, uint32_t hash_code) { } } -String* InternTable::Insert(String* s, bool is_strong) { +mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) { MutexLock mu(Thread::Current(), intern_table_lock_); DCHECK(s != NULL); @@ -88,12 +90,12 @@ String* InternTable::Insert(String* s, bool is_strong) { if (is_strong) { // Check the strong table for a match. 
- String* strong = Lookup(strong_interns_, s, hash_code); + mirror::String* strong = Lookup(strong_interns_, s, hash_code); if (strong != NULL) { return strong; } // Check the image table for a match. - String* image = Lookup(image_strong_interns_, s, hash_code); + mirror::String* image = Lookup(image_strong_interns_, s, hash_code); if (image != NULL) { return image; } @@ -102,7 +104,7 @@ String* InternTable::Insert(String* s, bool is_strong) { Dirty(); // There is no match in the strong table, check the weak table. - String* weak = Lookup(weak_interns_, s, hash_code); + mirror::String* weak = Lookup(weak_interns_, s, hash_code); if (weak != NULL) { // A match was found in the weak table. Promote to the strong table. Remove(weak_interns_, weak, hash_code); @@ -114,17 +116,17 @@ String* InternTable::Insert(String* s, bool is_strong) { } // Check the strong table for a match. - String* strong = Lookup(strong_interns_, s, hash_code); + mirror::String* strong = Lookup(strong_interns_, s, hash_code); if (strong != NULL) { return strong; } // Check the image table for a match. - String* image = Lookup(image_strong_interns_, s, hash_code); + mirror::String* image = Lookup(image_strong_interns_, s, hash_code); if (image != NULL) { return image; } // Check the weak table for a match. 
- String* weak = Lookup(weak_interns_, s, hash_code); + mirror::String* weak = Lookup(weak_interns_, s, hash_code); if (weak != NULL) { return weak; } @@ -132,39 +134,39 @@ String* InternTable::Insert(String* s, bool is_strong) { return Insert(weak_interns_, s, hash_code); } -String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) { - return InternStrong(String::AllocFromModifiedUtf8(Thread::Current(), utf16_length, utf8_data)); +mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) { + return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf16_length, utf8_data)); } -String* InternTable::InternStrong(const char* utf8_data) { - return InternStrong(String::AllocFromModifiedUtf8(Thread::Current(), utf8_data)); +mirror::String* InternTable::InternStrong(const char* utf8_data) { + return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data)); } -String* InternTable::InternStrong(String* s) { +mirror::String* InternTable::InternStrong(mirror::String* s) { if (s == NULL) { return NULL; } return Insert(s, true); } -String* InternTable::InternWeak(String* s) { +mirror::String* InternTable::InternWeak(mirror::String* s) { if (s == NULL) { return NULL; } return Insert(s, false); } -bool InternTable::ContainsWeak(String* s) { +bool InternTable::ContainsWeak(mirror::String* s) { MutexLock mu(Thread::Current(), intern_table_lock_); - const String* found = Lookup(weak_interns_, s, s->GetHashCode()); + const mirror::String* found = Lookup(weak_interns_, s, s->GetHashCode()); return found == s; } -void InternTable::SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) { +void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) { MutexLock mu(Thread::Current(), intern_table_lock_); typedef Table::iterator It; // TODO: C++0x auto for (It it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) { - Object* object = it->second; + 
mirror::Object* object = it->second; if (!is_marked(object, arg)) { weak_interns_.erase(it++); } else { diff --git a/src/intern_table.h b/src/intern_table.h index 06a2b89e19..3018317e0f 100644 --- a/src/intern_table.h +++ b/src/intern_table.h @@ -17,14 +17,15 @@ #ifndef ART_SRC_INTERN_TABLE_H_ #define ART_SRC_INTERN_TABLE_H_ -#include - #include "base/mutex.h" -#include "heap.h" -#include "object.h" -#include "safe_map.h" +#include "root_visitor.h" + +#include namespace art { +namespace mirror { +class String; +} // namespace mirror /** * Used to intern strings. @@ -41,31 +42,31 @@ class InternTable { InternTable(); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(int32_t utf16_length, const char* utf8_data) + mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(const char* utf8_data) + mirror::String* InternStrong(const char* utf8_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'weak' table. (See above.) - String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Register a String trusting that it is safe to intern. // Used when reinitializing InternTable from an image. 
- void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RegisterStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) + void SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t Size() const; - void VisitRoots(Heap::RootVisitor* visitor, void* arg); + void VisitRoots(RootVisitor* visitor, void* arg); void DumpForSigQuit(std::ostream& os) const; @@ -75,15 +76,15 @@ class InternTable { } private: - typedef std::multimap Table; + typedef std::multimap Table; - String* Insert(String* s, bool is_strong) + mirror::String* Insert(mirror::String* s, bool is_strong) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* Lookup(Table& table, String* s, uint32_t hash_code) + mirror::String* Lookup(Table& table, mirror::String* s, uint32_t hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* Insert(Table& table, String* s, uint32_t hash_code); - void Remove(Table& table, const String* s, uint32_t hash_code); + mirror::String* Insert(Table& table, mirror::String* s, uint32_t hash_code); + void Remove(Table& table, const mirror::String* s, uint32_t hash_code); mutable Mutex intern_table_lock_; bool is_dirty_; diff --git a/src/intern_table_test.cc b/src/intern_table_test.cc index ee9165e69a..f6b040def2 100644 --- a/src/intern_table_test.cc +++ b/src/intern_table_test.cc @@ -17,7 +17,7 @@ #include "intern_table.h" #include "common_test.h" -#include "object.h" +#include "mirror/object.h" #include "sirt_ref.h" namespace art { @@ -27,10 +27,10 @@ class InternTableTest : public CommonTest {}; TEST_F(InternTableTest, Intern) { ScopedObjectAccess soa(Thread::Current()); InternTable intern_table; - SirtRef foo_1(soa.Self(), 
intern_table.InternStrong(3, "foo")); - SirtRef foo_2(soa.Self(), intern_table.InternStrong(3, "foo")); - SirtRef foo_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); - SirtRef bar(soa.Self(), intern_table.InternStrong(3, "bar")); + SirtRef foo_1(soa.Self(), intern_table.InternStrong(3, "foo")); + SirtRef foo_2(soa.Self(), intern_table.InternStrong(3, "foo")); + SirtRef foo_3(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef bar(soa.Self(), intern_table.InternStrong(3, "bar")); EXPECT_TRUE(foo_1->Equals("foo")); EXPECT_TRUE(foo_2->Equals("foo")); EXPECT_TRUE(foo_3->Equals("foo")); @@ -47,7 +47,7 @@ TEST_F(InternTableTest, Size) { InternTable t; EXPECT_EQ(0U, t.Size()); t.InternStrong(3, "foo"); - SirtRef foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); t.InternWeak(foo.get()); EXPECT_EQ(1U, t.Size()); t.InternStrong(3, "bar"); @@ -56,9 +56,9 @@ TEST_F(InternTableTest, Size) { class TestPredicate { public: - bool IsMarked(const Object* s) const { + bool IsMarked(const mirror::Object* s) const { bool erased = false; - typedef std::vector::iterator It; // TODO: C++0x auto + typedef std::vector::iterator It; // TODO: C++0x auto for (It it = expected_.begin(), end = expected_.end(); it != end; ++it) { if (*it == s) { expected_.erase(it); @@ -70,7 +70,7 @@ class TestPredicate { return false; } - void Expect(const String* s) { + void Expect(const mirror::String* s) { expected_.push_back(s); } @@ -79,10 +79,10 @@ class TestPredicate { } private: - mutable std::vector expected_; + mutable std::vector expected_; }; -bool IsMarked(const Object* object, void* arg) { +bool IsMarked(const mirror::Object* object, void* arg) { return reinterpret_cast(arg)->IsMarked(object); } @@ -91,10 +91,12 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { InternTable t; t.InternStrong(3, "foo"); t.InternStrong(3, "bar"); - SirtRef 
hello(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "hello")); - SirtRef world(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "world")); - SirtRef s0(soa.Self(), t.InternWeak(hello.get())); - SirtRef s1(soa.Self(), t.InternWeak(world.get())); + SirtRef hello(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello")); + SirtRef world(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "world")); + SirtRef s0(soa.Self(), t.InternWeak(hello.get())); + SirtRef s1(soa.Self(), t.InternWeak(world.get())); EXPECT_EQ(4U, t.Size()); @@ -110,7 +112,8 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { EXPECT_EQ(2U, t.Size()); // Just check that we didn't corrupt the map. - SirtRef still_here(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "still here")); + SirtRef still_here(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here")); t.InternWeak(still_here.get()); EXPECT_EQ(3U, t.Size()); } @@ -120,9 +123,9 @@ TEST_F(InternTableTest, ContainsWeak) { { // Strongs are never weak. InternTable t; - SirtRef interned_foo_1(soa.Self(), t.InternStrong(3, "foo")); + SirtRef interned_foo_1(soa.Self(), t.InternStrong(3, "foo")); EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get())); - SirtRef interned_foo_2(soa.Self(), t.InternStrong(3, "foo")); + SirtRef interned_foo_2(soa.Self(), t.InternStrong(3, "foo")); EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get())); EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get()); } @@ -130,11 +133,13 @@ TEST_F(InternTableTest, ContainsWeak) { { // Weaks are always weak. 
InternTable t; - SirtRef foo_1(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); - SirtRef foo_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef foo_1(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef foo_2(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); EXPECT_NE(foo_1.get(), foo_2.get()); - SirtRef interned_foo_1(soa.Self(), t.InternWeak(foo_1.get())); - SirtRef interned_foo_2(soa.Self(), t.InternWeak(foo_2.get())); + SirtRef interned_foo_1(soa.Self(), t.InternWeak(foo_1.get())); + SirtRef interned_foo_2(soa.Self(), t.InternWeak(foo_2.get())); EXPECT_TRUE(t.ContainsWeak(interned_foo_2.get())); EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get()); } @@ -142,10 +147,10 @@ TEST_F(InternTableTest, ContainsWeak) { { // A weak can be promoted to a strong. InternTable t; - SirtRef foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); - SirtRef interned_foo_1(soa.Self(), t.InternWeak(foo.get())); + SirtRef foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef interned_foo_1(soa.Self(), t.InternWeak(foo.get())); EXPECT_TRUE(t.ContainsWeak(interned_foo_1.get())); - SirtRef interned_foo_2(soa.Self(), t.InternStrong(3, "foo")); + SirtRef interned_foo_2(soa.Self(), t.InternStrong(3, "foo")); EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get())); EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get()); } @@ -153,10 +158,11 @@ TEST_F(InternTableTest, ContainsWeak) { { // Interning a weak after a strong gets you the strong. 
InternTable t; - SirtRef interned_foo_1(soa.Self(), t.InternStrong(3, "foo")); + SirtRef interned_foo_1(soa.Self(), t.InternStrong(3, "foo")); EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get())); - SirtRef foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo")); - SirtRef interned_foo_2(soa.Self(), t.InternWeak(foo.get())); + SirtRef foo(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")); + SirtRef interned_foo_2(soa.Self(), t.InternWeak(foo.get())); EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get())); EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get()); } diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc index 820348e081..65729c9394 100644 --- a/src/interpreter/interpreter.cc +++ b/src/interpreter/interpreter.cc @@ -19,18 +19,28 @@ #include #include "base/logging.h" +#include "class_linker-inl.h" #include "common_throws.h" #include "debugger.h" #include "dex_instruction.h" +#include "gc/card_table-inl.h" #include "invoke_arg_array_builder.h" #include "nth_caller_visitor.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "runtime_support.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread.h" +using namespace art::mirror; + namespace art { namespace interpreter { diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h index 6990458934..eee13dc1b3 100644 --- a/src/interpreter/interpreter.h +++ b/src/interpreter/interpreter.h @@ -21,18 +21,20 @@ #include "locks.h" namespace art { - +namespace mirror { class AbstractMethod; +class Object; +} // namespace mirror + union JValue; class MethodHelper; -class Object; class ShadowFrame; class Thread; namespace interpreter { -extern void EnterInterpreterFromInvoke(Thread* 
self, AbstractMethod* method, Object* receiver, - JValue* args, JValue* result) +extern void EnterInterpreterFromInvoke(Thread* self, mirror::AbstractMethod* method, + mirror::Object* receiver, JValue* args, JValue* result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern JValue EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame& shadow_frame, diff --git a/src/invoke_arg_array_builder.h b/src/invoke_arg_array_builder.h index 16eedfe54d..19c42ac9b8 100644 --- a/src/invoke_arg_array_builder.h +++ b/src/invoke_arg_array_builder.h @@ -17,7 +17,7 @@ #ifndef ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_ #define ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_ -#include "object.h" +#include "mirror/object.h" #include "scoped_thread_state_change.h" namespace art { @@ -31,7 +31,7 @@ static inline size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) { } else if (ch == 'L') { // Argument is a reference or an array. The shorty descriptor // does not distinguish between these types. - num_bytes += sizeof(Object*); + num_bytes += sizeof(mirror::Object*); } else { num_bytes += 4; } @@ -78,7 +78,7 @@ class ArgArray { arg_array_[offset].SetF(va_arg(ap, jdouble)); break; case 'L': - arg_array_[offset].SetL(soa.Decode(va_arg(ap, jobject))); + arg_array_[offset].SetL(soa.Decode(va_arg(ap, jobject))); break; case 'D': arg_array_[offset].SetD(va_arg(ap, jdouble)); @@ -113,7 +113,7 @@ class ArgArray { arg_array_[offset].SetF(args[offset].f); break; case 'L': - arg_array_[offset].SetL(soa.Decode(args[offset].l)); + arg_array_[offset].SetL(soa.Decode(args[offset].l)); break; case 'D': arg_array_[offset].SetD(args[offset].d); diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h index 6cac0f678d..71bae087fe 100644 --- a/src/jdwp/jdwp.h +++ b/src/jdwp/jdwp.h @@ -30,8 +30,9 @@ struct iovec; namespace art { - +namespace mirror { class AbstractMethod; +} // namespace mirror class Thread; namespace JDWP { diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index ba2d8d23af..71e91d4958 100644 --- 
a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -28,6 +28,7 @@ #include "jdwp/jdwp_expand_buf.h" #include "jdwp/jdwp_handler.h" #include "jdwp/jdwp_priv.h" +#include "thread.h" /* General notes: diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc index cb1369576f..aa5a8a05d7 100644 --- a/src/jdwp/jdwp_handler.cc +++ b/src/jdwp/jdwp_handler.cc @@ -42,6 +42,9 @@ #include "jdwp/jdwp_event.h" #include "jdwp/jdwp_expand_buf.h" #include "jdwp/jdwp_priv.h" +#include "runtime.h" +#include "thread.h" +#include "UniquePtr.h" namespace art { diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc index 16976d4994..4ed789817f 100644 --- a/src/jni_compiler_test.cc +++ b/src/jni_compiler_test.cc @@ -21,6 +21,11 @@ #include "indirect_reference_table.h" #include "jni_internal.h" #include "mem_map.h" +#include "mirror/class.h" +#include "mirror/class_loader.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/stack_trace_element.h" #include "runtime.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" @@ -43,8 +48,9 @@ class JniCompilerTest : public CommonTest { const char* method_name, const char* method_sig) { ScopedObjectAccess soa(Thread::Current()); // Compile the native method before starting the runtime - Class* c = class_linker_->FindClass("LMyClassNatives;", soa.Decode(class_loader)); - AbstractMethod* method; + mirror::Class* c = class_linker_->FindClass("LMyClassNatives;", + soa.Decode(class_loader)); + mirror::AbstractMethod* method; if (direct) { method = c->FindDirectMethod(method_name, method_sig); } else { @@ -141,7 +147,7 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) { ScopedObjectAccess soa(Thread::Current()); std::string reason; ASSERT_TRUE( - Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), + Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), reason)) << reason; jint result = 
env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); @@ -155,7 +161,7 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) { ScopedObjectAccess soa(Thread::Current()); std::string reason; ASSERT_TRUE( - Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), + Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode(class_loader_), reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); @@ -548,15 +554,15 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { // Build stack trace jobject internal = Thread::Current()->CreateInternalStackTrace(soa); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal); - ObjectArray* trace_array = - soa.Decode*>(ste_array); + mirror::ObjectArray* trace_array = + soa.Decode*>(ste_array); EXPECT_TRUE(trace_array != NULL); EXPECT_EQ(11, trace_array->GetLength()); // Check stack trace entries have expected values for (int32_t i = 0; i < trace_array->GetLength(); ++i) { EXPECT_EQ(-2, trace_array->Get(i)->GetLineNumber()); - StackTraceElement* ste = trace_array->Get(i); + mirror::StackTraceElement* ste = trace_array->Get(i); EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str()); EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str()); EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str()); @@ -601,7 +607,7 @@ jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) { // Add 10 local references ScopedObjectAccess soa(env); for (int i = 0; i < 10; i++) { - soa.AddLocalReference(soa.Decode(thisObj)); + soa.AddLocalReference(soa.Decode(thisObj)); } return x+1; } diff --git a/src/jni_internal.cc b/src/jni_internal.cc index 84b5144a3a..0ee4c21ce4 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -27,19 +27,28 @@ #include "base/stl_util.h" #include "base/stringpiece.h" #include "class_linker.h" -#include 
"class_loader.h" +#include "gc/card_table-inl.h" #include "invoke_arg_array_builder.h" #include "jni.h" -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" #include "object_utils.h" #include "runtime.h" #include "safe_map.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "thread.h" +#include "utf.h" #include "UniquePtr.h" #include "well_known_classes.h" +using namespace art::mirror; + namespace art { static const size_t kMonitorsInitial = 32; // Arbitrary. @@ -2533,6 +2542,11 @@ void JNIEnvExt::PopFrame() { stacked_local_ref_cookies.pop_back(); } +Offset JNIEnvExt::SegmentStateOffset() { + return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) + + IndirectReferenceTable::SegmentStateOffset().Int32Value()); +} + // JNI Invocation interface. extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, void** p_env, void* vm_args) { @@ -2860,7 +2874,7 @@ void* JavaVMExt::FindCodeForNativeMethod(AbstractMethod* m) { return native_method; } -void JavaVMExt::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void JavaVMExt::VisitRoots(RootVisitor* visitor, void* arg) { Thread* self = Thread::Current(); { MutexLock mu(self, globals_lock); diff --git a/src/jni_internal.h b/src/jni_internal.h index 6b597ec09e..9b773f3a6d 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -21,9 +21,9 @@ #include "base/macros.h" #include "base/mutex.h" -#include "heap.h" #include "indirect_reference_table.h" #include "reference_table.h" +#include "root_visitor.h" #include "runtime.h" #include @@ -37,12 +37,13 @@ RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods)) namespace art { - +namespace mirror { +class AbstractMethod; class ClassLoader; class Field; +} union JValue; class Libraries; -class AbstractMethod; class ScopedObjectAccess; class 
Thread; @@ -54,7 +55,8 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -JValue InvokeWithJValues(const ScopedObjectAccess&, Object* receiver, AbstractMethod* m, JValue* args) +JValue InvokeWithJValues(const ScopedObjectAccess&, mirror::Object* receiver, + mirror::AbstractMethod* m, JValue* args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); @@ -69,14 +71,15 @@ struct JavaVMExt : public JavaVM { * Returns 'true' on success. On failure, sets 'detail' to a * human-readable description of the error. */ - bool LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail) + bool LoadNativeLibrary(const std::string& path, mirror::ClassLoader* class_loader, + std::string& detail) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /** * Returns a pointer to the code for the native method 'm', found * using dlsym(3) on every native library that's been loaded so far. 
*/ - void* FindCodeForNativeMethod(AbstractMethod* m) + void* FindCodeForNativeMethod(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os); @@ -86,7 +89,7 @@ struct JavaVMExt : public JavaVM { void SetCheckJniEnabled(bool enabled); - void VisitRoots(Heap::RootVisitor*, void*); + void VisitRoots(RootVisitor*, void*); Runtime* runtime; @@ -135,10 +138,7 @@ struct JNIEnvExt : public JNIEnv { void PushFrame(int capacity); void PopFrame(); - static Offset SegmentStateOffset() { - return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) + - IndirectReferenceTable::SegmentStateOffset().Int32Value()); - } + static Offset SegmentStateOffset(); static Offset LocalRefCookieOffset() { return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie)); diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc index 7201233c05..a945ba6199 100644 --- a/src/jni_internal_test.cc +++ b/src/jni_internal_test.cc @@ -20,6 +20,8 @@ #include #include "common_test.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "ScopedLocalRef.h" #include "sirt_ref.h" @@ -70,15 +72,17 @@ class JniInternalTest : public CommonTest { CommonTest::TearDown(); } - AbstractMethod::InvokeStub* DoCompile(AbstractMethod*& method, Object*& receiver, bool is_static, - const char* method_name, const char* method_signature) + mirror::AbstractMethod::InvokeStub* DoCompile(mirror::AbstractMethod*& method, + mirror::Object*& receiver, + bool is_static, const char* method_name, + const char* method_signature) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* class_name = is_static ? 
"StaticLeafMethods" : "NonStaticLeafMethods"; jobject jclass_loader(LoadDex(class_name)); Thread* self = Thread::Current(); - SirtRef + SirtRef class_loader(self, - ScopedObjectAccessUnchecked(self).Decode(jclass_loader)); + ScopedObjectAccessUnchecked(self).Decode(jclass_loader)); if (is_static) { CompileDirectMethod(class_loader.get(), class_name, method_name, method_signature); } else { @@ -87,7 +91,7 @@ class JniInternalTest : public CommonTest { CompileVirtualMethod(class_loader.get(), class_name, method_name, method_signature); } - Class* c = class_linker_->FindClass(DotToDescriptor(class_name).c_str(), class_loader.get()); + mirror::Class* c = class_linker_->FindClass(DotToDescriptor(class_name).c_str(), class_loader.get()); CHECK(c != NULL); method = is_static ? c->FindDirectMethod(method_name, method_signature) @@ -96,24 +100,25 @@ class JniInternalTest : public CommonTest { receiver = (is_static ? NULL : c->AllocObject(self)); - AbstractMethod::InvokeStub* stub = method->GetInvokeStub(); + mirror::AbstractMethod::InvokeStub* stub = method->GetInvokeStub(); CHECK(stub != NULL); return stub; } void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "nop", "()V"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "nop", "()V"); (*stub)(method, receiver, Thread::Current(), NULL, NULL); } void InvokeIdentityByteMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(B)B"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "identity", "(B)B"); JValue args[1]; JValue result; @@ 
-141,9 +146,10 @@ class JniInternalTest : public CommonTest { void InvokeIdentityIntMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(I)I"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "identity", "(I)I"); JValue args[1]; JValue result; @@ -171,9 +177,10 @@ class JniInternalTest : public CommonTest { void InvokeIdentityDoubleMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(D)D"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "identity", "(D)D"); JValue args[1]; JValue result; @@ -201,9 +208,10 @@ class JniInternalTest : public CommonTest { void InvokeSumIntIntMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(II)I"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(II)I"); JValue result; result.SetI(-1); @@ -240,9 +248,10 @@ class JniInternalTest : public CommonTest { void InvokeSumIntIntIntMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(III)I"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(III)I"); JValue result; result.SetI(-1); @@ -284,9 
+293,10 @@ class JniInternalTest : public CommonTest { void InvokeSumIntIntIntIntMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIII)I"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(IIII)I"); JValue result; result.SetI(-1); @@ -333,9 +343,10 @@ class JniInternalTest : public CommonTest { void InvokeSumIntIntIntIntIntMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIIII)I"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(IIIII)I"); JValue result; result.SetI(-1.0); @@ -387,9 +398,10 @@ class JniInternalTest : public CommonTest { void InvokeSumDoubleDoubleMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DD)D"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(DD)D"); JValue args[2]; JValue result; @@ -427,9 +439,10 @@ class JniInternalTest : public CommonTest { void InvokeSumDoubleDoubleDoubleMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDD)D"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(DDD)D"); JValue args[3]; JValue result; @@ 
-458,9 +471,10 @@ class JniInternalTest : public CommonTest { void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDD)D"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(DDDD)D"); JValue args[4]; JValue result; @@ -492,9 +506,10 @@ class JniInternalTest : public CommonTest { void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; - Object* receiver; - AbstractMethod::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDDD)D"); + mirror::AbstractMethod* method; + mirror::Object* receiver; + mirror::AbstractMethod::InvokeStub* stub = + DoCompile(method, receiver, is_static, "sum", "(DDDDD)D"); JValue args[5]; JValue result; @@ -1270,7 +1285,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { jobject outer; jobject inner1, inner2; ScopedObjectAccess soa(env_); - Object* inner2_direct_pointer; + mirror::Object* inner2_direct_pointer; { env_->PushLocalFrame(4); outer = env_->NewLocalRef(original); @@ -1279,7 +1294,7 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) { env_->PushLocalFrame(4); inner1 = env_->NewLocalRef(outer); inner2 = env_->NewStringUTF("survivor"); - inner2_direct_pointer = soa.Decode(inner2); + inner2_direct_pointer = soa.Decode(inner2); env_->PopLocalFrame(inner2); } @@ -1393,16 +1408,17 @@ TEST_F(JniInternalTest, DeleteWeakGlobalRef) { TEST_F(JniInternalTest, StaticMainMethod) { ScopedObjectAccess soa(Thread::Current()); jobject jclass_loader = LoadDex("Main"); - SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); + SirtRef + class_loader(soa.Self(), soa.Decode(jclass_loader)); CompileDirectMethod(class_loader.get(), "Main", "main", 
"([Ljava/lang/String;)V"); - Class* klass = class_linker_->FindClass("LMain;", class_loader.get()); + mirror::Class* klass = class_linker_->FindClass("LMain;", class_loader.get()); ASSERT_TRUE(klass != NULL); - AbstractMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V"); + mirror::AbstractMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V"); ASSERT_TRUE(method != NULL); - AbstractMethod::InvokeStub* stub = method->GetInvokeStub(); + mirror::AbstractMethod::InvokeStub* stub = method->GetInvokeStub(); JValue args[1]; args[0].SetL(NULL); diff --git a/src/jobject_comparator.cc b/src/jobject_comparator.cc index edd072738a..738a186874 100644 --- a/src/jobject_comparator.cc +++ b/src/jobject_comparator.cc @@ -16,7 +16,9 @@ #include "jobject_comparator.h" -#include "object.h" +#include "mirror/array-inl.h" +#include "mirror/class.h" +#include "mirror/object-inl.h" #include "scoped_thread_state_change.h" namespace art { @@ -29,8 +31,8 @@ bool JobjectComparator::operator()(jobject jobj1, jobject jobj2) const { return false; } ScopedObjectAccess soa(Thread::Current()); - Object* obj1 = soa.Decode(jobj1); - Object* obj2 = soa.Decode(jobj2); + mirror::Object* obj1 = soa.Decode(jobj1); + mirror::Object* obj2 = soa.Decode(jobj2); if (obj1 == NULL) { return true; } else if (obj2 == NULL) { diff --git a/src/jvalue.h b/src/jvalue.h index a7a17956bf..fa85937dcd 100644 --- a/src/jvalue.h +++ b/src/jvalue.h @@ -20,8 +20,9 @@ #include "base/macros.h" namespace art { - +namespace mirror { class Object; +} // namespace mirror union PACKED(4) JValue { // We default initialize JValue instances to all-zeros. 
@@ -47,8 +48,8 @@ union PACKED(4) JValue { int64_t GetJ() const { return j; } void SetJ(int64_t new_j) { j = new_j; } - Object* GetL() const { return l; } - void SetL(Object* new_l) { l = new_l; } + mirror::Object* GetL() const { return l; } + void SetL(mirror::Object* new_l) { l = new_l; } int16_t GetS() const { return s; } void SetS(int16_t new_s) { @@ -67,7 +68,7 @@ union PACKED(4) JValue { int64_t j; float f; double d; - Object* l; + mirror::Object* l; }; } // namespace art diff --git a/src/mirror/abstract_method-inl.h b/src/mirror/abstract_method-inl.h new file mode 100644 index 0000000000..efb7c03300 --- /dev/null +++ b/src/mirror/abstract_method-inl.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_METHOD_INL_H_ +#define ART_SRC_MIRROR_METHOD_INL_H_ + +#include "abstract_method.h" + +#include "array.h" +#include "dex_file.h" +#include "runtime.h" + +namespace art { +namespace mirror { + +inline Class* AbstractMethod::GetDeclaringClass() const { + Class* result = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, declaring_class_), false); + DCHECK(result != NULL) << this; + DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this; + return result; +} + +inline void AbstractMethod::SetDeclaringClass(Class *new_declaring_class) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, declaring_class_), new_declaring_class, false); +} + +inline uint32_t AbstractMethod::GetAccessFlags() const { + DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, access_flags_), false); +} + +inline uint16_t AbstractMethod::GetMethodIndex() const { + DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_), false); +} + +inline uint32_t AbstractMethod::GetDexMethodIndex() const { + DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_dex_index_), false); +} + +inline uint32_t AbstractMethod::GetCodeSize() const { + DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this); + uintptr_t code = reinterpret_cast(GetCode()); + if (code == 0) { + return 0; + } + // TODO: make this Thumb2 specific + code &= ~0x1; + return reinterpret_cast(code)[-1]; +} + +inline bool AbstractMethod::CheckIncompatibleClassChange(InvokeType type) { + switch (type) { + case kStatic: + return !IsStatic(); + case kDirect: + return !IsDirect() || IsStatic(); + case kVirtual: { + Class* methods_class = GetDeclaringClass(); + return IsDirect() || (methods_class->IsInterface() && 
!IsMiranda()); + } + case kSuper: + return false; // TODO: appropriate checks for call to super class. + case kInterface: { + Class* methods_class = GetDeclaringClass(); + return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass()); + } + default: + LOG(FATAL) << "Unreachable - invocation type: " << type; + return true; + } +} + +inline void AbstractMethod::AssertPcIsWithinCode(uintptr_t pc) const { + if (!kIsDebugBuild) { + return; + } + if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) { + return; + } + Runtime* runtime = Runtime::Current(); + if (GetCode() == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { + return; + } + DCHECK(IsWithinCode(pc)) + << PrettyMethod(this) + << " pc=" << std::hex << pc + << " code=" << GetCode() + << " size=" << GetCodeSize(); +} + +inline uint32_t AbstractMethod::GetOatCodeOffset() const { + DCHECK(!Runtime::Current()->IsStarted()); + return reinterpret_cast(GetCode()); +} + +inline void AbstractMethod::SetOatCodeOffset(uint32_t code_offset) { + DCHECK(!Runtime::Current()->IsStarted()); + SetCode(reinterpret_cast(code_offset)); +} + +inline uint32_t AbstractMethod::GetOatMappingTableOffset() const { + DCHECK(!Runtime::Current()->IsStarted()); + return reinterpret_cast(GetMappingTableRaw()); +} + +inline void AbstractMethod::SetOatMappingTableOffset(uint32_t mapping_table_offset) { + DCHECK(!Runtime::Current()->IsStarted()); + SetMappingTable(reinterpret_cast(mapping_table_offset)); +} + +inline uint32_t AbstractMethod::GetOatVmapTableOffset() const { + DCHECK(!Runtime::Current()->IsStarted()); + return reinterpret_cast(GetVmapTableRaw()); +} + +inline void AbstractMethod::SetOatVmapTableOffset(uint32_t vmap_table_offset) { + DCHECK(!Runtime::Current()->IsStarted()); + SetVmapTable(reinterpret_cast(vmap_table_offset)); +} + +inline void AbstractMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) { + DCHECK(!Runtime::Current()->IsStarted()); + 
SetNativeGcMap(reinterpret_cast(gc_map_offset)); +} + +inline uint32_t AbstractMethod::GetOatNativeGcMapOffset() const { + DCHECK(!Runtime::Current()->IsStarted()); + return reinterpret_cast(GetNativeGcMap()); +} + +inline uint32_t AbstractMethod::GetOatInvokeStubOffset() const { + DCHECK(!Runtime::Current()->IsStarted()); + return reinterpret_cast(GetInvokeStub()); +} + +inline void AbstractMethod::SetOatInvokeStubOffset(uint32_t invoke_stub_offset) { + DCHECK(!Runtime::Current()->IsStarted()); + SetInvokeStub(reinterpret_cast(invoke_stub_offset)); +} + +inline bool AbstractMethod::IsRuntimeMethod() const { + return GetDexMethodIndex() == DexFile::kDexNoIndex16; +} + +inline bool AbstractMethod::IsCalleeSaveMethod() const { + if (!IsRuntimeMethod()) { + return false; + } + Runtime* runtime = Runtime::Current(); + bool result = false; + for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) { + if (this == runtime->GetCalleeSaveMethod(Runtime::CalleeSaveType(i))) { + result = true; + break; + } + } + return result; +} + +inline bool AbstractMethod::IsResolutionMethod() const { + bool result = this == Runtime::Current()->GetResolutionMethod(); + // Check that if we do think it is phony it looks like the resolution method. + DCHECK(!result || IsRuntimeMethod()); + return result; +} +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_METHOD_INL_H_ diff --git a/src/mirror/abstract_method.cc b/src/mirror/abstract_method.cc new file mode 100644 index 0000000000..4641941ef9 --- /dev/null +++ b/src/mirror/abstract_method.cc @@ -0,0 +1,376 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "abstract_method.h" + +#include "abstract_method-inl.h" +#include "class-inl.h" +#include "base/stringpiece.h" +#include "gc/card_table-inl.h" +#include "interpreter/interpreter.h" +#include "jni_internal.h" +#include "object-inl.h" +#include "object_array.h" +#include "object_array-inl.h" +#include "string.h" +#include "object_utils.h" + +namespace art { +namespace mirror { + +// TODO: get global references for these +Class* AbstractMethod::java_lang_reflect_Constructor_ = NULL; +Class* AbstractMethod::java_lang_reflect_Method_ = NULL; + +InvokeType AbstractMethod::GetInvokeType() const { + // TODO: kSuper? 
+ if (GetDeclaringClass()->IsInterface()) { + return kInterface; + } else if (IsStatic()) { + return kStatic; + } else if (IsDirect()) { + return kDirect; + } else { + return kVirtual; + } +} + +void AbstractMethod::SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method) { + CHECK(java_lang_reflect_Constructor_ == NULL); + CHECK(java_lang_reflect_Constructor != NULL); + java_lang_reflect_Constructor_ = java_lang_reflect_Constructor; + + CHECK(java_lang_reflect_Method_ == NULL); + CHECK(java_lang_reflect_Method != NULL); + java_lang_reflect_Method_ = java_lang_reflect_Method; +} + +void AbstractMethod::ResetClasses() { + CHECK(java_lang_reflect_Constructor_ != NULL); + java_lang_reflect_Constructor_ = NULL; + + CHECK(java_lang_reflect_Method_ != NULL); + java_lang_reflect_Method_ = NULL; +} + +ObjectArray* AbstractMethod::GetDexCacheStrings() const { + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_), false); +} + +void AbstractMethod::SetDexCacheStrings(ObjectArray* new_dex_cache_strings) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_), + new_dex_cache_strings, false); +} + +ObjectArray* AbstractMethod::GetDexCacheResolvedMethods() const { + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_), false); +} + +void AbstractMethod::SetDexCacheResolvedMethods(ObjectArray* new_dex_cache_methods) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_), + new_dex_cache_methods, false); +} + +ObjectArray* AbstractMethod::GetDexCacheResolvedTypes() const { + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_), false); +} + +void AbstractMethod::SetDexCacheResolvedTypes(ObjectArray* new_dex_cache_classes) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_), + new_dex_cache_classes, false); +} + +ObjectArray* 
AbstractMethod::GetDexCacheInitializedStaticStorage() const { + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_initialized_static_storage_), + false); +} + +void AbstractMethod::SetDexCacheInitializedStaticStorage(ObjectArray* new_value) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_initialized_static_storage_), + new_value, false); +} + +size_t AbstractMethod::NumArgRegisters(const StringPiece& shorty) { + CHECK_LE(1, shorty.length()); + uint32_t num_registers = 0; + for (int i = 1; i < shorty.length(); ++i) { + char ch = shorty[i]; + if (ch == 'D' || ch == 'J') { + num_registers += 2; + } else { + num_registers += 1; + } + } + return num_registers; +} + +bool AbstractMethod::IsProxyMethod() const { + return GetDeclaringClass()->IsProxyClass(); +} + +AbstractMethod* AbstractMethod::FindOverriddenMethod() const { + if (IsStatic()) { + return NULL; + } + Class* declaring_class = GetDeclaringClass(); + Class* super_class = declaring_class->GetSuperClass(); + uint16_t method_index = GetMethodIndex(); + ObjectArray* super_class_vtable = super_class->GetVTable(); + AbstractMethod* result = NULL; + // Did this method override a super class method? 
If so load the result from the super class' + // vtable + if (super_class_vtable != NULL && method_index < super_class_vtable->GetLength()) { + result = super_class_vtable->Get(method_index); + } else { + // Method didn't override superclass method so search interfaces + if (IsProxyMethod()) { + result = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex()); + CHECK_EQ(result, + Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this)); + } else { + MethodHelper mh(this); + MethodHelper interface_mh; + IfTable* iftable = GetDeclaringClass()->GetIfTable(); + for (size_t i = 0; i < iftable->Count() && result == NULL; i++) { + Class* interface = iftable->GetInterface(i); + for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { + AbstractMethod* interface_method = interface->GetVirtualMethod(j); + interface_mh.ChangeMethod(interface_method); + if (mh.HasSameNameAndSignature(&interface_mh)) { + result = interface_method; + break; + } + } + } + } + } +#ifndef NDEBUG + MethodHelper result_mh(result); + DCHECK(result == NULL || MethodHelper(this).HasSameNameAndSignature(&result_mh)); +#endif + return result; +} + +static const void* GetOatCode(const AbstractMethod* m) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Runtime* runtime = Runtime::Current(); + const void* code = m->GetCode(); + // Peel off any method tracing trampoline. + if (runtime->IsMethodTracingActive() && runtime->GetInstrumentation()->GetSavedCodeFromMap(m) != NULL) { + code = runtime->GetInstrumentation()->GetSavedCodeFromMap(m); + } + // Peel off any resolution stub. 
+ if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { + code = runtime->GetClassLinker()->GetOatCodeFor(m); + } + return code; +} + +uintptr_t AbstractMethod::NativePcOffset(const uintptr_t pc) const { + return pc - reinterpret_cast(GetOatCode(this)); +} + +// Find the lowest-address native safepoint pc for a given dex pc +uintptr_t AbstractMethod::ToFirstNativeSafepointPc(const uint32_t dex_pc) const { +#if !defined(ART_USE_LLVM_COMPILER) + const uint32_t* mapping_table = GetPcToDexMappingTable(); + if (mapping_table == NULL) { + DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this); + return DexFile::kDexNoIndex; // Special no mapping case + } + size_t mapping_table_length = GetPcToDexMappingTableLength(); + for (size_t i = 0; i < mapping_table_length; i += 2) { + if (mapping_table[i + 1] == dex_pc) { + return mapping_table[i] + reinterpret_cast(GetOatCode(this)); + } + } + LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc + << " in " << PrettyMethod(this); + return 0; +#else + // Compiler LLVM doesn't use the machine pc, we just use dex pc instead. 
+ return static_cast(dex_pc); +#endif +} + +uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const { +#if !defined(ART_USE_LLVM_COMPILER) + const uint32_t* mapping_table = GetPcToDexMappingTable(); + if (mapping_table == NULL) { + DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this); + return DexFile::kDexNoIndex; // Special no mapping case + } + size_t mapping_table_length = GetPcToDexMappingTableLength(); + uint32_t sought_offset = pc - reinterpret_cast(GetOatCode(this)); + for (size_t i = 0; i < mapping_table_length; i += 2) { + if (mapping_table[i] == sought_offset) { + return mapping_table[i + 1]; + } + } + LOG(ERROR) << "Failed to find Dex offset for PC offset " << reinterpret_cast(sought_offset) + << "(PC " << reinterpret_cast(pc) << ") in " << PrettyMethod(this); + return DexFile::kDexNoIndex; +#else + // Compiler LLVM doesn't use the machine pc, we just use dex pc instead. + return static_cast(pc); +#endif +} + +uintptr_t AbstractMethod::ToNativePc(const uint32_t dex_pc) const { + const uint32_t* mapping_table = GetDexToPcMappingTable(); + if (mapping_table == NULL) { + DCHECK_EQ(dex_pc, 0U); + return 0; // Special no mapping/pc == 0 case + } + size_t mapping_table_length = GetDexToPcMappingTableLength(); + for (size_t i = 0; i < mapping_table_length; i += 2) { + uint32_t map_offset = mapping_table[i]; + uint32_t map_dex_offset = mapping_table[i + 1]; + if (map_dex_offset == dex_pc) { + return reinterpret_cast(GetOatCode(this)) + map_offset; + } + } + LOG(FATAL) << "Looking up Dex PC not contained in method, 0x" << std::hex << dex_pc + << " in " << PrettyMethod(this); + return 0; +} + +uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { + MethodHelper mh(this); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + // Iterate over the catch handlers associated with dex_pc + for (CatchHandlerIterator it(*code_item, dex_pc); it.HasNext(); it.Next()) { + uint16_t iter_type_idx = 
it.GetHandlerTypeIndex(); + // Catch all case + if (iter_type_idx == DexFile::kDexNoIndex16) { + return it.GetHandlerAddress(); + } + // Does this catch exception type apply? + Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + if (iter_exception_type == NULL) { + // The verifier should take care of resolving all exception classes early + LOG(WARNING) << "Unresolved exception class when finding catch block: " + << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); + } else if (iter_exception_type->IsAssignableFrom(exception_type)) { + return it.GetHandlerAddress(); + } + } + // Handler not found + return DexFile::kDexNoIndex; +} + +void AbstractMethod::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) { + if (kIsDebugBuild) { + self->AssertThreadSuspensionIsAllowable(); + CHECK_EQ(kRunnable, self->GetState()); + } + + // Push a transition back into managed code onto the linked list in thread. + ManagedStack fragment; + self->PushManagedStackFragment(&fragment); + + // Call the invoke stub associated with the method. + // Pass everything as arguments. 
+ AbstractMethod::InvokeStub* stub = GetInvokeStub(); + + if (UNLIKELY(!Runtime::Current()->IsStarted())){ + LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started"; + if (result != NULL) { + result->SetJ(0); + } + } else { + bool interpret = self->ReadFlag(kEnterInterpreter) && !IsNative() && !IsProxyMethod(); + const bool kLogInvocationStartAndReturn = false; + if (!interpret && GetCode() != NULL && stub != NULL) { + if (kLogInvocationStartAndReturn) { + LOG(INFO) << StringPrintf("Invoking '%s' code=%p stub=%p", + PrettyMethod(this).c_str(), GetCode(), stub); + } + (*stub)(this, receiver, self, args, result); + if (kLogInvocationStartAndReturn) { + LOG(INFO) << StringPrintf("Returned '%s' code=%p stub=%p", + PrettyMethod(this).c_str(), GetCode(), stub); + } + } else { + const bool kInterpretMethodsWithNoCode = false; + if (interpret || kInterpretMethodsWithNoCode) { + if (kLogInvocationStartAndReturn) { + LOG(INFO) << "Interpreting " << PrettyMethod(this) << "'"; + } + art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args, result); + if (kLogInvocationStartAndReturn) { + LOG(INFO) << "Returned '" << PrettyMethod(this) << "'"; + } + } else { + LOG(INFO) << "Not invoking '" << PrettyMethod(this) + << "' code=" << reinterpret_cast(GetCode()) + << " stub=" << reinterpret_cast(stub); + if (result != NULL) { + result->SetJ(0); + } + } + } + } + + // Pop transition. 
+ self->PopManagedStackFragment(fragment); +} + +bool AbstractMethod::IsRegistered() const { + void* native_method = GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), false); + CHECK(native_method != NULL); + void* jni_stub = Runtime::Current()->GetJniDlsymLookupStub()->GetData(); + return native_method != jni_stub; +} + +void AbstractMethod::RegisterNative(Thread* self, const void* native_method) { + DCHECK(Thread::Current() == self); + CHECK(IsNative()) << PrettyMethod(this); + CHECK(native_method != NULL) << PrettyMethod(this); + if (!self->GetJniEnv()->vm->work_around_app_jni_bugs) { + SetNativeMethod(native_method); + } else { + // We've been asked to associate this method with the given native method but are working + // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct + // the native method to runtime support and store the target somewhere runtime support will + // find it. +#if defined(__arm__) && !defined(ART_USE_LLVM_COMPILER) + SetNativeMethod(native_method); +#else + UNIMPLEMENTED(FATAL); +#endif + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), + reinterpret_cast(native_method), false); + } +} + +void AbstractMethod::UnregisterNative(Thread* self) { + CHECK(IsNative()) << PrettyMethod(this); + // restore stub to lookup native pointer via dlsym + RegisterNative(self, Runtime::Current()->GetJniDlsymLookupStub()->GetData()); +} + +void AbstractMethod::SetNativeMethod(const void* native_method) { + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), + native_method, false); +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/abstract_method.h b/src/mirror/abstract_method.h new file mode 100644 index 0000000000..1d57abb1d2 --- /dev/null +++ b/src/mirror/abstract_method.h @@ -0,0 +1,524 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_METHOD_H_ +#define ART_SRC_MIRROR_METHOD_H_ + +#include "class.h" +#include "invoke_type.h" +#include "locks.h" +#include "modifiers.h" +#include "object.h" + +namespace art { + +struct AbstractMethodOffsets; +struct ConstructorMethodOffsets; +union JValue; +struct MethodClassOffsets; +struct MethodOffsets; +class StringPiece; + +namespace mirror { + +class StaticStorageBase; + +// C++ mirror of java.lang.reflect.Method and java.lang.reflect.Constructor +class MANAGED AbstractMethod : public Object { + public: + // A function that invokes a method with an array of its arguments. + typedef void InvokeStub(const AbstractMethod* method, + Object* obj, + Thread* thread, + JValue* args, + JValue* result); + + Class* GetDeclaringClass() const; + + void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static MemberOffset DeclaringClassOffset() { + return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_)); + } + + uint32_t GetAccessFlags() const; + + void SetAccessFlags(uint32_t new_access_flags) { + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, access_flags_), new_access_flags, false); + } + + // Approximate what kind of method call would be used for this method. + InvokeType GetInvokeType() const; + + // Returns true if the method is declared public. + bool IsPublic() const { + return (GetAccessFlags() & kAccPublic) != 0; + } + + // Returns true if the method is declared private. 
+ bool IsPrivate() const { + return (GetAccessFlags() & kAccPrivate) != 0; + } + + // Returns true if the method is declared static. + bool IsStatic() const { + return (GetAccessFlags() & kAccStatic) != 0; + } + + // Returns true if the method is a constructor. + bool IsConstructor() const { + return (GetAccessFlags() & kAccConstructor) != 0; + } + + // Returns true if the method is static, private, or a constructor. + bool IsDirect() const { + return IsDirect(GetAccessFlags()); + } + + static bool IsDirect(uint32_t access_flags) { + return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0; + } + + // Returns true if the method is declared synchronized. + bool IsSynchronized() const { + uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized; + return (GetAccessFlags() & synchonized) != 0; + } + + bool IsFinal() const { + return (GetAccessFlags() & kAccFinal) != 0; + } + + bool IsMiranda() const { + return (GetAccessFlags() & kAccMiranda) != 0; + } + + bool IsNative() const { + return (GetAccessFlags() & kAccNative) != 0; + } + + bool IsAbstract() const { + return (GetAccessFlags() & kAccAbstract) != 0; + } + + bool IsSynthetic() const { + return (GetAccessFlags() & kAccSynthetic) != 0; + } + + bool IsProxyMethod() const; + + bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + uint16_t GetMethodIndex() const; + + size_t GetVtableIndex() const { + return GetMethodIndex(); + } + + void SetMethodIndex(uint16_t new_method_index) { + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_), new_method_index, false); + } + + static MemberOffset MethodIndexOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_); + } + + uint32_t GetCodeItemOffset() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_item_offset_), false); + } + + void SetCodeItemOffset(uint32_t new_code_off) { + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_item_offset_), 
new_code_off, false); + } + + // Number of 32bit registers that would be required to hold all the arguments + static size_t NumArgRegisters(const StringPiece& shorty); + + uint32_t GetDexMethodIndex() const; + + void SetDexMethodIndex(uint32_t new_idx) { + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_dex_index_), new_idx, false); + } + + ObjectArray* GetDexCacheStrings() const; + void SetDexCacheStrings(ObjectArray* new_dex_cache_strings) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static MemberOffset DexCacheStringsOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_); + } + + static MemberOffset DexCacheResolvedMethodsOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_); + } + + static MemberOffset DexCacheResolvedTypesOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_); + } + + static MemberOffset DexCacheInitializedStaticStorageOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, + dex_cache_initialized_static_storage_); + } + + ObjectArray* GetDexCacheResolvedMethods() const; + void SetDexCacheResolvedMethods(ObjectArray* new_dex_cache_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetDexCacheResolvedTypes() const; + void SetDexCacheResolvedTypes(ObjectArray* new_dex_cache_types) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetDexCacheInitializedStaticStorage() const; + void SetDexCacheInitializedStaticStorage(ObjectArray* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Find the method that this method overrides + AbstractMethod* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const void* GetCode() const { + return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_), false); + } + + void SetCode(const void* code) { + 
SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_), code, false); + } + + uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsWithinCode(uintptr_t pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uintptr_t code = reinterpret_cast(GetCode()); + if (code == 0) { + return pc == 0; + } + /* + * During a stack walk, a return PC may point to the end of the code + 1 + * (in the case that the last instruction is a call that isn't expected to + * return. Thus, we check <= code + GetCodeSize(). + */ + return (code <= pc && pc <= code + GetCodeSize()); + } + + void AssertPcIsWithinCode(uintptr_t pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + uint32_t GetOatCodeOffset() const; + + void SetOatCodeOffset(uint32_t code_offset); + + static MemberOffset GetCodeOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_); + } + + const uint32_t* GetMappingTable() const { + const uint32_t* map = GetMappingTableRaw(); + if (map == NULL) { + return map; + } + return map + 1; + } + + uint32_t GetPcToDexMappingTableLength() const { + const uint32_t* map = GetMappingTableRaw(); + if (map == NULL) { + return 0; + } + return map[2]; + } + + const uint32_t* GetPcToDexMappingTable() const { + const uint32_t* map = GetMappingTableRaw(); + if (map == NULL) { + return map; + } + return map + 3; + } + + + uint32_t GetDexToPcMappingTableLength() const { + const uint32_t* map = GetMappingTableRaw(); + if (map == NULL) { + return 0; + } + return map[1] - map[2]; + } + + const uint32_t* GetDexToPcMappingTable() const { + const uint32_t* map = GetMappingTableRaw(); + if (map == NULL) { + return map; + } + return map + 3 + map[2]; + } + + + const uint32_t* GetMappingTableRaw() const { + return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false); + } + + void SetMappingTable(const uint32_t* mapping_table) { + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), + mapping_table, false); + } + + 
uint32_t GetOatMappingTableOffset() const; + + void SetOatMappingTableOffset(uint32_t mapping_table_offset); + + // Callers should wrap the uint16_t* in a VmapTable instance for convenient access. + const uint16_t* GetVmapTableRaw() const { + return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false); + } + + void SetVmapTable(const uint16_t* vmap_table) { + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false); + } + + uint32_t GetOatVmapTableOffset() const; + + void SetOatVmapTableOffset(uint32_t vmap_table_offset); + + const uint8_t* GetNativeGcMap() const { + return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), false); + } + void SetNativeGcMap(const uint8_t* data) { + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), data, + false); + } + + // When building the oat need a convenient place to stuff the offset of the native GC map. + void SetOatNativeGcMapOffset(uint32_t gc_map_offset); + uint32_t GetOatNativeGcMapOffset() const; + + size_t GetFrameSizeInBytes() const { + DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); + size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, frame_size_in_bytes_), false); + DCHECK_LE(static_cast(kStackAlignment), result); + return result; + } + + void SetFrameSizeInBytes(size_t new_frame_size_in_bytes) { + DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, frame_size_in_bytes_), + new_frame_size_in_bytes, false); + } + + size_t GetReturnPcOffsetInBytes() const { + return GetFrameSizeInBytes() - kPointerSize; + } + + bool IsRegistered() const; + + void RegisterNative(Thread* self, const void* native_method) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static MemberOffset NativeMethodOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_); + } + + const void* 
GetNativeMethod() const { + return reinterpret_cast(GetField32(NativeMethodOffset(), false)); + } + + void SetNativeMethod(const void*); + + // Native to managed invocation stub entry point + InvokeStub* GetInvokeStub() const { + InvokeStub* result = GetFieldPtr( + OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_), false); + // TODO: DCHECK(result != NULL); should be ahead of time compiled + return result; + } + + void SetInvokeStub(InvokeStub* invoke_stub) { + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_), + invoke_stub, false); + } + + uint32_t GetInvokeStubSize() const { + uintptr_t invoke_stub = reinterpret_cast(GetInvokeStub()); + if (invoke_stub == 0) { + return 0; + } + // TODO: make this Thumb2 specific + invoke_stub &= ~0x1; + return reinterpret_cast(invoke_stub)[-1]; + } + + uint32_t GetOatInvokeStubOffset() const; + void SetOatInvokeStubOffset(uint32_t invoke_stub_offset); + + static MemberOffset GetInvokeStubOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_); + } + + static MemberOffset GetMethodIndexOffset() { + return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_); + } + + uint32_t GetCoreSpillMask() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, core_spill_mask_), false); + } + + void SetCoreSpillMask(uint32_t core_spill_mask) { + // Computed during compilation + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, core_spill_mask_), core_spill_mask, false); + } + + uint32_t GetFpSpillMask() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, fp_spill_mask_), false); + } + + void SetFpSpillMask(uint32_t fp_spill_mask) { + // Computed during compilation + SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, fp_spill_mask_), fp_spill_mask, false); + } + + // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal + // conventions for a method of managed code. Returns false for Proxy methods. 
+ bool IsRuntimeMethod() const; + + // Is this a hand crafted method used for something like describing callee saves? + bool IsCalleeSaveMethod() const; + + bool IsResolutionMethod() const; + + uintptr_t NativePcOffset(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Converts a native PC to a dex PC. + uint32_t ToDexPc(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Converts a dex PC to a native PC. + uintptr_t ToNativePc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Converts a dex PC to the first corresponding safepoint PC. + uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc) + const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Find the catch block for the given exception type and dex_pc + uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); + + static Class* GetConstructorClass() { + return java_lang_reflect_Constructor_; + } + + static Class* GetMethodClass() { + return java_lang_reflect_Method_; + } + + static void ResetClasses(); + + protected: + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". + // The class we are a part of + Class* declaring_class_; + + // short cuts to declaring_class_->dex_cache_ member for fast compiled code access + ObjectArray* dex_cache_initialized_static_storage_; + + // short cuts to declaring_class_->dex_cache_ member for fast compiled code access + ObjectArray* dex_cache_resolved_methods_; + + // short cuts to declaring_class_->dex_cache_ member for fast compiled code access + ObjectArray* dex_cache_resolved_types_; + + // short cuts to declaring_class_->dex_cache_ member for fast compiled code access + ObjectArray* dex_cache_strings_; + + // Access flags; low 16 bits are defined by spec. 
+ uint32_t access_flags_; + + // Compiled code associated with this method for callers from managed code. + // May be compiled managed code or a bridge for invoking a native method. + const void* code_; + + // Offset to the CodeItem. + uint32_t code_item_offset_; + + // Architecture-dependent register spill mask + uint32_t core_spill_mask_; + + // Architecture-dependent register spill mask + uint32_t fp_spill_mask_; + + // Total size in bytes of the frame + size_t frame_size_in_bytes_; + + // Garbage collection map of native PC offsets to reference bitmaps. + const uint8_t* native_gc_map_; + + // Native invocation stub entry point for calling from native to managed code. + InvokeStub* invoke_stub_; + + // Mapping from native pc to dex pc + const uint32_t* mapping_table_; + + // Index into method_ids of the dex file associated with this method + uint32_t method_dex_index_; + + // For concrete virtual methods, this is the offset of the method in Class::vtable_. + // + // For abstract methods in an interface class, this is the offset of the method in + // "iftable_->Get(n)->GetMethodArray()". + // + // For static and direct methods this is the index in the direct methods table. + uint32_t method_index_; + + // The target native method registered with this method + const void* native_method_; + + // When a register is promoted into a register, the spill mask holds which registers hold dex + // registers. The first promoted register's corresponding dex register is vmap_table_[1], the Nth + // is vmap_table_[N]. vmap_table_[0] holds the length of the table. 
+ const uint16_t* vmap_table_; + + static Class* java_lang_reflect_Constructor_; + static Class* java_lang_reflect_Method_; + + friend struct art::AbstractMethodOffsets; // for verifying offset information + friend struct art::ConstructorMethodOffsets; // for verifying offset information + friend struct art::MethodOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod); +}; + +class MANAGED Method : public AbstractMethod { + +}; + +class MANAGED Constructor : public AbstractMethod { + +}; + +class MANAGED AbstractMethodClass : public Class { + private: + Object* ORDER_BY_SIGNATURE_; + friend struct art::MethodClassOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethodClass); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_METHOD_H_ diff --git a/src/mirror/array-inl.h b/src/mirror/array-inl.h new file mode 100644 index 0000000000..b7f212f50f --- /dev/null +++ b/src/mirror/array-inl.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_ARRAY_INL_H_ +#define ART_SRC_MIRROR_ARRAY_INL_H_ + +#include "array.h" + +#include "class.h" + +namespace art { +namespace mirror { + +inline size_t Array::SizeOf() const { + // This is safe from overflow because the array was already allocated, so we know it's sane. 
+ size_t component_size = GetClass()->GetComponentSize(); + int32_t component_count = GetLength(); + size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); + size_t data_size = component_count * component_size; + return header_size + data_size; +} + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_ARRAY_INL_H_ diff --git a/src/mirror/array.cc b/src/mirror/array.cc new file mode 100644 index 0000000000..103efa36b5 --- /dev/null +++ b/src/mirror/array.cc @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "array.h" + +#include "class.h" +#include "class-inl.h" +#include "gc/card_table-inl.h" +#include "object-inl.h" +#include "object_array.h" +#include "object_array-inl.h" +#include "object_utils.h" +#include "sirt_ref.h" +#include "thread.h" +#include "utils.h" + +namespace art { +namespace mirror { + +Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, + size_t component_size) { + DCHECK(array_class != NULL); + DCHECK_GE(component_count, 0); + DCHECK(array_class->IsArrayClass()); + + size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); + size_t data_size = component_count * component_size; + size_t size = header_size + data_size; + + // Check for overflow and throw OutOfMemoryError if this was an unreasonable request. 
+ size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size); + if (data_size >> component_shift != size_t(component_count) || size < data_size) { + self->ThrowNewExceptionF("Ljava/lang/OutOfMemoryError;", + "%s of length %d would overflow", + PrettyDescriptor(array_class).c_str(), component_count); + return NULL; + } + + Heap* heap = Runtime::Current()->GetHeap(); + Array* array = down_cast(heap->AllocObject(self, array_class, size)); + if (array != NULL) { + DCHECK(array->IsArrayInstance()); + array->SetLength(component_count); + } + return array; +} + +Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) { + DCHECK(array_class->IsArrayClass()); + return Alloc(self, array_class, component_count, array_class->GetComponentSize()); +} + +// Create a multi-dimensional array of Objects or primitive types. +// +// We have to generate the names for X[], X[][], X[][][], and so on. The +// easiest way to deal with that is to create the full name once and then +// subtract pieces off. Besides, we want to start with the outermost +// piece and work our way in. +// Recursively create an array with multiple dimensions. Elements may be +// Objects or primitive types. +static Array* RecursiveCreateMultiArray(Thread* self, Class* array_class, int current_dimension, + IntArray* dimensions) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t array_length = dimensions->Get(current_dimension); + SirtRef new_array(self, Array::Alloc(self, array_class, array_length)); + if (UNLIKELY(new_array.get() == NULL)) { + CHECK(self->IsExceptionPending()); + return NULL; + } + if ((current_dimension + 1) < dimensions->GetLength()) { + // Create a new sub-array in every element of the array. 
+ for (int32_t i = 0; i < array_length; i++) { + Array* sub_array = RecursiveCreateMultiArray(self, array_class->GetComponentType(), + current_dimension + 1, dimensions); + if (UNLIKELY(sub_array == NULL)) { + CHECK(self->IsExceptionPending()); + return NULL; + } + new_array->AsObjectArray()->Set(i, sub_array); + } + } + return new_array.get(); +} + +Array* Array::CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions) { + // Verify dimensions. + // + // The caller is responsible for verifying that "dimArray" is non-null + // and has a length > 0 and <= 255. + int num_dimensions = dimensions->GetLength(); + DCHECK_GT(num_dimensions, 0); + DCHECK_LE(num_dimensions, 255); + + for (int i = 0; i < num_dimensions; i++) { + int dimension = dimensions->Get(i); + if (UNLIKELY(dimension < 0)) { + self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", + "Dimension %d: %d", i, dimension); + return NULL; + } + } + + // Generate the full name of the array class. + std::string descriptor(num_dimensions, '['); + descriptor += ClassHelper(element_class).GetDescriptor(); + + // Find/generate the array class. 
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); + if (UNLIKELY(array_class == NULL)) { + CHECK(self->IsExceptionPending()); + return NULL; + } + // create the array + Array* new_array = RecursiveCreateMultiArray(self, array_class, 0, dimensions); + if (UNLIKELY(new_array == NULL)) { + CHECK(self->IsExceptionPending()); + return NULL; + } + return new_array; +} + +bool Array::ThrowArrayIndexOutOfBoundsException(int32_t index) const { + Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", + "length=%i; index=%i", length_, index); + return false; +} + +bool Array::ThrowArrayStoreException(Object* object) const { + Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", + "%s cannot be stored in an array of type %s", + PrettyTypeOf(object).c_str(), PrettyTypeOf(this).c_str()); + return false; +} + +template +PrimitiveArray* PrimitiveArray::Alloc(Thread* self, size_t length) { + DCHECK(array_class_ != NULL); + Array* raw_array = Array::Alloc(self, array_class_, length, sizeof(T)); + return down_cast*>(raw_array); +} + +template Class* PrimitiveArray::array_class_ = NULL; + +// Explicitly instantiate all the primitive array types. 
+template class PrimitiveArray; // BooleanArray +template class PrimitiveArray; // ByteArray +template class PrimitiveArray; // CharArray +template class PrimitiveArray; // DoubleArray +template class PrimitiveArray; // FloatArray +template class PrimitiveArray; // IntArray +template class PrimitiveArray; // LongArray +template class PrimitiveArray; // ShortArray + +} // namespace mirror +} // namespace art diff --git a/src/mirror/array.h b/src/mirror/array.h new file mode 100644 index 0000000000..8da906faf9 --- /dev/null +++ b/src/mirror/array.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_ARRAY_H_ +#define ART_SRC_MIRROR_ARRAY_H_ + +#include "object.h" + +namespace art { +namespace mirror { + +class MANAGED Array : public Object { + public: + // A convenience for code that doesn't know the component size, + // and doesn't want to have to work it out itself. 
+ static Array* Alloc(Thread* self, Class* array_class, int32_t component_count) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, + size_t component_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t SizeOf() const; + + int32_t GetLength() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), false); + } + + void SetLength(int32_t length) { + CHECK_GE(length, 0); + SetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false); + } + + static MemberOffset LengthOffset() { + return OFFSET_OF_OBJECT_MEMBER(Array, length_); + } + + static MemberOffset DataOffset(size_t component_size) { + if (component_size != sizeof(int64_t)) { + return OFFSET_OF_OBJECT_MEMBER(Array, first_element_); + } else { + // Align longs and doubles. + return MemberOffset(OFFSETOF_MEMBER(Array, first_element_) + 4); + } + } + + void* GetRawData(size_t component_size) { + intptr_t data = reinterpret_cast(this) + DataOffset(component_size).Int32Value(); + return reinterpret_cast(data); + } + + const void* GetRawData(size_t component_size) const { + intptr_t data = reinterpret_cast(this) + DataOffset(component_size).Int32Value(); + return reinterpret_cast(data); + } + + protected: + bool IsValidIndex(int32_t index) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (UNLIKELY(index < 0 || index >= GetLength())) { + return ThrowArrayIndexOutOfBoundsException(index); + } + return true; + } + + protected: + bool ThrowArrayIndexOutOfBoundsException(int32_t index) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ThrowArrayStoreException(Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + private: + // The number of array elements. 
+ int32_t length_; + // Marker for the data (used by generated code) + uint32_t first_element_[0]; + + DISALLOW_IMPLICIT_CONSTRUCTORS(Array); +}; + +template +class MANAGED PrimitiveArray : public Array { + public: + typedef T ElementType; + + static PrimitiveArray* Alloc(Thread* self, size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const T* GetData() const { + intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); + return reinterpret_cast(data); + } + + T* GetData() { + intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); + return reinterpret_cast(data); + } + + T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (!IsValidIndex(i)) { + return T(0); + } + return GetData()[i]; + } + + void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsValidIndex(i)) { + GetData()[i] = value; + } + } + + static void SetArrayClass(Class* array_class) { + CHECK(array_class_ == NULL); + CHECK(array_class != NULL); + array_class_ = array_class; + } + + static void ResetArrayClass() { + CHECK(array_class_ != NULL); + array_class_ = NULL; + } + + private: + static Class* array_class_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_ARRAY_H_ diff --git a/src/mirror/class-inl.h b/src/mirror/class-inl.h new file mode 100644 index 0000000000..7eb8601cd6 --- /dev/null +++ b/src/mirror/class-inl.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_CLASS_INL_H_ +#define ART_SRC_MIRROR_CLASS_INL_H_ + +#include "class.h" + +#include "abstract_method.h" +#include "field.h" +#include "iftable.h" +#include "object_array.h" +#include "runtime.h" +#include "string.h" + +namespace art { +namespace mirror { + +inline size_t Class::GetObjectSize() const { + CHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false); + CHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(this); + return result; +} + +inline Class* Class::GetSuperClass() const { + // Can only get super class for loaded classes (hack for when runtime is + // initializing) + DCHECK(IsLoaded() || !Runtime::Current()->IsStarted()) << IsLoaded(); + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false); +} + +inline ObjectArray* Class::GetDirectMethods() const { + DCHECK(IsLoaded() || IsErroneous()); + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); +} + +inline void Class::SetDirectMethods(ObjectArray* new_direct_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(NULL == GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false)); + DCHECK_NE(0, new_direct_methods->GetLength()); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), + new_direct_methods, false); +} + +inline AbstractMethod* Class::GetDirectMethod(int32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
+ return GetDirectMethods()->Get(i); +} + +inline void Class::SetDirectMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + ObjectArray* direct_methods = + GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); + direct_methods->Set(i, f); +} + +// Returns the number of static, private, and constructor methods. +inline size_t Class::NumDirectMethods() const { + return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0; +} + +inline ObjectArray* Class::GetVirtualMethods() const { + DCHECK(IsLoaded() || IsErroneous()); + return GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); +} + +inline void Class::SetVirtualMethods(ObjectArray* new_virtual_methods) { + // TODO: we reassign virtual methods to grow the table for miranda + // methods.. they should really just be assigned once + DCHECK_NE(0, new_virtual_methods->GetLength()); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), + new_virtual_methods, false); +} + +inline size_t Class::NumVirtualMethods() const { + return (GetVirtualMethods() != NULL) ? 
GetVirtualMethods()->GetLength() : 0; +} + +inline AbstractMethod* Class::GetVirtualMethod(uint32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(IsResolved() || IsErroneous()); + return GetVirtualMethods()->Get(i); +} + +inline AbstractMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(IsLoaded() || IsErroneous()); + return GetVirtualMethods()->Get(i); +} + +inline void Class::SetVirtualMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray* virtual_methods = + GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); + virtual_methods->Set(i, f); +} + +inline ObjectArray* Class::GetVTable() const { + DCHECK(IsResolved() || IsErroneous()); + return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false); +} + +inline ObjectArray* Class::GetVTableDuringLinking() const { + DCHECK(IsLoaded() || IsErroneous()); + return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false); +} + +inline void Class::SetVTable(ObjectArray* new_vtable) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false); +} + +inline AbstractMethod* Class::FindVirtualMethodForVirtual(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(!method->GetDeclaringClass()->IsInterface()); + // The argument method may from a super class. + // Use the index to a potentially overridden one for this instance's class. 
+ return GetVTable()->Get(method->GetMethodIndex()); +} + +inline AbstractMethod* Class::FindVirtualMethodForSuper(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(!method->GetDeclaringClass()->IsInterface()); + return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex()); +} + +inline AbstractMethod* Class::FindVirtualMethodForVirtualOrInterface(AbstractMethod* method) const { + if (method->IsDirect()) { + return method; + } + if (method->GetDeclaringClass()->IsInterface()) { + return FindVirtualMethodForInterface(method); + } + return FindVirtualMethodForVirtual(method); +} + +inline IfTable* Class::GetIfTable() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), false); +} + +inline int32_t Class::GetIfTableCount() const { + IfTable* iftable = GetIfTable(); + if (iftable == NULL) { + return 0; + } + return iftable->Count(); +} + +inline void Class::SetIfTable(IfTable* new_iftable) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable, false); +} + +inline ObjectArray* Class::GetIFields() const { + DCHECK(IsLoaded() || IsErroneous()); + return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); +} + +inline void Class::SetIFields(ObjectArray* new_ifields) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(NULL == GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false)); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields, false); +} + +inline ObjectArray* Class::GetSFields() const { + DCHECK(IsLoaded() || IsErroneous()); + return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); +} + +inline void Class::SetSFields(ObjectArray* new_sfields) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(NULL == GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false)); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields, false); +} + +inline size_t Class::NumStaticFields() const { + 
return (GetSFields() != NULL) ? GetSFields()->GetLength() : 0; +} + +inline Field* Class::GetStaticField(uint32_t i) const // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetSFields()->Get(i); +} + +inline void Class::SetStaticField(uint32_t i, Field* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray* sfields= GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); + sfields->Set(i, f); +} + +inline size_t Class::NumInstanceFields() const { + return (GetIFields() != NULL) ? GetIFields()->GetLength() : 0; +} + +inline Field* Class::GetInstanceField(uint32_t i) const // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + DCHECK_NE(NumInstanceFields(), 0U); + return GetIFields()->Get(i); +} + +inline void Class::SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + ObjectArray* ifields= GetFieldObject*>( + OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); + ifields->Set(i, f); +} + +inline void Class::SetVerifyErrorClass(Class* klass) { + CHECK(klass != NULL) << PrettyClass(this); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false); +} + +inline uint32_t Class::GetAccessFlags() const { + // Check class is loaded or this is java.lang.String that has a + // circularity issue during loading the names of its members + DCHECK(IsLoaded() || IsErroneous() || + this == String::GetJavaLangString() || + this == Field::GetJavaLangReflectField() || + this == AbstractMethod::GetConstructorClass() || + this == AbstractMethod::GetMethodClass()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); +} + +inline String* Class::GetName() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), false); +} +inline void Class::SetName(String* name) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false); +} + +} // namespace mirror +} // namespace art + +#endif // 
ART_SRC_MIRROR_CLASS_INL_H_ diff --git a/src/mirror/class.cc b/src/mirror/class.cc new file mode 100644 index 0000000000..7f52d17616 --- /dev/null +++ b/src/mirror/class.cc @@ -0,0 +1,668 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "class.h" + +#include "abstract_method-inl.h" +#include "class-inl.h" +#include "class_linker.h" +#include "class_loader.h" +#include "dex_cache.h" +#include "field-inl.h" +#include "gc/card_table-inl.h" +#include "object-inl.h" +#include "object_array-inl.h" +#include "object_utils.h" +#include "runtime.h" +#include "sirt_ref.h" +#include "thread.h" +#include "throwable.h" +#include "utils.h" +#include "well_known_classes.h" + +namespace art { +namespace mirror { + +Class* Class::java_lang_Class_ = NULL; + +void Class::SetClassClass(Class* java_lang_Class) { + CHECK(java_lang_Class_ == NULL) << java_lang_Class_ << " " << java_lang_Class; + CHECK(java_lang_Class != NULL); + java_lang_Class_ = java_lang_Class; +} + +void Class::ResetClass() { + CHECK(java_lang_Class_ != NULL); + java_lang_Class_ = NULL; +} + +void Class::SetStatus(Status new_status) { + CHECK(new_status > GetStatus() || new_status == kStatusError || !Runtime::Current()->IsStarted()) + << PrettyClass(this) << " " << GetStatus() << " -> " << new_status; + CHECK(sizeof(Status) == sizeof(uint32_t)) << PrettyClass(this); + if (new_status > kStatusResolved) { + CHECK_EQ(GetThinLockId(), 
Thread::Current()->GetThinLockId()) << PrettyClass(this); + } + if (new_status == kStatusError) { + CHECK_NE(GetStatus(), kStatusError) << PrettyClass(this); + + // stash current exception + Thread* self = Thread::Current(); + SirtRef exception(self, self->GetException()); + CHECK(exception.get() != NULL); + + // clear exception to call FindSystemClass + self->ClearException(); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Class* eiie_class = class_linker->FindSystemClass("Ljava/lang/ExceptionInInitializerError;"); + CHECK(!self->IsExceptionPending()); + + // only verification errors, not initialization problems, should set a verify error. + // this is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that case. + Class* exception_class = exception->GetClass(); + if (!eiie_class->IsAssignableFrom(exception_class)) { + SetVerifyErrorClass(exception_class); + } + + // restore exception + self->SetException(exception.get()); + } + return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status, false); +} + +DexCache* Class::GetDexCache() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false); +} + +void Class::SetDexCache(DexCache* new_dex_cache) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false); +} + +Object* Class::AllocObject(Thread* self) { + DCHECK(!IsArrayClass()) << PrettyClass(this); + DCHECK(IsInstantiable()) << PrettyClass(this); + // TODO: decide whether we want this check. It currently fails during bootstrap. 
+ // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this); + DCHECK_GE(this->object_size_, sizeof(Object)); + return Runtime::Current()->GetHeap()->AllocObject(self, this, this->object_size_); +} + +void Class::SetClassSize(size_t new_class_size) { + DCHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this); + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size, false); +} + +// Return the class' name. The exact format is bizarre, but it's the specified behavior for +// Class.getName: keywords for primitive types, regular "[I" form for primitive arrays (so "int" +// but "[I"), and arrays of reference types written between "L" and ";" but with dots rather than +// slashes (so "java.lang.String" but "[Ljava.lang.String;"). Madness. +String* Class::ComputeName() { + String* name = GetName(); + if (name != NULL) { + return name; + } + std::string descriptor(ClassHelper(this).GetDescriptor()); + if ((descriptor[0] != 'L') && (descriptor[0] != '[')) { + // The descriptor indicates that this is the class for + // a primitive type; special-case the return value. + const char* c_name = NULL; + switch (descriptor[0]) { + case 'Z': c_name = "boolean"; break; + case 'B': c_name = "byte"; break; + case 'C': c_name = "char"; break; + case 'S': c_name = "short"; break; + case 'I': c_name = "int"; break; + case 'J': c_name = "long"; break; + case 'F': c_name = "float"; break; + case 'D': c_name = "double"; break; + case 'V': c_name = "void"; break; + default: + LOG(FATAL) << "Unknown primitive type: " << PrintableChar(descriptor[0]); + } + name = String::AllocFromModifiedUtf8(Thread::Current(), c_name); + } else { + // Convert the UTF-8 name to a java.lang.String. The name must use '.' to separate package + // components. 
+ if (descriptor.size() > 2 && descriptor[0] == 'L' && descriptor[descriptor.size() - 1] == ';') { + descriptor.erase(0, 1); + descriptor.erase(descriptor.size() - 1); + } + std::replace(descriptor.begin(), descriptor.end(), '/', '.'); + name = String::AllocFromModifiedUtf8(Thread::Current(), descriptor.c_str()); + } + SetName(name); + return name; +} + +void Class::DumpClass(std::ostream& os, int flags) const { + if ((flags & kDumpClassFullDetail) == 0) { + os << PrettyClass(this); + if ((flags & kDumpClassClassLoader) != 0) { + os << ' ' << GetClassLoader(); + } + if ((flags & kDumpClassInitialized) != 0) { + os << ' ' << GetStatus(); + } + os << "\n"; + return; + } + + Class* super = GetSuperClass(); + ClassHelper kh(this); + os << "----- " << (IsInterface() ? "interface" : "class") << " " + << "'" << kh.GetDescriptor() << "' cl=" << GetClassLoader() << " -----\n", + os << " objectSize=" << SizeOf() << " " + << "(" << (super != NULL ? super->SizeOf() : -1) << " from super)\n", + os << StringPrintf(" access=0x%04x.%04x\n", + GetAccessFlags() >> 16, GetAccessFlags() & kAccJavaFlagsMask); + if (super != NULL) { + os << " super='" << PrettyClass(super) << "' (cl=" << super->GetClassLoader() << ")\n"; + } + if (IsArrayClass()) { + os << " componentType=" << PrettyClass(GetComponentType()) << "\n"; + } + if (kh.NumDirectInterfaces() > 0) { + os << " interfaces (" << kh.NumDirectInterfaces() << "):\n"; + for (size_t i = 0; i < kh.NumDirectInterfaces(); ++i) { + Class* interface = kh.GetDirectInterface(i); + const ClassLoader* cl = interface->GetClassLoader(); + os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl); + } + } + os << " vtable (" << NumVirtualMethods() << " entries, " + << (super != NULL ? 
super->NumVirtualMethods() : 0) << " in super):\n"; + for (size_t i = 0; i < NumVirtualMethods(); ++i) { + os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(GetVirtualMethodDuringLinking(i)).c_str()); + } + os << " direct methods (" << NumDirectMethods() << " entries):\n"; + for (size_t i = 0; i < NumDirectMethods(); ++i) { + os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(GetDirectMethod(i)).c_str()); + } + if (NumStaticFields() > 0) { + os << " static fields (" << NumStaticFields() << " entries):\n"; + if (IsResolved() || IsErroneous()) { + for (size_t i = 0; i < NumStaticFields(); ++i) { + os << StringPrintf(" %2zd: %s\n", i, PrettyField(GetStaticField(i)).c_str()); + } + } else { + os << " "; + } + } + if (NumInstanceFields() > 0) { + os << " instance fields (" << NumInstanceFields() << " entries):\n"; + if (IsResolved() || IsErroneous()) { + for (size_t i = 0; i < NumInstanceFields(); ++i) { + os << StringPrintf(" %2zd: %s\n", i, PrettyField(GetInstanceField(i)).c_str()); + } + } else { + os << " "; + } + } +} + +void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) { + if (new_reference_offsets != CLASS_WALK_SUPER) { + // Sanity check that the number of bits set in the reference offset bitmap + // agrees with the number of references + size_t count = 0; + for (Class* c = this; c != NULL; c = c->GetSuperClass()) { + count += c->NumReferenceInstanceFieldsDuringLinking(); + } + CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets), count); + } + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), + new_reference_offsets, false); +} + +void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) { + if (new_reference_offsets != CLASS_WALK_SUPER) { + // Sanity check that the number of bits set in the reference offset bitmap + // agrees with the number of references + CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets), + NumReferenceStaticFieldsDuringLinking()); + } + 
SetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), + new_reference_offsets, false); +} + +bool Class::Implements(const Class* klass) const { + DCHECK(klass != NULL); + DCHECK(klass->IsInterface()) << PrettyClass(this); + // All interfaces implemented directly and by our superclass, and + // recursively all super-interfaces of those interfaces, are listed + // in iftable_, so we can just do a linear scan through that. + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + if (iftable->GetInterface(i) == klass) { + return true; + } + } + return false; +} + +// Determine whether "this" is assignable from "src", where both of these +// are array classes. +// +// Consider an array class, e.g. Y[][], where Y is a subclass of X. +// Y[][] = Y[][] --> true (identity) +// X[][] = Y[][] --> true (element superclass) +// Y = Y[][] --> false +// Y[] = Y[][] --> false +// Object = Y[][] --> true (everything is an object) +// Object[] = Y[][] --> true +// Object[][] = Y[][] --> true +// Object[][][] = Y[][] --> false (too many []s) +// Serializable = Y[][] --> true (all arrays are Serializable) +// Serializable[] = Y[][] --> true +// Serializable[][] = Y[][] --> false (unless Y is Serializable) +// +// Don't forget about primitive types. +// Object[] = int[] --> false +// +bool Class::IsArrayAssignableFromArray(const Class* src) const { + DCHECK(IsArrayClass()) << PrettyClass(this); + DCHECK(src->IsArrayClass()) << PrettyClass(src); + return GetComponentType()->IsAssignableFrom(src->GetComponentType()); +} + +bool Class::IsAssignableFromArray(const Class* src) const { + DCHECK(!IsInterface()) << PrettyClass(this); // handled first in IsAssignableFrom + DCHECK(src->IsArrayClass()) << PrettyClass(src); + if (!IsArrayClass()) { + // If "this" is not also an array, it must be Object. + // src's super should be java_lang_Object, since it is an array. 
+ Class* java_lang_Object = src->GetSuperClass(); + DCHECK(java_lang_Object != NULL) << PrettyClass(src); + DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src); + return this == java_lang_Object; + } + return IsArrayAssignableFromArray(src); +} + +bool Class::IsSubClass(const Class* klass) const { + DCHECK(!IsInterface()) << PrettyClass(this); + DCHECK(!IsArrayClass()) << PrettyClass(this); + const Class* current = this; + do { + if (current == klass) { + return true; + } + current = current->GetSuperClass(); + } while (current != NULL); + return false; +} + +bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) { + size_t i = 0; + while (descriptor1[i] != '\0' && descriptor1[i] == descriptor2[i]) { + ++i; + } + if (descriptor1.find('/', i) != StringPiece::npos || + descriptor2.find('/', i) != StringPiece::npos) { + return false; + } else { + return true; + } +} + +bool Class::IsInSamePackage(const Class* that) const { + const Class* klass1 = this; + const Class* klass2 = that; + if (klass1 == klass2) { + return true; + } + // Class loaders must match. + if (klass1->GetClassLoader() != klass2->GetClassLoader()) { + return false; + } + // Arrays are in the same package when their element classes are. + while (klass1->IsArrayClass()) { + klass1 = klass1->GetComponentType(); + } + while (klass2->IsArrayClass()) { + klass2 = klass2->GetComponentType(); + } + // Compare the package part of the descriptor string. 
+ ClassHelper kh(klass1); + std::string descriptor1(kh.GetDescriptor()); + kh.ChangeClass(klass2); + std::string descriptor2(kh.GetDescriptor()); + return IsInSamePackage(descriptor1, descriptor2); +} + +bool Class::IsClassClass() const { + Class* java_lang_Class = GetClass()->GetClass(); + return this == java_lang_Class; +} + +bool Class::IsStringClass() const { + return this == String::GetJavaLangString(); +} + +bool Class::IsThrowableClass() const { + return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this); +} + +bool Class::IsFieldClass() const { + Class* java_lang_Class = GetClass(); + Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass(); + return this == java_lang_reflect_Field; + +} + +bool Class::IsMethodClass() const { + return (this == AbstractMethod::GetMethodClass()) || + (this == AbstractMethod::GetConstructorClass()); + +} + +ClassLoader* Class::GetClassLoader() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false); +} + +void Class::SetClassLoader(ClassLoader* new_class_loader) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false); +} + +AbstractMethod* Class::FindVirtualMethodForInterface(AbstractMethod* method) const { + Class* declaring_class = method->GetDeclaringClass(); + DCHECK(declaring_class != NULL) << PrettyClass(this); + DCHECK(declaring_class->IsInterface()) << PrettyMethod(method); + // TODO cache to improve lookup speed + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + if (iftable->GetInterface(i) == declaring_class) { + return iftable->GetMethodArray(i)->Get(method->GetMethodIndex()); + } + } + return NULL; +} + +AbstractMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) const { + // Check the current class before checking the interfaces. 
+ AbstractMethod* method = FindDeclaredVirtualMethod(name, signature); + if (method != NULL) { + return method; + } + + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature); + if (method != NULL) { + return method; + } + } + return NULL; +} + +AbstractMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { + // Check the current class before checking the interfaces. + AbstractMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + if (method != NULL) { + return method; + } + + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + if (method != NULL) { + return method; + } + } + return NULL; +} + + +AbstractMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const { + MethodHelper mh; + for (size_t i = 0; i < NumDirectMethods(); ++i) { + AbstractMethod* method = GetDirectMethod(i); + mh.ChangeMethod(method); + if (name == mh.GetName() && signature == mh.GetSignature()) { + return method; + } + } + return NULL; +} + +AbstractMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { + if (GetDexCache() == dex_cache) { + for (size_t i = 0; i < NumDirectMethods(); ++i) { + AbstractMethod* method = GetDirectMethod(i); + if (method->GetDexMethodIndex() == dex_method_idx) { + return method; + } + } + } + return NULL; +} + +AbstractMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) const { + for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { + AbstractMethod* method = klass->FindDeclaredDirectMethod(name, signature); + if (method != NULL) { + return method; + 
} + } + return NULL; +} + +AbstractMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { + for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { + AbstractMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx); + if (method != NULL) { + return method; + } + } + return NULL; +} + +AbstractMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, + const StringPiece& signature) const { + MethodHelper mh; + for (size_t i = 0; i < NumVirtualMethods(); ++i) { + AbstractMethod* method = GetVirtualMethod(i); + mh.ChangeMethod(method); + if (name == mh.GetName() && signature == mh.GetSignature()) { + return method; + } + } + return NULL; +} + +AbstractMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { + if (GetDexCache() == dex_cache) { + for (size_t i = 0; i < NumVirtualMethods(); ++i) { + AbstractMethod* method = GetVirtualMethod(i); + if (method->GetDexMethodIndex() == dex_method_idx) { + return method; + } + } + } + return NULL; +} + +AbstractMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) const { + for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { + AbstractMethod* method = klass->FindDeclaredVirtualMethod(name, signature); + if (method != NULL) { + return method; + } + } + return NULL; +} + +AbstractMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { + for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { + AbstractMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + if (method != NULL) { + return method; + } + } + return NULL; +} + +Field* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) { + // Is the field in this class? + // Interfaces are not relevant because they can't contain instance fields. 
+ FieldHelper fh; + for (size_t i = 0; i < NumInstanceFields(); ++i) { + Field* f = GetInstanceField(i); + fh.ChangeField(f); + if (name == fh.GetName() && type == fh.GetTypeDescriptor()) { + return f; + } + } + return NULL; +} + +Field* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) { + if (GetDexCache() == dex_cache) { + for (size_t i = 0; i < NumInstanceFields(); ++i) { + Field* f = GetInstanceField(i); + if (f->GetDexFieldIndex() == dex_field_idx) { + return f; + } + } + } + return NULL; +} + +Field* Class::FindInstanceField(const StringPiece& name, const StringPiece& type) { + // Is the field in this class, or any of its superclasses? + // Interfaces are not relevant because they can't contain instance fields. + for (Class* c = this; c != NULL; c = c->GetSuperClass()) { + Field* f = c->FindDeclaredInstanceField(name, type); + if (f != NULL) { + return f; + } + } + return NULL; +} + +Field* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) { + // Is the field in this class, or any of its superclasses? + // Interfaces are not relevant because they can't contain instance fields. 
+ for (Class* c = this; c != NULL; c = c->GetSuperClass()) { + Field* f = c->FindDeclaredInstanceField(dex_cache, dex_field_idx); + if (f != NULL) { + return f; + } + } + return NULL; +} + +Field* Class::FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) { + DCHECK(type != NULL); + FieldHelper fh; + for (size_t i = 0; i < NumStaticFields(); ++i) { + Field* f = GetStaticField(i); + fh.ChangeField(f); + if (name == fh.GetName() && type == fh.GetTypeDescriptor()) { + return f; + } + } + return NULL; +} + +Field* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) { + if (dex_cache == GetDexCache()) { + for (size_t i = 0; i < NumStaticFields(); ++i) { + Field* f = GetStaticField(i); + if (f->GetDexFieldIndex() == dex_field_idx) { + return f; + } + } + } + return NULL; +} + +Field* Class::FindStaticField(const StringPiece& name, const StringPiece& type) { + // Is the field in this class (or its interfaces), or any of its + // superclasses (or their interfaces)? + ClassHelper kh; + for (Class* k = this; k != NULL; k = k->GetSuperClass()) { + // Is the field in this class? + Field* f = k->FindDeclaredStaticField(name, type); + if (f != NULL) { + return f; + } + // Is this field in any of this class' interfaces? + kh.ChangeClass(k); + for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { + Class* interface = kh.GetDirectInterface(i); + f = interface->FindStaticField(name, type); + if (f != NULL) { + return f; + } + } + } + return NULL; +} + +Field* Class::FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) { + ClassHelper kh; + for (Class* k = this; k != NULL; k = k->GetSuperClass()) { + // Is the field in this class? + Field* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx); + if (f != NULL) { + return f; + } + // Is this field in any of this class' interfaces? 
+ kh.ChangeClass(k); + for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { + Class* interface = kh.GetDirectInterface(i); + f = interface->FindStaticField(dex_cache, dex_field_idx); + if (f != NULL) { + return f; + } + } + } + return NULL; +} + +Field* Class::FindField(const StringPiece& name, const StringPiece& type) { + // Find a field using the JLS field resolution order + ClassHelper kh; + for (Class* k = this; k != NULL; k = k->GetSuperClass()) { + // Is the field in this class? + Field* f = k->FindDeclaredInstanceField(name, type); + if (f != NULL) { + return f; + } + f = k->FindDeclaredStaticField(name, type); + if (f != NULL) { + return f; + } + // Is this field in any of this class' interfaces? + kh.ChangeClass(k); + for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { + Class* interface = kh.GetDirectInterface(i); + f = interface->FindStaticField(name, type); + if (f != NULL) { + return f; + } + } + } + return NULL; +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/class.h b/src/mirror/class.h new file mode 100644 index 0000000000..843e07cb37 --- /dev/null +++ b/src/mirror/class.h @@ -0,0 +1,866 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_CLASS_H_ +#define ART_SRC_MIRROR_CLASS_H_ + +#include "modifiers.h" +#include "object.h" +#include "primitive.h" + +/* + * A magic value for refOffsets. 
Ignore the bits and walk the super + * chain when this is the value. + * [This is an unlikely "natural" value, since it would be 30 non-ref instance + * fields followed by 2 ref instance fields.] + */ +#define CLASS_WALK_SUPER ((unsigned int)(3)) +#define CLASS_BITS_PER_WORD (sizeof(unsigned long int) * 8) +#define CLASS_OFFSET_ALIGNMENT 4 +#define CLASS_HIGH_BIT ((unsigned int)1 << (CLASS_BITS_PER_WORD - 1)) +/* + * Given an offset, return the bit number which would encode that offset. + * Local use only. + */ +#define _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) \ + ((unsigned int)(byteOffset) / \ + CLASS_OFFSET_ALIGNMENT) +/* + * Is the given offset too large to be encoded? + */ +#define CLASS_CAN_ENCODE_OFFSET(byteOffset) \ + (_CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) < CLASS_BITS_PER_WORD) +/* + * Return a single bit, encoding the offset. + * Undefined if the offset is too large, as defined above. + */ +#define CLASS_BIT_FROM_OFFSET(byteOffset) \ + (CLASS_HIGH_BIT >> _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset)) +/* + * Return an offset, given a bit number as returned from CLZ. + */ +#define CLASS_OFFSET_FROM_CLZ(rshift) \ + MemberOffset((static_cast(rshift) * CLASS_OFFSET_ALIGNMENT)) + +namespace art { + +struct ClassClassOffsets; +struct ClassOffsets; +class StringPiece; + +namespace mirror { + +class ClassLoader; +class DexCache; +class Field; +class IfTable; + +// Type for the InitializedStaticStorage table. Currently the Class +// provides the static storage. However, this might change to an Array +// to improve image sharing, so we use this type to avoid assumptions +// on the current storage. +class MANAGED StaticStorageBase : public Object { +}; + +// C++ mirror of java.lang.Class +class MANAGED Class : public StaticStorageBase { + public: + // Class Status + // + // kStatusNotReady: If a Class cannot be found in the class table by + // FindClass, it allocates an new one with AllocClass in the + // kStatusNotReady and calls LoadClass. 
Note if it does find a + // class, it may not be kStatusResolved and it will try to push it + // forward toward kStatusResolved. + // + // kStatusIdx: LoadClass populates with Class with information from + // the DexFile, moving the status to kStatusIdx, indicating that the + // Class value in super_class_ has not been populated. The new Class + // can then be inserted into the classes table. + // + // kStatusLoaded: After taking a lock on Class, the ClassLinker will + // attempt to move a kStatusIdx class forward to kStatusLoaded by + // using ResolveClass to initialize the super_class_ and ensuring the + // interfaces are resolved. + // + // kStatusResolved: Still holding the lock on Class, the ClassLinker + // shows linking is complete and fields of the Class populated by making + // it kStatusResolved. Java allows circularities of the form where a super + // class has a field that is of the type of the sub class. We need to be able + // to fully resolve super classes while resolving types for fields. + // + // kStatusRetryVerificationAtRuntime: The verifier sets a class to + // this state if it encounters a soft failure at compile time. This + // often happens when there are unresolved classes in other dex + // files, and this status marks a class as needing to be verified + // again at runtime. + // + // TODO: Explain the other states + enum Status { + kStatusError = -1, + kStatusNotReady = 0, + kStatusIdx = 1, // Loaded, DEX idx in super_class_type_idx_ and interfaces_type_idx_. + kStatusLoaded = 2, // DEX idx values resolved. + kStatusResolved = 3, // Part of linking. + kStatusVerifying = 4, // In the process of being verified. + kStatusRetryVerificationAtRuntime = 5, // Compile time verification failed, retry at runtime. + kStatusVerifyingAtRuntime = 6, // Retrying verification at runtime. + kStatusVerified = 7, // Logically part of linking; done pre-init. + kStatusInitializing = 8, // Class init in progress. + kStatusInitialized = 9, // Ready to go. 
+ }; + + Status GetStatus() const { + DCHECK_EQ(sizeof(Status), sizeof(uint32_t)); + return static_cast(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), false)); + } + + void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Returns true if the class has failed to link. + bool IsErroneous() const { + return GetStatus() == kStatusError; + } + + // Returns true if the class has been loaded. + bool IsIdxLoaded() const { + return GetStatus() >= kStatusIdx; + } + + // Returns true if the class has been loaded. + bool IsLoaded() const { + return GetStatus() >= kStatusLoaded; + } + + // Returns true if the class has been linked. + bool IsResolved() const { + return GetStatus() >= kStatusResolved; + } + + // Returns true if the class was compile-time verified. + bool IsCompileTimeVerified() const { + return GetStatus() >= kStatusRetryVerificationAtRuntime; + } + + // Returns true if the class has been verified. + bool IsVerified() const { + return GetStatus() >= kStatusVerified; + } + + // Returns true if the class is initializing. + bool IsInitializing() const { + return GetStatus() >= kStatusInitializing; + } + + // Returns true if the class is initialized. + bool IsInitialized() const { + return GetStatus() == kStatusInitialized; + } + + uint32_t GetAccessFlags() const; + + void SetAccessFlags(uint32_t new_access_flags) { + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags, false); + } + + // Returns true if the class is an interface. + bool IsInterface() const { + return (GetAccessFlags() & kAccInterface) != 0; + } + + // Returns true if the class is declared public. + bool IsPublic() const { + return (GetAccessFlags() & kAccPublic) != 0; + } + + // Returns true if the class is declared final. 
+ bool IsFinal() const { + return (GetAccessFlags() & kAccFinal) != 0; + } + + bool IsFinalizable() const { + return (GetAccessFlags() & kAccClassIsFinalizable) != 0; + } + + void SetFinalizable() { + uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); + SetAccessFlags(flags | kAccClassIsFinalizable); + } + + // Returns true if the class is abstract. + bool IsAbstract() const { + return (GetAccessFlags() & kAccAbstract) != 0; + } + + // Returns true if the class is an annotation. + bool IsAnnotation() const { + return (GetAccessFlags() & kAccAnnotation) != 0; + } + + // Returns true if the class is synthetic. + bool IsSynthetic() const { + return (GetAccessFlags() & kAccSynthetic) != 0; + } + + bool IsReferenceClass() const { + return (GetAccessFlags() & kAccClassIsReference) != 0; + } + + bool IsWeakReferenceClass() const { + return (GetAccessFlags() & kAccClassIsWeakReference) != 0; + } + + bool IsSoftReferenceClass() const { + return (GetAccessFlags() & kAccReferenceFlagsMask) == kAccClassIsReference; + } + + bool IsFinalizerReferenceClass() const { + return (GetAccessFlags() & kAccClassIsFinalizerReference) != 0; + } + + bool IsPhantomReferenceClass() const { + return (GetAccessFlags() & kAccClassIsPhantomReference) != 0; + } + + + String* GetName() const; // Returns the cached name. + void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name. + // Computes the name, then sets the cached value. + String* ComputeName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsProxyClass() const { + // Read access flags without using getter as whether something is a proxy can be check in + // any loaded state + // TODO: switch to a check if the super class is java.lang.reflect.Proxy? 
+ uint32_t access_flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); + return (access_flags & kAccClassIsProxy) != 0; + } + + Primitive::Type GetPrimitiveType() const { + DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); + return static_cast( + GetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false)); + } + + void SetPrimitiveType(Primitive::Type new_type) { + DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), new_type, false); + } + + // Returns true if the class is a primitive type. + bool IsPrimitive() const { + return GetPrimitiveType() != Primitive::kPrimNot; + } + + bool IsPrimitiveBoolean() const { + return GetPrimitiveType() == Primitive::kPrimBoolean; + } + + bool IsPrimitiveByte() const { + return GetPrimitiveType() == Primitive::kPrimByte; + } + + bool IsPrimitiveChar() const { + return GetPrimitiveType() == Primitive::kPrimChar; + } + + bool IsPrimitiveShort() const { + return GetPrimitiveType() == Primitive::kPrimShort; + } + + bool IsPrimitiveInt() const { + return GetPrimitiveType() == Primitive::kPrimInt; + } + + bool IsPrimitiveLong() const { + return GetPrimitiveType() == Primitive::kPrimLong; + } + + bool IsPrimitiveFloat() const { + return GetPrimitiveType() == Primitive::kPrimFloat; + } + + bool IsPrimitiveDouble() const { + return GetPrimitiveType() == Primitive::kPrimDouble; + } + + bool IsPrimitiveVoid() const { + return GetPrimitiveType() == Primitive::kPrimVoid; + } + + bool IsPrimitiveArray() const { + return IsArrayClass() && GetComponentType()->IsPrimitive(); + } + + // Depth of class from java.lang.Object + size_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t depth = 0; + for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) { + depth++; + } + return depth; + } + + bool IsArrayClass() const { + return GetComponentType() != NULL; + } + + bool IsClassClass() const; + + bool 
IsStringClass() const; + + bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsFieldClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsMethodClass() const; + + Class* GetComponentType() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), false); + } + + void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(GetComponentType() == NULL); + DCHECK(new_component_type != NULL); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), new_component_type, false); + } + + size_t GetComponentSize() const { + return Primitive::ComponentSize(GetComponentType()->GetPrimitiveType()); + } + + bool IsObjectClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return !IsPrimitive() && GetSuperClass() == NULL; + } + bool IsInstantiable() const { + return !IsPrimitive() && !IsInterface() && !IsAbstract(); + } + + bool IsObjectArrayClass() const { + return GetComponentType() != NULL && !GetComponentType()->IsPrimitive(); + } + + // Creates a raw object instance but does not invoke the default constructor. 
+ Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsVariableSize() const { + // Classes and arrays vary in size, and so the object_size_ field cannot + // be used to get their instance size + return IsClassClass() || IsArrayClass(); + } + + size_t SizeOf() const { + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false); + } + + size_t GetClassSize() const { + DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false); + } + + void SetClassSize(size_t new_class_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetObjectSize(size_t new_object_size) { + DCHECK(!IsVariableSize()); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size, false); + } + + // Returns true if this class is in the same packages as that class. + bool IsInSamePackage(const Class* that) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); + + // Returns true if this class can access that class. + bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return that->IsPublic() || this->IsInSamePackage(that); + } + + // Can this class access a member in the provided class with the provided member access flags? 
+ // Note that access to the class isn't checked in case the declaring class is protected and the + // method has been exposed by a public sub-class + bool CanAccessMember(Class* access_to, uint32_t member_flags) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Classes can access all of their own members + if (this == access_to) { + return true; + } + // Public members are trivially accessible + if (member_flags & kAccPublic) { + return true; + } + // Private members are trivially not accessible + if (member_flags & kAccPrivate) { + return false; + } + // Check for protected access from a sub-class, which may or may not be in the same package. + if (member_flags & kAccProtected) { + if (this->IsSubClass(access_to)) { + return true; + } + } + // Allow protected access from other classes in the same package. + return this->IsInSamePackage(access_to); + } + + bool IsSubClass(const Class* klass) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Can src be assigned to this class? For example, String can be assigned to Object (by an + // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing + // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface + // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign + // to themselves. Classes for primitive types may not assign to each other. + bool IsAssignableFrom(const Class* src) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(src != NULL); + if (this == src) { + // Can always assign to things of the same type. + return true; + } else if (IsObjectClass()) { + // Can assign any reference to java.lang.Object. 
+ return !src->IsPrimitive(); + } else if (IsInterface()) { + return src->Implements(this); + } else if (src->IsArrayClass()) { + return IsAssignableFromArray(src); + } else { + return !src->IsInterface() && src->IsSubClass(this); + } + } + + Class* GetSuperClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // super class is assigned once, except during class linker initialization + Class* old_super_class = GetFieldObject( + OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false); + DCHECK(old_super_class == NULL || old_super_class == new_super_class); + DCHECK(new_super_class != NULL); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class, false); + } + + bool HasSuperClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetSuperClass() != NULL; + } + + static MemberOffset SuperClassOffset() { + return MemberOffset(OFFSETOF_MEMBER(Class, super_class_)); + } + + ClassLoader* GetClassLoader() const; + + void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static MemberOffset DexCacheOffset() { + return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_)); + } + + enum { + kDumpClassFullDetail = 1, + kDumpClassClassLoader = (1 << 1), + kDumpClassInitialized = (1 << 2), + }; + + void DumpClass(std::ostream& os, int flags) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + DexCache* GetDexCache() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetDirectMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetDirectMethods(ObjectArray* new_direct_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* GetDirectMethod(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetDirectMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Returns the number of static, private, and constructor methods. + size_t NumDirectMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetVirtualMethods() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetVirtualMethods(ObjectArray* new_virtual_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Returns the number of non-inherited virtual methods. + size_t NumVirtualMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* GetVirtualMethod(uint32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* GetVirtualMethodDuringLinking(uint32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetVirtualMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetVTable() const; + + ObjectArray* GetVTableDuringLinking() const; + + void SetVTable(ObjectArray* new_vtable) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static MemberOffset VTableOffset() { + return OFFSET_OF_OBJECT_MEMBER(Class, vtable_); + } + + // Given a method implemented by this class but potentially from a super class, return the + // specific implementation method for this class. + AbstractMethod* FindVirtualMethodForVirtual(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Given a method implemented by this class' super class, return the specific implementation + // method for this class. + AbstractMethod* FindVirtualMethodForSuper(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Given a method implemented by this class, but potentially from a + // super class or interface, return the specific implementation + // method for this class. 
+ AbstractMethod* FindVirtualMethodForInterface(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindVirtualMethodForVirtualOrInterface(AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + AbstractMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + int32_t GetIfTableCount() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + IfTable* GetIfTable() const; + + void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Get instance fields of the class (See also 
GetSFields). + ObjectArray* GetIFields() const; + + void SetIFields(ObjectArray* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t NumInstanceFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* GetInstanceField(uint32_t i) const // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Returns the number of instance fields containing reference types. + size_t NumReferenceInstanceFields() const { + DCHECK(IsResolved() || IsErroneous()); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false); + } + + size_t NumReferenceInstanceFieldsDuringLinking() const { + DCHECK(IsLoaded() || IsErroneous()); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false); + } + + void SetNumReferenceInstanceFields(size_t new_num) { + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num, false); + } + + uint32_t GetReferenceInstanceOffsets() const { + DCHECK(IsResolved() || IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), false); + } + + void SetReferenceInstanceOffsets(uint32_t new_reference_offsets) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Beginning of static field data + static MemberOffset FieldsOffset() { + return OFFSET_OF_OBJECT_MEMBER(Class, fields_); + } + + // Returns the number of static fields containing reference types. 
+ size_t NumReferenceStaticFields() const { + DCHECK(IsResolved() || IsErroneous()); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false); + } + + size_t NumReferenceStaticFieldsDuringLinking() const { + DCHECK(IsLoaded() || IsErroneous()); + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false); + } + + void SetNumReferenceStaticFields(size_t new_num) { + DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num, false); + } + + // Gets the static fields of the class. + ObjectArray* GetSFields() const; + + void SetSFields(ObjectArray* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t NumStaticFields() const; + + Field* GetStaticField(uint32_t i) const; // TODO: uint16_t + + void SetStaticField(uint32_t i, Field* f); // TODO: uint16_t + + uint32_t GetReferenceStaticOffsets() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), false); + } + + void SetReferenceStaticOffsets(uint32_t new_reference_offsets); + + // Find a static or instance field using the JLS resolution order + Field* FindField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Finds the given instance field in this class or a superclass. + Field* FindInstanceField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Finds the given instance field in this class or a superclass, only searches classes that + // have the same dex cache. 
+ Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Finds the given static field in this class or a superclass. + Field* FindStaticField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Finds the given static field in this class or superclass, only searches classes that + // have the same dex cache. + Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + pid_t GetClinitThreadId() const { + DCHECK(IsIdxLoaded() || IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), false); + } + + void SetClinitThreadId(pid_t new_clinit_thread_id) { + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id, false); + } + + Class* GetVerifyErrorClass() const { + // DCHECK(IsErroneous()); + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), false); + } + + uint16_t GetDexTypeIndex() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), false); + } + + void SetDexTypeIndex(uint16_t type_idx) { + SetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx, false); + } + + static Class* GetJavaLangClass() { + DCHECK(java_lang_Class_ != NULL); + return java_lang_Class_; + } + + // Can't call this SetClass or else gets called instead of 
Object::SetClass in places. + static void SetClassClass(Class* java_lang_Class); + static void ResetClass(); + + private: + void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool Implements(const Class* klass) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsArrayAssignableFromArray(const Class* klass) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsAssignableFromArray(const Class* klass) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // defining class loader, or NULL for the "bootstrap" system loader + ClassLoader* class_loader_; + + // For array classes, the component class object for instanceof/checkcast + // (for String[][][], this will be String[][]). NULL for non-array classes. + Class* component_type_; + + // DexCache of resolved constant pool entries (will be NULL for classes generated by the + // runtime such as arrays and primitive classes). + DexCache* dex_cache_; + + // static, private, and methods + ObjectArray* direct_methods_; + + // instance fields + // + // These describe the layout of the contents of an Object. + // Note that only the fields directly declared by this class are + // listed in ifields; fields declared by a superclass are listed in + // the superclass's Class.ifields. + // + // All instance fields that refer to objects are guaranteed to be at + // the beginning of the field list. num_reference_instance_fields_ + // specifies the number of reference fields. + ObjectArray* ifields_; + + // The interface table (iftable_) contains pairs of a interface class and an array of the + // interface methods. There is one pair per interface supported by this class. That means one + // pair for each interface we support directly, indirectly via superclass, or indirectly via a + // superinterface. This will be null if neither we nor our superclass implement any interfaces. + // + // Why we need this: given "class Foo implements Face", declare "Face faceObj = new Foo()". 
+ // Invoke faceObj.blah(), where "blah" is part of the Face interface. We can't easily use a + // single vtable. + // + // For every interface a concrete class implements, we create an array of the concrete vtable_ + // methods for the methods in the interface. + IfTable* iftable_; + + // descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName + String* name_; + + // Static fields + ObjectArray* sfields_; + + // The superclass, or NULL if this is java.lang.Object, an interface or primitive type. + Class* super_class_; + + // If class verify fails, we must return same error on subsequent tries. + Class* verify_error_class_; + + // virtual methods defined in this class; invoked through vtable + ObjectArray* virtual_methods_; + + // Virtual method table (vtable), for use by "invoke-virtual". The vtable from the superclass is + // copied in, and virtual methods from our class either replace those from the super or are + // appended. For abstract classes, methods may be created in the vtable that aren't in + // virtual_ methods_ for miranda methods. + ObjectArray* vtable_; + + // access flags; low 16 bits are defined by VM spec + uint32_t access_flags_; + + // Total size of the Class instance; used when allocating storage on gc heap. + // See also object_size_. + size_t class_size_; + + // tid used to check for recursive invocation + pid_t clinit_thread_id_; + + // type index from dex file + // TODO: really 16bits + uint32_t dex_type_idx_; + + // number of instance fields that are object refs + size_t num_reference_instance_fields_; + + // number of static fields that are object refs + size_t num_reference_static_fields_; + + // Total object size; used when allocating storage on gc heap. + // (For interfaces and abstract classes this will be zero.) + // See also class_size_. 
+ size_t object_size_; + + // primitive type value, or Primitive::kPrimNot (0); set for generated prim classes + Primitive::Type primitive_type_; + + // Bitmap of offsets of ifields. + uint32_t reference_instance_offsets_; + + // Bitmap of offsets of sfields. + uint32_t reference_static_offsets_; + + // state of class initialization + Status status_; + + // TODO: ? + // initiating class loader list + // NOTE: for classes with low serialNumber, these are unused, and the + // values are kept in a table in gDvm. + // InitiatingLoaderList initiating_loader_list_; + + // Location of first static field. + uint32_t fields_[0]; + + // java.lang.Class + static Class* java_lang_Class_; + + friend struct art::ClassOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(Class); +}; + +std::ostream& operator<<(std::ostream& os, const Class::Status& rhs); + +class MANAGED ClassClass : public Class { + private: + int32_t padding_; + int64_t serialVersionUID_; + friend struct art::ClassClassOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(ClassClass); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_CLASS_H_ diff --git a/src/mirror/class_loader.h b/src/mirror/class_loader.h new file mode 100644 index 0000000000..0d635f1d21 --- /dev/null +++ b/src/mirror/class_loader.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_CLASS_LOADER_H_ +#define ART_SRC_CLASS_LOADER_H_ + +#include + +#include "dex_file.h" +#include "mirror/object.h" + +namespace art { + +struct ClassLoaderOffsets; + +namespace mirror { + +// C++ mirror of java.lang.ClassLoader +class MANAGED ClassLoader : public Object { + private: + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". + Object* packages_; + ClassLoader* parent_; + Object* proxyCache_; + + friend struct art::ClassLoaderOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_CLASS_LOADER_H_ diff --git a/src/mirror/dex_cache.cc b/src/mirror/dex_cache.cc new file mode 100644 index 0000000000..3009786baa --- /dev/null +++ b/src/mirror/dex_cache.cc @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_cache.h" + +#include "abstract_method-inl.h" +#include "base/logging.h" +#include "class_linker.h" +#include "heap.h" +#include "gc/card_table-inl.h" +#include "globals.h" +#include "object.h" +#include "object-inl.h" +#include "object_array-inl.h" +#include "runtime.h" +#include "string.h" + +namespace art { +namespace mirror { + +void DexCache::Init(const DexFile* dex_file, + String* location, + ObjectArray* strings, + ObjectArray* resolved_types, + ObjectArray* resolved_methods, + ObjectArray* resolved_fields, + ObjectArray* initialized_static_storage) { + CHECK(dex_file != NULL); + CHECK(location != NULL); + CHECK(strings != NULL); + CHECK(resolved_types != NULL); + CHECK(resolved_methods != NULL); + CHECK(resolved_fields != NULL); + CHECK(initialized_static_storage != NULL); + + SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location, false); + SetFieldObject(StringsOffset(), strings, false); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types, false); + SetFieldObject(ResolvedMethodsOffset(), resolved_methods, false); + SetFieldObject(ResolvedFieldsOffset(), resolved_fields, false); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_), + initialized_static_storage, false); + + Runtime* runtime = Runtime::Current(); + if (runtime->HasResolutionMethod()) { + // Initialize the resolve methods array to contain trampolines for resolution. + AbstractMethod* trampoline = runtime->GetResolutionMethod(); + size_t length = resolved_methods->GetLength(); + for (size_t i = 0; i < length; i++) { + resolved_methods->SetWithoutChecks(i, trampoline); + } + } +} + +void DexCache::Fixup(AbstractMethod* trampoline) { + // Fixup the resolve methods array to contain trampoline for resolution. 
+ CHECK(trampoline != NULL); + ObjectArray* resolved_methods = GetResolvedMethods(); + size_t length = resolved_methods->GetLength(); + for (size_t i = 0; i < length; i++) { + if (resolved_methods->GetWithoutChecks(i) == NULL) { + resolved_methods->SetWithoutChecks(i, trampoline); + } + } +} + +AbstractMethod* DexCache::GetResolvedMethod(uint32_t method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + AbstractMethod* method = GetResolvedMethods()->Get(method_idx); + // Hide resolution trampoline methods from the caller + if (method != NULL && method->IsRuntimeMethod()) { + DCHECK(method == Runtime::Current()->GetResolutionMethod()); + return NULL; + } else { + return method; + } +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/dex_cache.h b/src/mirror/dex_cache.h new file mode 100644 index 0000000000..307588b581 --- /dev/null +++ b/src/mirror/dex_cache.h @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_DEX_CACHE_H_ +#define ART_SRC_MIRROR_DEX_CACHE_H_ + +#include "abstract_method.h" +#include "class.h" +#include "object.h" +#include "object_array.h" +#include "string.h" + +namespace art { + +struct DexCacheOffsets; +class DexFile; +class ImageWriter; +union JValue; + +namespace mirror { + +class Class; +class Field; + +class MANAGED DexCacheClass : public Class { + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(DexCacheClass); +}; + +class MANAGED DexCache : public Object { + public: + void Init(const DexFile* dex_file, + String* location, + ObjectArray* strings, + ObjectArray* types, + ObjectArray* methods, + ObjectArray* fields, + ObjectArray* initialized_static_storage) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Fixup(AbstractMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + String* GetLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), false); + } + + static MemberOffset StringsOffset() { + return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_); + } + + static MemberOffset ResolvedFieldsOffset() { + return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_); + } + + static MemberOffset ResolvedMethodsOffset() { + return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_); + } + + size_t NumStrings() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetStrings()->GetLength(); + } + + size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetResolvedTypes()->GetLength(); + } + + size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetResolvedMethods()->GetLength(); + } + + size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetResolvedFields()->GetLength(); + } + + size_t NumInitializedStaticStorage() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetInitializedStaticStorage()->GetLength(); + } + + 
String* GetResolvedString(uint32_t string_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetStrings()->Get(string_idx); + } + + void SetResolvedString(uint32_t string_idx, String* resolved) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + GetStrings()->Set(string_idx, resolved); + } + + Class* GetResolvedType(uint32_t type_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetResolvedTypes()->Get(type_idx); + } + + void SetResolvedType(uint32_t type_idx, Class* resolved) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + GetResolvedTypes()->Set(type_idx, resolved); + } + + AbstractMethod* GetResolvedMethod(uint32_t method_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetResolvedMethod(uint32_t method_idx, AbstractMethod* resolved) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + GetResolvedMethods()->Set(method_idx, resolved); + } + + Field* GetResolvedField(uint32_t field_idx) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetResolvedFields()->Get(field_idx); + } + + void SetResolvedField(uint32_t field_idx, Field* resolved) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + GetResolvedFields()->Set(field_idx, resolved); + } + + ObjectArray* GetStrings() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject< ObjectArray* >(StringsOffset(), false); + } + + ObjectArray* GetResolvedTypes() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject< ObjectArray* >( + OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), false); + } + + ObjectArray* GetResolvedMethods() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject< ObjectArray* >(ResolvedMethodsOffset(), false); + } + + ObjectArray* GetResolvedFields() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject< ObjectArray* >(ResolvedFieldsOffset(), false); + } + + ObjectArray* GetInitializedStaticStorage() const + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject< ObjectArray* >( + OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_), false); + } + + const DexFile* GetDexFile() const { + return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), false); + } + + void SetDexFile(const DexFile* dex_file) { + return SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false); + } + + private: + ObjectArray* initialized_static_storage_; + String* location_; + ObjectArray* resolved_fields_; + ObjectArray* resolved_methods_; + ObjectArray* resolved_types_; + ObjectArray* strings_; + uint32_t dex_file_; + + friend struct art::DexCacheOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_DEX_CACHE_H_ diff --git a/src/mirror/dex_cache_test.cc b/src/mirror/dex_cache_test.cc new file mode 100644 index 0000000000..98176602a1 --- /dev/null +++ b/src/mirror/dex_cache_test.cc @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "class_linker.h" +#include "common_test.h" +#include "dex_cache.h" +#include "heap.h" +#include "mirror/object_array-inl.h" +#include "sirt_ref.h" + +#include + +namespace art { +namespace mirror { + +class DexCacheTest : public CommonTest {}; + +TEST_F(DexCacheTest, Open) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef dex_cache(soa.Self(), class_linker_->AllocDexCache(soa.Self(), + *java_lang_dex_file_)); + ASSERT_TRUE(dex_cache.get() != NULL); + + EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings()); + EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes()); + EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods()); + EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields()); + EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumInitializedStaticStorage()); + + EXPECT_LE(0, dex_cache->GetStrings()->GetLength()); + EXPECT_LE(0, dex_cache->GetResolvedTypes()->GetLength()); + EXPECT_LE(0, dex_cache->GetResolvedMethods()->GetLength()); + EXPECT_LE(0, dex_cache->GetResolvedFields()->GetLength()); + EXPECT_LE(0, dex_cache->GetInitializedStaticStorage()->GetLength()); + + EXPECT_EQ(java_lang_dex_file_->NumStringIds(), + static_cast(dex_cache->GetStrings()->GetLength())); + EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), + static_cast(dex_cache->GetResolvedTypes()->GetLength())); + EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), + static_cast(dex_cache->GetResolvedMethods()->GetLength())); + EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), + static_cast(dex_cache->GetResolvedFields()->GetLength())); + EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), + static_cast(dex_cache->GetInitializedStaticStorage()->GetLength())); +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/field-inl.h b/src/mirror/field-inl.h new file mode 100644 index 0000000000..b73cf19214 --- /dev/null +++ b/src/mirror/field-inl.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2011 The 
Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_FIELD_INL_H_ +#define ART_SRC_MIRROR_FIELD_INL_H_ + +#include "field.h" + +namespace art { +namespace mirror { + +inline Class* Field::GetDeclaringClass() const { + Class* result = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), false); + DCHECK(result != NULL); + DCHECK(result->IsLoaded() || result->IsErroneous()); + return result; +} + +inline void Field::SetDeclaringClass(Class *new_declaring_class) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), new_declaring_class, false); +} + +inline uint32_t Field::GetAccessFlags() const { + DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); + return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), false); +} + +inline MemberOffset Field::GetOffset() const { + DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous()); + return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), false)); +} + +inline MemberOffset Field::GetOffsetDuringLinking() const { + DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); + return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), false)); +} + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_FIELD_INL_H_ diff --git a/src/mirror/field.cc b/src/mirror/field.cc new file mode 100644 index 0000000000..dab7868ae6 
--- /dev/null +++ b/src/mirror/field.cc @@ -0,0 +1,223 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "field.h" + +#include "field-inl.h" +#include "gc/card_table-inl.h" +#include "object-inl.h" +#include "object_utils.h" +#include "runtime.h" +#include "utils.h" + +namespace art { +namespace mirror { + +// TODO: get global references for these +Class* Field::java_lang_reflect_Field_ = NULL; + +void Field::SetClass(Class* java_lang_reflect_Field) { + CHECK(java_lang_reflect_Field_ == NULL); + CHECK(java_lang_reflect_Field != NULL); + java_lang_reflect_Field_ = java_lang_reflect_Field; +} + +void Field::ResetClass() { + CHECK(java_lang_reflect_Field_ != NULL); + java_lang_reflect_Field_ = NULL; +} + +void Field::SetOffset(MemberOffset num_bytes) { + DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); +#if 0 // TODO enable later in boot and under !NDEBUG + FieldHelper fh(this); + Primitive::Type type = fh.GetTypeAsPrimitiveType(); + if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) { + DCHECK_ALIGNED(num_bytes.Uint32Value(), 8); + } +#endif + SetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), num_bytes.Uint32Value(), false); +} + +uint32_t Field::Get32(const Object* object) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + return 
object->GetField32(GetOffset(), IsVolatile()); +} + +void Field::Set32(Object* object, uint32_t new_value) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + object->SetField32(GetOffset(), new_value, IsVolatile()); +} + +uint64_t Field::Get64(const Object* object) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + return object->GetField64(GetOffset(), IsVolatile()); +} + +void Field::Set64(Object* object, uint64_t new_value) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + object->SetField64(GetOffset(), new_value, IsVolatile()); +} + +Object* Field::GetObj(const Object* object) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + return object->GetFieldObject(GetOffset(), IsVolatile()); +} + +void Field::SetObj(Object* object, const Object* new_value) const { + DCHECK(object != NULL) << PrettyField(this); + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); + object->SetFieldObject(GetOffset(), new_value, IsVolatile()); +} + +bool Field::GetBoolean(const Object* object) const { + DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + return Get32(object); +} + +void Field::SetBoolean(Object* object, bool z) const { + DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Set32(object, z); +} + +int8_t Field::GetByte(const Object* object) const { + DCHECK_EQ(Primitive::kPrimByte, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + return Get32(object); +} + +void Field::SetByte(Object* object, int8_t b) const { + 
DCHECK_EQ(Primitive::kPrimByte, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Set32(object, b); +} + +uint16_t Field::GetChar(const Object* object) const { + DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + return Get32(object); +} + +void Field::SetChar(Object* object, uint16_t c) const { + DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Set32(object, c); +} + +int16_t Field::GetShort(const Object* object) const { + DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + return Get32(object); +} + +void Field::SetShort(Object* object, int16_t s) const { + DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Set32(object, s); +} + +int32_t Field::GetInt(const Object* object) const { +#ifndef NDEBUG + Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); + CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this); +#endif + return Get32(object); +} + +void Field::SetInt(Object* object, int32_t i) const { +#ifndef NDEBUG + Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); + CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this); +#endif + Set32(object, i); +} + +int64_t Field::GetLong(const Object* object) const { +#ifndef NDEBUG + Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); + CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this); +#endif + return Get64(object); +} + +void Field::SetLong(Object* object, int64_t j) const { +#ifndef NDEBUG + Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); + CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this); +#endif + Set64(object, j); +} + +union Bits { + jdouble d; + jfloat f; + jint i; + jlong j; 
+}; + +float Field::GetFloat(const Object* object) const { + DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Bits bits; + bits.i = Get32(object); + return bits.f; +} + +void Field::SetFloat(Object* object, float f) const { + DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Bits bits; + bits.f = f; + Set32(object, bits.i); +} + +double Field::GetDouble(const Object* object) const { + DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Bits bits; + bits.j = Get64(object); + return bits.d; +} + +void Field::SetDouble(Object* object, double d) const { + DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + Bits bits; + bits.d = d; + Set64(object, bits.j); +} + +Object* Field::GetObject(const Object* object) const { + DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + return GetObj(object); +} + +void Field::SetObject(Object* object, const Object* l) const { + DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType()) + << PrettyField(this); + SetObj(object, l); +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/field.h b/src/mirror/field.h new file mode 100644 index 0000000000..4e7abe8550 --- /dev/null +++ b/src/mirror/field.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_FIELD_H_ +#define ART_SRC_MIRROR_FIELD_H_ + +#include "class.h" +#include "modifiers.h" +#include "object.h" + +namespace art { + +struct FieldClassOffsets; +struct FieldOffsets; + +namespace mirror { + +// C++ mirror of java.lang.reflect.Field +class MANAGED Field : public Object { + public: + Class* GetDeclaringClass() const; + + void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + uint32_t GetAccessFlags() const; + + void SetAccessFlags(uint32_t new_access_flags) { + SetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), new_access_flags, false); + } + + bool IsPublic() const { + return (GetAccessFlags() & kAccPublic) != 0; + } + + bool IsStatic() const { + return (GetAccessFlags() & kAccStatic) != 0; + } + + bool IsFinal() const { + return (GetAccessFlags() & kAccFinal) != 0; + } + + uint32_t GetDexFieldIndex() const { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, field_dex_idx_), false); + } + + void SetDexFieldIndex(uint32_t new_idx) { + SetField32(OFFSET_OF_OBJECT_MEMBER(Field, field_dex_idx_), new_idx, false); + } + + // Offset to field within an Object + MemberOffset GetOffset() const; + + static MemberOffset OffsetOffset() { + return MemberOffset(OFFSETOF_MEMBER(Field, offset_)); + } + + MemberOffset GetOffsetDuringLinking() const; + + void SetOffset(MemberOffset num_bytes); + + // field access, null object for static fields + bool GetBoolean(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetBoolean(Object* object, bool z) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int8_t GetByte(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetByte(Object* object, int8_t b) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetChar(const Object* object) const + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetChar(Object* object, uint16_t c) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int16_t GetShort(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetShort(Object* object, int16_t s) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetInt(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetInt(Object* object, int32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int64_t GetLong(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetLong(Object* object, int64_t j) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + float GetFloat(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetFloat(Object* object, float f) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + double GetDouble(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDouble(Object* object, double d) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* GetObject(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetObject(Object* object, const Object* l) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // raw field accesses + uint32_t Get32(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set32(Object* object, uint32_t new_value) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint64_t Get64(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set64(Object* object, uint64_t new_value) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* GetObj(const Object* object) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetObj(Object* object, const Object* new_value) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static Class* GetJavaLangReflectField() { + DCHECK(java_lang_reflect_Field_ != NULL); + return 
java_lang_reflect_Field_; + } + + static void SetClass(Class* java_lang_reflect_Field); + static void ResetClass(); + + bool IsVolatile() const { + return (GetAccessFlags() & kAccVolatile) != 0; + } + + private: + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". + // The class we are a part of + Class* declaring_class_; + + uint32_t access_flags_; + + // Dex cache index of field id + uint32_t field_dex_idx_; + + // Offset of field within an instance or in the Class' static fields + uint32_t offset_; + + static Class* java_lang_reflect_Field_; + + friend struct art::FieldOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(Field); +}; + +class MANAGED FieldClass : public Class { + private: + Object* ORDER_BY_NAME_AND_DECLARING_CLASS_; + friend struct art::FieldClassOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(FieldClass); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_FIELD_H_ diff --git a/src/mirror/iftable-inl.h b/src/mirror/iftable-inl.h new file mode 100644 index 0000000000..72803b8002 --- /dev/null +++ b/src/mirror/iftable-inl.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_IFTABLE_INL_H_ +#define ART_SRC_MIRROR_IFTABLE_INL_H_ + +#include "iftable.h" + +namespace art { +namespace mirror { + +inline void IfTable::SetInterface(int32_t i, Class* interface) { + DCHECK(interface != NULL); + DCHECK(interface->IsInterface()); + DCHECK(Get((i * kMax) + kInterface) == NULL); + Set((i * kMax) + kInterface, interface); +} + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_IFTABLE_INL_H_ diff --git a/src/mirror/iftable.h b/src/mirror/iftable.h new file mode 100644 index 0000000000..ffb2e51582 --- /dev/null +++ b/src/mirror/iftable.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_IFTABLE_H_ +#define ART_SRC_MIRROR_IFTABLE_H_ + +#include "object_array.h" + +namespace art { +namespace mirror { + +class MANAGED IfTable : public ObjectArray { + public: + Class* GetInterface(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Class* interface = Get((i * kMax) + kInterface)->AsClass(); + DCHECK(interface != NULL); + return interface; + } + + void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* GetMethodArray(int32_t i) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray* method_array = + down_cast*>(Get((i * kMax) + kMethodArray)); + DCHECK(method_array != NULL); + return method_array; + } + + size_t GetMethodArrayCount(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray* method_array = + down_cast*>(Get((i * kMax) + kMethodArray)); + if (method_array == NULL) { + return 0; + } + return method_array->GetLength(); + } + + void SetMethodArray(int32_t i, ObjectArray* new_ma) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(new_ma != NULL); + DCHECK(Get((i * kMax) + kMethodArray) == NULL); + Set((i * kMax) + kMethodArray, new_ma); + } + + size_t Count() const { + return GetLength() / kMax; + } + + enum { + // Points to the interface class. + kInterface = 0, + // Method pointers into the vtable, allow fast map from interface method index to concrete + // instance method. 
+ kMethodArray = 1, + kMax = 2, + }; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(IfTable); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_IFTABLE_H_ diff --git a/src/mirror/object-inl.h b/src/mirror/object-inl.h new file mode 100644 index 0000000000..723192dc8a --- /dev/null +++ b/src/mirror/object-inl.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_OBJECT_INL_H_ +#define ART_SRC_MIRROR_OBJECT_INL_H_ + +#include "object.h" + +#include "abstract_method.h" +#include "atomic.h" +#include "array.h" +#include "field.h" +#include "class.h" +#include "runtime.h" + +namespace art { +namespace mirror { + +inline Class* Object::GetClass() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false); +} + +inline void Object::SetClass(Class* new_klass) { + // new_klass may be NULL prior to class linker initialization + // We don't mark the card since the class is guaranteed to be referenced from another location. + // Proxy classes are held live by the class loader, and other classes are roots of the class + // linker. 
+ SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false); +} + +inline bool Object::InstanceOf(const Class* klass) const { + DCHECK(klass != NULL); + DCHECK(GetClass() != NULL); + return klass->IsAssignableFrom(GetClass()); +} + +inline bool Object::IsClass() const { + Class* java_lang_Class = GetClass()->GetClass(); + return GetClass() == java_lang_Class; +} + +inline Class* Object::AsClass() { + DCHECK(IsClass()); + return down_cast(this); +} + +inline const Class* Object::AsClass() const { + DCHECK(IsClass()); + return down_cast(this); +} + +inline bool Object::IsObjectArray() const { + return IsArrayInstance() && !GetClass()->GetComponentType()->IsPrimitive(); +} + +template +inline ObjectArray* Object::AsObjectArray() { + DCHECK(IsObjectArray()); + return down_cast*>(this); +} + +template +inline const ObjectArray* Object::AsObjectArray() const { + DCHECK(IsObjectArray()); + return down_cast*>(this); +} + +inline bool Object::IsArrayInstance() const { + return GetClass()->IsArrayClass(); +} + +inline bool Object::IsField() const { + return GetClass()->IsFieldClass(); +} + +inline Field* Object::AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(IsField()); + return down_cast(this); +} + +inline const Field* Object::AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(IsField()); + return down_cast(this); +} + +inline bool Object::IsMethod() const { + return GetClass()->IsMethodClass(); +} + +inline AbstractMethod* Object::AsMethod() { + DCHECK(IsMethod()); + return down_cast(this); +} + +inline const AbstractMethod* Object::AsMethod() const { + DCHECK(IsMethod()); + return down_cast(this); +} + +inline bool Object::IsReferenceInstance() const { + return GetClass()->IsReferenceClass(); +} + +inline bool Object::IsWeakReferenceInstance() const { + return GetClass()->IsWeakReferenceClass(); +} + +inline bool Object::IsSoftReferenceInstance() const { + return GetClass()->IsSoftReferenceClass(); +} + +inline 
bool Object::IsFinalizerReferenceInstance() const { + return GetClass()->IsFinalizerReferenceClass(); +} + +inline bool Object::IsPhantomReferenceInstance() const { + return GetClass()->IsPhantomReferenceClass(); +} + +inline size_t Object::SizeOf() const { + size_t result; + if (IsArrayInstance()) { + result = AsArray()->SizeOf(); + } else if (IsClass()) { + result = AsClass()->SizeOf(); + } else { + result = GetClass()->GetObjectSize(); + } + DCHECK(!IsField() || result == sizeof(Field)); + DCHECK(!IsMethod() || result == sizeof(AbstractMethod)); + return result; +} + +inline uint64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) const { + VerifyObject(this); + const byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); + const int64_t* addr = reinterpret_cast(raw_addr); + if (UNLIKELY(is_volatile)) { + uint64_t result = QuasiAtomic::Read64(addr); + ANDROID_MEMBAR_FULL(); + return result; + } else { + return *addr; + } +} + +inline void Object::SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile) { + VerifyObject(this); + byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); + int64_t* addr = reinterpret_cast(raw_addr); + if (UNLIKELY(is_volatile)) { + ANDROID_MEMBAR_STORE(); + QuasiAtomic::Write64(addr, new_value); + // Post-store barrier not required due to use of atomic op or mutex. 
+ } else { + *addr = new_value; + } +} + +inline void Object::WriteBarrierField(const Object* dst, MemberOffset field_offset, + const Object* new_value) { + Runtime::Current()->GetHeap()->WriteBarrierField(dst, field_offset, new_value); +} + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_OBJECT_INL_H_ diff --git a/src/mirror/object.cc b/src/mirror/object.cc new file mode 100644 index 0000000000..27a42d3334 --- /dev/null +++ b/src/mirror/object.cc @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "object.h" + +#include "array-inl.h" +#include "class.h" +#include "class-inl.h" +#include "field.h" +#include "field-inl.h" +#include "gc/card_table-inl.h" +#include "heap.h" +#include "monitor.h" +#include "object-inl.h" +#include "object_array.h" +#include "object_utils.h" +#include "runtime.h" +#include "sirt_ref.h" +#include "throwable.h" +#include "well_known_classes.h" + +namespace art { +namespace mirror { + +Array* Object::AsArray() { + DCHECK(IsArrayInstance()); + return down_cast(this); +} + +const Array* Object::AsArray() const { + DCHECK(IsArrayInstance()); + return down_cast(this); +} + +BooleanArray* Object::AsBooleanArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveBoolean()); + return down_cast(this); +} + +ByteArray* Object::AsByteArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte()); + return down_cast(this); +} + +CharArray* Object::AsCharArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveChar()); + return down_cast(this); +} + +ShortArray* Object::AsShortArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort()); + return down_cast(this); +} + +IntArray* Object::AsIntArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveInt() || + GetClass()->GetComponentType()->IsPrimitiveFloat()); + return down_cast(this); +} + +LongArray* Object::AsLongArray() { + DCHECK(GetClass()->IsArrayClass()); + DCHECK(GetClass()->GetComponentType()->IsPrimitiveLong() || + GetClass()->GetComponentType()->IsPrimitiveDouble()); + return down_cast(this); +} + +String* Object::AsString() { + DCHECK(GetClass()->IsStringClass()); + return down_cast(this); +} + +Throwable* Object::AsThrowable() { + DCHECK(GetClass()->IsThrowableClass()); + return down_cast(this); +} + +Object* Object::Clone(Thread* self) { + Class* 
c = GetClass(); + DCHECK(!c->IsClassClass()); + + // Object::SizeOf gets the right size even if we're an array. + // Using c->AllocObject() here would be wrong. + size_t num_bytes = SizeOf(); + Heap* heap = Runtime::Current()->GetHeap(); + SirtRef copy(self, heap->AllocObject(self, c, num_bytes)); + if (copy.get() == NULL) { + return NULL; + } + + // Copy instance data. We assume memcpy copies by words. + // TODO: expose and use move32. + byte* src_bytes = reinterpret_cast(this); + byte* dst_bytes = reinterpret_cast(copy.get()); + size_t offset = sizeof(Object); + memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset); + + // Perform write barriers on copied object references. + if (c->IsArrayClass()) { + if (!c->GetComponentType()->IsPrimitive()) { + const ObjectArray* array = copy->AsObjectArray(); + heap->WriteBarrierArray(copy.get(), 0, array->GetLength()); + } + } else { + for (const Class* klass = c; klass != NULL; klass = klass->GetSuperClass()) { + size_t num_reference_fields = klass->NumReferenceInstanceFields(); + for (size_t i = 0; i < num_reference_fields; ++i) { + Field* field = klass->GetInstanceField(i); + MemberOffset field_offset = field->GetOffset(); + const Object* ref = copy->GetFieldObject(field_offset, false); + heap->WriteBarrierField(copy.get(), field_offset, ref); + } + } + } + + if (c->IsFinalizable()) { + heap->AddFinalizerReference(Thread::Current(), copy.get()); + } + + return copy.get(); +} + +uint32_t Object::GetThinLockId() { + return Monitor::GetThinLockId(monitor_); +} + +void Object::MonitorEnter(Thread* thread) { + Monitor::MonitorEnter(thread, this); +} + +bool Object::MonitorExit(Thread* thread) { + return Monitor::MonitorExit(thread, this); +} + +void Object::Notify() { + Monitor::Notify(Thread::Current(), this); +} + +void Object::NotifyAll() { + Monitor::NotifyAll(Thread::Current(), this); +} + +void Object::Wait() { + Monitor::Wait(Thread::Current(), this, 0, 0, true, kWaiting); +} + +void Object::Wait(int64_t 
ms, int32_t ns) { + Monitor::Wait(Thread::Current(), this, ms, ns, true, kTimedWaiting); +} + +#if VERIFY_OBJECT_ENABLED +void Object::CheckFieldAssignment(MemberOffset field_offset, const Object* new_value) { + const Class* c = GetClass(); + if (Runtime::Current()->GetClassLinker() == NULL || + !Runtime::Current()->GetHeap()->IsObjectValidationEnabled() || + !c->IsResolved()) { + return; + } + for (const Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) { + ObjectArray* fields = cur->GetIFields(); + if (fields != NULL) { + size_t num_ref_ifields = cur->NumReferenceInstanceFields(); + for (size_t i = 0; i < num_ref_ifields; ++i) { + Field* field = fields->Get(i); + if (field->GetOffset().Int32Value() == field_offset.Int32Value()) { + FieldHelper fh(field); + CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass())); + return; + } + } + } + } + if (c->IsArrayClass()) { + // Bounds and assign-ability done in the array setter. + return; + } + if (IsClass()) { + ObjectArray* fields = AsClass()->GetSFields(); + if (fields != NULL) { + size_t num_ref_sfields = AsClass()->NumReferenceStaticFields(); + for (size_t i = 0; i < num_ref_sfields; ++i) { + Field* field = fields->Get(i); + if (field->GetOffset().Int32Value() == field_offset.Int32Value()) { + FieldHelper fh(field); + CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass())); + return; + } + } + } + } + LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast(this) + << " of type " << PrettyDescriptor(c) << " at offset " << field_offset; +} +#endif + +} // namespace mirror +} // namespace art diff --git a/src/mirror/object.h b/src/mirror/object.h new file mode 100644 index 0000000000..e2cedd860b --- /dev/null +++ b/src/mirror/object.h @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_OBJECT_H_ +#define ART_SRC_MIRROR_OBJECT_H_ + +#include "base/casts.h" +#include "base/logging.h" +#include "base/macros.h" +#include "cutils/atomic-inline.h" +#include "offsets.h" + +namespace art { + +class ImageWriter; +struct ObjectOffsets; +class Thread; + +namespace mirror { + +class AbstractMethod; +class Array; +class Class; +class Field; +template class ObjectArray; +template class PrimitiveArray; +typedef PrimitiveArray BooleanArray; +typedef PrimitiveArray ByteArray; +typedef PrimitiveArray CharArray; +typedef PrimitiveArray DoubleArray; +typedef PrimitiveArray FloatArray; +typedef PrimitiveArray IntArray; +typedef PrimitiveArray LongArray; +typedef PrimitiveArray ShortArray; +class String; +class Throwable; + +// Classes shared with the managed side of the world need to be packed so that they don't have +// extra platform specific padding. +#define MANAGED PACKED(4) + +// Fields within mirror objects aren't accessed directly so that the appropriate amount of +// handshaking is done with GC (for example, read and write barriers). This macro is used to +// compute an offset for the Set/Get methods defined in Object that can safely access fields. 
+#define OFFSET_OF_OBJECT_MEMBER(type, field) \ + MemberOffset(OFFSETOF_MEMBER(type, field)) + +// C++ mirror of java.lang.Object +class MANAGED Object { + public: + static MemberOffset ClassOffset() { + return OFFSET_OF_OBJECT_MEMBER(Object, klass_); + } + + Class* GetClass() const; + + void SetClass(Class* new_klass); + + bool InstanceOf(const Class* klass) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + int32_t IdentityHashCode() const { + #ifdef MOVING_GARBAGE_COLLECTOR + // TODO: we'll need to use the Object's internal concept of identity + UNIMPLEMENTED(FATAL); + #endif + return reinterpret_cast(this); + } + + static MemberOffset MonitorOffset() { + return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); + } + + volatile int32_t* GetRawLockWordAddress() { + byte* raw_addr = reinterpret_cast(this) + + OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value(); + int32_t* word_addr = reinterpret_cast(raw_addr); + return const_cast(word_addr); + } + + uint32_t GetThinLockId(); + + void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); + + bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_); + + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Wait() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Wait(int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsClass() const; + + Class* AsClass(); + + const Class* AsClass() const; + + bool IsObjectArray() const; + + template + ObjectArray* AsObjectArray(); + + template + const ObjectArray* AsObjectArray() const; + + bool IsArrayInstance() const; + + Array* 
AsArray(); + + const Array* AsArray() const; + + BooleanArray* AsBooleanArray(); + ByteArray* AsByteArray(); + CharArray* AsCharArray(); + ShortArray* AsShortArray(); + IntArray* AsIntArray(); + LongArray* AsLongArray(); + + String* AsString(); + + Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsMethod() const; + + AbstractMethod* AsMethod(); + + const AbstractMethod* AsMethod() const; + + bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsReferenceInstance() const; + + bool IsWeakReferenceInstance() const; + + bool IsSoftReferenceInstance() const; + + bool IsFinalizerReferenceInstance() const; + + bool IsPhantomReferenceInstance() const; + + // Accessors for Java type fields + template + T GetFieldObject(MemberOffset field_offset, bool is_volatile) const { + T result = reinterpret_cast(GetField32(field_offset, is_volatile)); + VerifyObject(result); + return result; + } + + void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile, + bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + VerifyObject(new_value); + SetField32(field_offset, reinterpret_cast(new_value), is_volatile, this_is_valid); + if (new_value != NULL) { + CheckFieldAssignment(field_offset, new_value); + WriteBarrierField(this, field_offset, new_value); + } + } + + uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const { + VerifyObject(this); + const byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); + const int32_t* word_addr = reinterpret_cast(raw_addr); + if (UNLIKELY(is_volatile)) { + return android_atomic_acquire_load(word_addr); + } else { + return *word_addr; + } + } + + void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile, + bool this_is_valid = true) { + if (this_is_valid) { + 
VerifyObject(this); + } + byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); + uint32_t* word_addr = reinterpret_cast(raw_addr); + if (UNLIKELY(is_volatile)) { + /* + * TODO: add an android_atomic_synchronization_store() function and + * use it in the 32-bit volatile set handlers. On some platforms we + * can use a fast atomic instruction and avoid the barriers. + */ + ANDROID_MEMBAR_STORE(); + *word_addr = new_value; + ANDROID_MEMBAR_FULL(); + } else { + *word_addr = new_value; + } + } + + uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const; + + void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile); + + protected: + // Accessors for non-Java type fields + template + T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const { + return reinterpret_cast(GetField32(field_offset, is_volatile)); + } + + template + void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) { + SetField32(field_offset, reinterpret_cast(new_value), is_volatile, this_is_valid); + } + + private: +#if VERIFY_OBJECT_ENABLED + static void VerifyObject(const Object* obj); + void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +#else + static void VerifyObject(const Object*) {} + void CheckFieldAssignment(MemberOffset, const Object*) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {} +#endif + + // Write barrier called post update to a reference bearing field. 
+ static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value); + + Class* klass_; + + uint32_t monitor_; + + friend class art::ImageWriter; + friend struct art::ObjectOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(Object); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_OBJECT_H_ diff --git a/src/mirror/object_array-inl.h b/src/mirror/object_array-inl.h new file mode 100644 index 0000000000..d98142829a --- /dev/null +++ b/src/mirror/object_array-inl.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_OBJECT_ARRAY_INL_H_ +#define ART_SRC_MIRROR_OBJECT_ARRAY_INL_H_ + +#include "object_array.h" + +#include "heap.h" +#include "mirror/class.h" +#include "mirror/field.h" +#include "runtime.h" + +namespace art { +namespace mirror { + +template +inline ObjectArray* ObjectArray::Alloc(Thread* self, Class* object_array_class, int32_t length) { + Array* array = Array::Alloc(self, object_array_class, length, sizeof(Object*)); + if (UNLIKELY(array == NULL)) { + return NULL; + } else { + return array->AsObjectArray(); + } +} + +template +inline T* ObjectArray::Get(int32_t i) const { + if (UNLIKELY(!IsValidIndex(i))) { + return NULL; + } + MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); + return GetFieldObject(data_offset, false); +} + +template +inline void ObjectArray::Set(int32_t i, T* object) { + if (LIKELY(IsValidIndex(i))) { + if (object != NULL) { + Class* element_class = GetClass()->GetComponentType(); + if (UNLIKELY(!object->InstanceOf(element_class))) { + ThrowArrayStoreException(object); + return; + } + } + MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); + SetFieldObject(data_offset, object, false); + } +} + +template +inline void ObjectArray::SetWithoutChecks(int32_t i, T* object) { + DCHECK(IsValidIndex(i)); + MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); + SetFieldObject(data_offset, object, false); +} + +template +inline void ObjectArray::SetPtrWithoutChecks(int32_t i, T* object) { + DCHECK(IsValidIndex(i)); + MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); + SetFieldPtr(data_offset, object, false); +} + +template +inline T* ObjectArray::GetWithoutChecks(int32_t i) const { + DCHECK(IsValidIndex(i)); + MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); + return GetFieldObject(data_offset, false); +} + +template +inline void 
ObjectArray::Copy(const ObjectArray* src, int src_pos, + ObjectArray* dst, int dst_pos, + size_t length) { + if (src->IsValidIndex(src_pos) && + src->IsValidIndex(src_pos+length-1) && + dst->IsValidIndex(dst_pos) && + dst->IsValidIndex(dst_pos+length-1)) { + MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*)); + MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*)); + Class* array_class = dst->GetClass(); + Heap* heap = Runtime::Current()->GetHeap(); + if (array_class == src->GetClass()) { + // No need for array store checks if arrays are of the same type + for (size_t i = 0; i < length; i++) { + Object* object = src->GetFieldObject(src_offset, false); + heap->VerifyObject(object); + // directly set field, we do a bulk write barrier at the end + dst->SetField32(dst_offset, reinterpret_cast(object), false, true); + src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*)); + dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*)); + } + } else { + Class* element_class = array_class->GetComponentType(); + CHECK(!element_class->IsPrimitive()); + for (size_t i = 0; i < length; i++) { + Object* object = src->GetFieldObject(src_offset, false); + if (object != NULL && !object->InstanceOf(element_class)) { + dst->ThrowArrayStoreException(object); + return; + } + heap->VerifyObject(object); + // directly set field, we do a bulk write barrier at the end + dst->SetField32(dst_offset, reinterpret_cast(object), false, true); + src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*)); + dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*)); + } + } + heap->WriteBarrierArray(dst, dst_pos, length); + } +} + +template +inline ObjectArray* ObjectArray::CopyOf(Thread* self, int32_t new_length) { + ObjectArray* new_array = Alloc(self, GetClass(), new_length); + Copy(this, 0, new_array, 0, std::min(GetLength(), new_length)); + return new_array; +} + 
+} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_OBJET_ARRAY_INL_H_ diff --git a/src/mirror/object_array.h b/src/mirror/object_array.h new file mode 100644 index 0000000000..3d04b396da --- /dev/null +++ b/src/mirror/object_array.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_OBJECT_ARRAY_H_ +#define ART_SRC_MIRROR_OBJECT_ARRAY_H_ + +#include "array.h" + +namespace art { +namespace mirror { + +template +class MANAGED ObjectArray : public Array { + public: + static ObjectArray* Alloc(Thread* self, Class* object_array_class, int32_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Set element without bound and element type checks, to be used in limited + // circumstances, such as during boot image writing + void SetWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Set element without bound and element type checks, to be used in limited circumstances, such + // as during boot image writing. Does not do write barrier. 
+ void SetPtrWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static void Copy(const ObjectArray* src, int src_pos, + ObjectArray* dst, int dst_pos, + size_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ObjectArray* CopyOf(Thread* self, int32_t new_length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_OBJECT_ARRAY_H_ diff --git a/src/mirror/object_test.cc b/src/mirror/object_test.cc new file mode 100644 index 0000000000..29cf2f10a1 --- /dev/null +++ b/src/mirror/object_test.cc @@ -0,0 +1,640 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "object.h" + +#include +#include + +#include "array-inl.h" +#include "asm_support.h" +#include "class-inl.h" +#include "class_linker.h" +#include "class_linker-inl.h" +#include "common_test.h" +#include "dex_file.h" +#include "field-inl.h" +#include "gc/card_table-inl.h" +#include "heap.h" +#include "iftable-inl.h" +#include "abstract_method-inl.h" +#include "object-inl.h" +#include "object_array-inl.h" +#include "runtime_support.h" +#include "sirt_ref.h" +#include "UniquePtr.h" + +namespace art { +namespace mirror { + +class ObjectTest : public CommonTest { + protected: + void AssertString(int32_t length, + const char* utf8_in, + const char* utf16_expected_le, + int32_t expected_hash) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + UniquePtr utf16_expected(new uint16_t[length]); + for (int32_t i = 0; i < length; i++) { + uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | + ((utf16_expected_le[i*2 + 1] & 0xff) << 0)); + utf16_expected[i] = ch; + } + + Thread* self = Thread::Current(); + SirtRef string(self, String::AllocFromModifiedUtf8(self, length, utf8_in)); + ASSERT_EQ(length, string->GetLength()); + ASSERT_TRUE(string->GetCharArray() != NULL); + ASSERT_TRUE(string->GetCharArray()->GetData() != NULL); + // strlen is necessary because the 1-character string "\0" is interpreted as "" + ASSERT_TRUE(string->Equals(utf8_in) || length != static_cast(strlen(utf8_in))); + for (int32_t i = 0; i < length; i++) { + EXPECT_EQ(utf16_expected[i], string->CharAt(i)); + } + EXPECT_EQ(expected_hash, string->GetHashCode()); + } +}; + +// Keep the assembly code in sync +TEST_F(ObjectTest, AsmConstants) { + ASSERT_EQ(STRING_VALUE_OFFSET, String::ValueOffset().Int32Value()); + ASSERT_EQ(STRING_COUNT_OFFSET, String::CountOffset().Int32Value()); + ASSERT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value()); + ASSERT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value()); +} + +TEST_F(ObjectTest, IsInSamePackage) { + // Matches + 
EXPECT_TRUE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/lang/Class;")); + EXPECT_TRUE(Class::IsInSamePackage("LFoo;", "LBar;")); + + // Mismatches + EXPECT_FALSE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/io/File;")); + EXPECT_FALSE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/lang/reflect/Method;")); +} + +TEST_F(ObjectTest, Clone) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef > a1(soa.Self(), + class_linker_->AllocObjectArray(soa.Self(), 256)); + size_t s1 = a1->SizeOf(); + Object* clone = a1->Clone(soa.Self()); + EXPECT_EQ(s1, clone->SizeOf()); + EXPECT_TRUE(clone->GetClass() == a1->GetClass()); +} + +TEST_F(ObjectTest, AllocObjectArray) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef > oa(soa.Self(), + class_linker_->AllocObjectArray(soa.Self(), 2)); + EXPECT_EQ(2, oa->GetLength()); + EXPECT_TRUE(oa->Get(0) == NULL); + EXPECT_TRUE(oa->Get(1) == NULL); + oa->Set(0, oa.get()); + EXPECT_TRUE(oa->Get(0) == oa.get()); + EXPECT_TRUE(oa->Get(1) == NULL); + oa->Set(1, oa.get()); + EXPECT_TRUE(oa->Get(0) == oa.get()); + EXPECT_TRUE(oa->Get(1) == oa.get()); + + Class* aioobe = class_linker_->FindSystemClass("Ljava/lang/ArrayIndexOutOfBoundsException;"); + + EXPECT_TRUE(oa->Get(-1) == NULL); + EXPECT_TRUE(soa.Self()->IsExceptionPending()); + EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + soa.Self()->ClearException(); + + EXPECT_TRUE(oa->Get(2) == NULL); + EXPECT_TRUE(soa.Self()->IsExceptionPending()); + EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + soa.Self()->ClearException(); + + ASSERT_TRUE(oa->GetClass() != NULL); + ClassHelper oa_ch(oa->GetClass()); + ASSERT_EQ(2U, oa_ch.NumDirectInterfaces()); + EXPECT_EQ(class_linker_->FindSystemClass("Ljava/lang/Cloneable;"), oa_ch.GetDirectInterface(0)); + EXPECT_EQ(class_linker_->FindSystemClass("Ljava/io/Serializable;"), oa_ch.GetDirectInterface(1)); +} + +TEST_F(ObjectTest, AllocArray) { + ScopedObjectAccess soa(Thread::Current()); + Class* c = 
class_linker_->FindSystemClass("[I"); + SirtRef a(soa.Self(), Array::Alloc(soa.Self(), c, 1)); + ASSERT_TRUE(c == a->GetClass()); + + c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); + a.reset(Array::Alloc(soa.Self(), c, 1)); + ASSERT_TRUE(c == a->GetClass()); + + c = class_linker_->FindSystemClass("[[Ljava/lang/Object;"); + a.reset(Array::Alloc(soa.Self(), c, 1)); + ASSERT_TRUE(c == a->GetClass()); +} + +template +void TestPrimitiveArray(ClassLinker* cl) { + ScopedObjectAccess soa(Thread::Current()); + typedef typename ArrayT::ElementType T; + + ArrayT* a = ArrayT::Alloc(soa.Self(), 2); + EXPECT_EQ(2, a->GetLength()); + EXPECT_EQ(0, a->Get(0)); + EXPECT_EQ(0, a->Get(1)); + a->Set(0, T(123)); + EXPECT_EQ(T(123), a->Get(0)); + EXPECT_EQ(0, a->Get(1)); + a->Set(1, T(321)); + EXPECT_EQ(T(123), a->Get(0)); + EXPECT_EQ(T(321), a->Get(1)); + + Class* aioobe = cl->FindSystemClass("Ljava/lang/ArrayIndexOutOfBoundsException;"); + + EXPECT_EQ(0, a->Get(-1)); + EXPECT_TRUE(soa.Self()->IsExceptionPending()); + EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + soa.Self()->ClearException(); + + EXPECT_EQ(0, a->Get(2)); + EXPECT_TRUE(soa.Self()->IsExceptionPending()); + EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + soa.Self()->ClearException(); +} + +TEST_F(ObjectTest, PrimitiveArray_Boolean_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Byte_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Char_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Int_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Long_Alloc) { + TestPrimitiveArray(class_linker_); +} +TEST_F(ObjectTest, PrimitiveArray_Short_Alloc) { + 
TestPrimitiveArray(class_linker_); +} + +TEST_F(ObjectTest, CheckAndAllocArrayFromCode) { + // pretend we are trying to call 'new char[3]' from String.toCharArray + ScopedObjectAccess soa(Thread::Current()); + Class* java_util_Arrays = class_linker_->FindSystemClass("Ljava/util/Arrays;"); + AbstractMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V"); + const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I"); + ASSERT_TRUE(string_id != NULL); + const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId( + java_lang_dex_file_->GetIndexForStringId(*string_id)); + ASSERT_TRUE(type_id != NULL); + uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id); + Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false); + EXPECT_TRUE(array->IsArrayInstance()); + EXPECT_EQ(3, array->AsArray()->GetLength()); + EXPECT_TRUE(array->GetClass()->IsArrayClass()); + EXPECT_TRUE(array->GetClass()->GetComponentType()->IsPrimitive()); +} + +TEST_F(ObjectTest, CreateMultiArray) { + ScopedObjectAccess soa(Thread::Current()); + + SirtRef c(soa.Self(), class_linker_->FindSystemClass("I")); + SirtRef dims(soa.Self(), IntArray::Alloc(soa.Self(), 1)); + dims->Set(0, 1); + Array* multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); + EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass("[I")); + EXPECT_EQ(1, multi->GetLength()); + + dims->Set(0, -1); + multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); + EXPECT_TRUE(soa.Self()->IsExceptionPending()); + EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException()->GetClass()), + "java.lang.NegativeArraySizeException"); + soa.Self()->ClearException(); + + dims.reset(IntArray::Alloc(soa.Self(), 2)); + for (int i = 1; i < 20; ++i) { + for (int j = 0; j < 20; ++j) { + dims->Set(0, i); + dims->Set(1, j); + multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); + EXPECT_TRUE(multi->GetClass() == 
class_linker_->FindSystemClass("[[I")); + EXPECT_EQ(i, multi->GetLength()); + for (int k = 0; k < i; ++k) { + Array* outer = multi->AsObjectArray()->Get(k); + EXPECT_TRUE(outer->GetClass() == class_linker_->FindSystemClass("[I")); + EXPECT_EQ(j, outer->GetLength()); + } + } + } +} + +TEST_F(ObjectTest, StaticFieldFromCode) { + // pretend we are trying to access 'Static.s0' from StaticsFromCode. + ScopedObjectAccess soa(Thread::Current()); + jobject class_loader = LoadDex("StaticsFromCode"); + const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0]; + CHECK(dex_file != NULL); + + Class* klass = + class_linker_->FindClass("LStaticsFromCode;", soa.Decode(class_loader)); + AbstractMethod* clinit = klass->FindDirectMethod("", "()V"); + const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;"); + ASSERT_TRUE(klass_string_id != NULL); + const DexFile::TypeId* klass_type_id = dex_file->FindTypeId( + dex_file->GetIndexForStringId(*klass_string_id)); + ASSERT_TRUE(klass_type_id != NULL); + + const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;"); + ASSERT_TRUE(type_string_id != NULL); + const DexFile::TypeId* type_type_id = dex_file->FindTypeId( + dex_file->GetIndexForStringId(*type_string_id)); + ASSERT_TRUE(type_type_id != NULL); + + const DexFile::StringId* name_str_id = dex_file->FindStringId("s0"); + ASSERT_TRUE(name_str_id != NULL); + + const DexFile::FieldId* field_id = dex_file->FindFieldId( + *klass_type_id, *name_str_id, *type_type_id); + ASSERT_TRUE(field_id != NULL); + uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id); + + Field* field = FindFieldFromCode(field_idx, clinit, Thread::Current(), StaticObjectRead, + sizeof(Object*)); + Object* s0 = field->GetObj(klass); + EXPECT_TRUE(s0 != NULL); + + SirtRef char_array(soa.Self(), CharArray::Alloc(soa.Self(), 0)); + field->SetObj(field->GetDeclaringClass(), char_array.get()); + EXPECT_EQ(char_array.get(), 
field->GetObj(klass)); + + field->SetObj(field->GetDeclaringClass(), NULL); + EXPECT_EQ(NULL, field->GetObj(klass)); + + // TODO: more exhaustive tests of all 6 cases of Field::*FromCode +} + +TEST_F(ObjectTest, String) { + ScopedObjectAccess soa(Thread::Current()); + // Test the empty string. + AssertString(0, "", "", 0); + + // Test one-byte characters. + AssertString(1, " ", "\x00\x20", 0x20); + AssertString(1, "", "\x00\x00", 0); + AssertString(1, "\x7f", "\x00\x7f", 0x7f); + AssertString(2, "hi", "\x00\x68\x00\x69", (31 * 0x68) + 0x69); + + // Test two-byte characters. + AssertString(1, "\xc2\x80", "\x00\x80", 0x80); + AssertString(1, "\xd9\xa6", "\x06\x66", 0x0666); + AssertString(1, "\xdf\xbf", "\x07\xff", 0x07ff); + AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69); + + // Test three-byte characters. + AssertString(1, "\xe0\xa0\x80", "\x08\x00", 0x0800); + AssertString(1, "\xe1\x88\xb4", "\x12\x34", 0x1234); + AssertString(1, "\xef\xbf\xbf", "\xff\xff", 0xffff); + AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69); +} + +TEST_F(ObjectTest, StringEqualsUtf8) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + EXPECT_TRUE(string->Equals("android")); + EXPECT_FALSE(string->Equals("Android")); + EXPECT_FALSE(string->Equals("ANDROID")); + EXPECT_FALSE(string->Equals("")); + EXPECT_FALSE(string->Equals("and")); + EXPECT_FALSE(string->Equals("androids")); + + SirtRef empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); + EXPECT_TRUE(empty->Equals("")); + EXPECT_FALSE(empty->Equals("a")); +} + +TEST_F(ObjectTest, StringEquals) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + SirtRef string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + 
EXPECT_TRUE(string->Equals(string_2.get())); + EXPECT_FALSE(string->Equals("Android")); + EXPECT_FALSE(string->Equals("ANDROID")); + EXPECT_FALSE(string->Equals("")); + EXPECT_FALSE(string->Equals("and")); + EXPECT_FALSE(string->Equals("androids")); + + SirtRef empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); + EXPECT_TRUE(empty->Equals("")); + EXPECT_FALSE(empty->Equals("a")); +} + +TEST_F(ObjectTest, StringCompareTo) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + SirtRef string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + SirtRef string_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "Android")); + SirtRef string_4(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "and")); + SirtRef string_5(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); + EXPECT_EQ(0, string->CompareTo(string_2.get())); + EXPECT_LT(0, string->CompareTo(string_3.get())); + EXPECT_GT(0, string_3->CompareTo(string.get())); + EXPECT_LT(0, string->CompareTo(string_4.get())); + EXPECT_GT(0, string_4->CompareTo(string.get())); + EXPECT_LT(0, string->CompareTo(string_5.get())); + EXPECT_GT(0, string_5->CompareTo(string.get())); +} + +TEST_F(ObjectTest, StringLength) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); + EXPECT_EQ(string->GetLength(), 7); + EXPECT_EQ(string->GetUtfLength(), 7); + + string->SetOffset(2); + string->SetCount(5); + EXPECT_TRUE(string->Equals("droid")); + EXPECT_EQ(string->GetLength(), 5); + EXPECT_EQ(string->GetUtfLength(), 5); +} + +TEST_F(ObjectTest, DescriptorCompare) { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* linker = class_linker_; + + jobject jclass_loader_1 = LoadDex("ProtoCompare"); + jobject jclass_loader_2 = LoadDex("ProtoCompare2"); + SirtRef class_loader_1(soa.Self(), soa.Decode(jclass_loader_1)); + SirtRef 
class_loader_2(soa.Self(), soa.Decode(jclass_loader_2)); + + Class* klass1 = linker->FindClass("LProtoCompare;", class_loader_1.get()); + ASSERT_TRUE(klass1 != NULL); + Class* klass2 = linker->FindClass("LProtoCompare2;", class_loader_2.get()); + ASSERT_TRUE(klass2 != NULL); + + AbstractMethod* m1_1 = klass1->GetVirtualMethod(0); + MethodHelper mh(m1_1); + EXPECT_STREQ(mh.GetName(), "m1"); + AbstractMethod* m2_1 = klass1->GetVirtualMethod(1); + mh.ChangeMethod(m2_1); + EXPECT_STREQ(mh.GetName(), "m2"); + AbstractMethod* m3_1 = klass1->GetVirtualMethod(2); + mh.ChangeMethod(m3_1); + EXPECT_STREQ(mh.GetName(), "m3"); + AbstractMethod* m4_1 = klass1->GetVirtualMethod(3); + mh.ChangeMethod(m4_1); + EXPECT_STREQ(mh.GetName(), "m4"); + + AbstractMethod* m1_2 = klass2->GetVirtualMethod(0); + mh.ChangeMethod(m1_2); + EXPECT_STREQ(mh.GetName(), "m1"); + AbstractMethod* m2_2 = klass2->GetVirtualMethod(1); + mh.ChangeMethod(m2_2); + EXPECT_STREQ(mh.GetName(), "m2"); + AbstractMethod* m3_2 = klass2->GetVirtualMethod(2); + mh.ChangeMethod(m3_2); + EXPECT_STREQ(mh.GetName(), "m3"); + AbstractMethod* m4_2 = klass2->GetVirtualMethod(3); + mh.ChangeMethod(m4_2); + EXPECT_STREQ(mh.GetName(), "m4"); + + mh.ChangeMethod(m1_1); + MethodHelper mh2(m1_2); + EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); + EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); + + mh.ChangeMethod(m2_1); + mh2.ChangeMethod(m2_2); + EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); + EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); + + mh.ChangeMethod(m3_1); + mh2.ChangeMethod(m3_2); + EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); + EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); + + mh.ChangeMethod(m4_1); + mh2.ChangeMethod(m4_2); + EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); + EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); +} + + +TEST_F(ObjectTest, StringHashCode) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); + SirtRef A(soa.Self(), 
String::AllocFromModifiedUtf8(soa.Self(), "A")); + SirtRef ABC(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); + + EXPECT_EQ(0, empty->GetHashCode()); + EXPECT_EQ(65, A->GetHashCode()); + EXPECT_EQ(64578, ABC->GetHashCode()); +} + +TEST_F(ObjectTest, InstanceOf) { + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); + + Class* X = class_linker_->FindClass("LX;", class_loader.get()); + Class* Y = class_linker_->FindClass("LY;", class_loader.get()); + ASSERT_TRUE(X != NULL); + ASSERT_TRUE(Y != NULL); + + SirtRef x(soa.Self(), X->AllocObject(soa.Self())); + SirtRef y(soa.Self(), Y->AllocObject(soa.Self())); + ASSERT_TRUE(x.get() != NULL); + ASSERT_TRUE(y.get() != NULL); + + EXPECT_TRUE(x->InstanceOf(X)); + EXPECT_FALSE(x->InstanceOf(Y)); + EXPECT_TRUE(y->InstanceOf(X)); + EXPECT_TRUE(y->InstanceOf(Y)); + + Class* java_lang_Class = class_linker_->FindSystemClass("Ljava/lang/Class;"); + Class* Object_array_class = class_linker_->FindSystemClass("[Ljava/lang/Object;"); + + EXPECT_FALSE(java_lang_Class->InstanceOf(Object_array_class)); + EXPECT_TRUE(Object_array_class->InstanceOf(java_lang_Class)); + + // All array classes implement Cloneable and Serializable. 
+ Object* array = ObjectArray::Alloc(soa.Self(), Object_array_class, 1); + Class* java_lang_Cloneable = class_linker_->FindSystemClass("Ljava/lang/Cloneable;"); + Class* java_io_Serializable = class_linker_->FindSystemClass("Ljava/io/Serializable;"); + EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable)); + EXPECT_TRUE(array->InstanceOf(java_io_Serializable)); +} + +TEST_F(ObjectTest, IsAssignableFrom) { + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); + Class* X = class_linker_->FindClass("LX;", class_loader.get()); + Class* Y = class_linker_->FindClass("LY;", class_loader.get()); + + EXPECT_TRUE(X->IsAssignableFrom(X)); + EXPECT_TRUE(X->IsAssignableFrom(Y)); + EXPECT_FALSE(Y->IsAssignableFrom(X)); + EXPECT_TRUE(Y->IsAssignableFrom(Y)); + + // class final String implements CharSequence, .. + Class* string = class_linker_->FindSystemClass("Ljava/lang/String;"); + Class* charseq = class_linker_->FindSystemClass("Ljava/lang/CharSequence;"); + // Can String be assigned to CharSequence without a cast? + EXPECT_TRUE(charseq->IsAssignableFrom(string)); + // Can CharSequence be assigned to String without a cast? 
+ EXPECT_FALSE(string->IsAssignableFrom(charseq)); + + // Primitive types are only assignable to themselves + const char* prims = "ZBCSIJFD"; + Class* prim_types[strlen(prims)]; + for (size_t i = 0; i < strlen(prims); i++) { + prim_types[i] = class_linker_->FindPrimitiveClass(prims[i]); + } + for (size_t i = 0; i < strlen(prims); i++) { + for (size_t j = 0; i < strlen(prims); i++) { + if (i == j) { + EXPECT_TRUE(prim_types[i]->IsAssignableFrom(prim_types[j])); + } else { + EXPECT_FALSE(prim_types[i]->IsAssignableFrom(prim_types[j])); + } + } + } +} + +TEST_F(ObjectTest, IsAssignableFromArray) { + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader = LoadDex("XandY"); + SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); + Class* X = class_linker_->FindClass("LX;", class_loader.get()); + Class* Y = class_linker_->FindClass("LY;", class_loader.get()); + ASSERT_TRUE(X != NULL); + ASSERT_TRUE(Y != NULL); + + Class* YA = class_linker_->FindClass("[LY;", class_loader.get()); + Class* YAA = class_linker_->FindClass("[[LY;", class_loader.get()); + ASSERT_TRUE(YA != NULL); + ASSERT_TRUE(YAA != NULL); + + Class* XAA = class_linker_->FindClass("[[LX;", class_loader.get()); + ASSERT_TRUE(XAA != NULL); + + Class* O = class_linker_->FindSystemClass("Ljava/lang/Object;"); + Class* OA = class_linker_->FindSystemClass("[Ljava/lang/Object;"); + Class* OAA = class_linker_->FindSystemClass("[[Ljava/lang/Object;"); + Class* OAAA = class_linker_->FindSystemClass("[[[Ljava/lang/Object;"); + ASSERT_TRUE(O != NULL); + ASSERT_TRUE(OA != NULL); + ASSERT_TRUE(OAA != NULL); + ASSERT_TRUE(OAAA != NULL); + + Class* S = class_linker_->FindSystemClass("Ljava/io/Serializable;"); + Class* SA = class_linker_->FindSystemClass("[Ljava/io/Serializable;"); + Class* SAA = class_linker_->FindSystemClass("[[Ljava/io/Serializable;"); + ASSERT_TRUE(S != NULL); + ASSERT_TRUE(SA != NULL); + ASSERT_TRUE(SAA != NULL); + + Class* IA = class_linker_->FindSystemClass("[I"); + ASSERT_TRUE(IA 
!= NULL); + + EXPECT_TRUE(YAA->IsAssignableFrom(YAA)); // identity + EXPECT_TRUE(XAA->IsAssignableFrom(YAA)); // element superclass + EXPECT_FALSE(YAA->IsAssignableFrom(XAA)); + EXPECT_FALSE(Y->IsAssignableFrom(YAA)); + EXPECT_FALSE(YA->IsAssignableFrom(YAA)); + EXPECT_TRUE(O->IsAssignableFrom(YAA)); // everything is an Object + EXPECT_TRUE(OA->IsAssignableFrom(YAA)); + EXPECT_TRUE(OAA->IsAssignableFrom(YAA)); + EXPECT_TRUE(S->IsAssignableFrom(YAA)); // all arrays are Serializable + EXPECT_TRUE(SA->IsAssignableFrom(YAA)); + EXPECT_FALSE(SAA->IsAssignableFrom(YAA)); // unless Y was Serializable + + EXPECT_FALSE(IA->IsAssignableFrom(OA)); + EXPECT_FALSE(OA->IsAssignableFrom(IA)); + EXPECT_TRUE(O->IsAssignableFrom(IA)); +} + +TEST_F(ObjectTest, FindInstanceField) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); + ASSERT_TRUE(s.get() != NULL); + Class* c = s->GetClass(); + ASSERT_TRUE(c != NULL); + + // Wrong type. + EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL); + EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL); + + // Wrong name. + EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL); + EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL); + + // Right name and type. + Field* f1 = c->FindDeclaredInstanceField("count", "I"); + Field* f2 = c->FindInstanceField("count", "I"); + EXPECT_TRUE(f1 != NULL); + EXPECT_TRUE(f2 != NULL); + EXPECT_EQ(f1, f2); + + // TODO: check that s.count == 3. + + // Ensure that we handle superclass fields correctly... + c = class_linker_->FindSystemClass("Ljava/lang/StringBuilder;"); + ASSERT_TRUE(c != NULL); + // No StringBuilder.count... + EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL); + // ...but there is an AbstractStringBuilder.count. 
+ EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL); +} + +TEST_F(ObjectTest, FindStaticField) { + ScopedObjectAccess soa(Thread::Current()); + SirtRef s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); + ASSERT_TRUE(s.get() != NULL); + Class* c = s->GetClass(); + ASSERT_TRUE(c != NULL); + + // Wrong type. + EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL); + EXPECT_TRUE(c->FindStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL); + + // Wrong name. + EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL); + EXPECT_TRUE(c->FindStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL); + + // Right name and type. + Field* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;"); + Field* f2 = c->FindStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;"); + EXPECT_TRUE(f1 != NULL); + EXPECT_TRUE(f2 != NULL); + EXPECT_EQ(f1, f2); + + // TODO: test static fields via superclasses. + // TODO: test static fields via interfaces. + // TODO: test that interfaces trump superclasses. +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/proxy.h b/src/mirror/proxy.h new file mode 100644 index 0000000000..cac028a731 --- /dev/null +++ b/src/mirror/proxy.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_PROXY_H_ +#define ART_SRC_MIRROR_PROXY_H_ + +#include "mirror/object.h" + +namespace art { + +struct ProxyOffsets; + +namespace mirror { + +class MANAGED SynthesizedProxyClass : public Class { + public: + ObjectArray* GetInterfaces() { + return interfaces_; + } + + ObjectArray >* GetThrows() { + return throws_; + } + + private: + ObjectArray* interfaces_; + ObjectArray >* throws_; + DISALLOW_IMPLICIT_CONSTRUCTORS(SynthesizedProxyClass); +}; + +class MANAGED Proxy : public Object { + private: + Object* h_; + + friend struct art::ProxyOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_PROXY_H_ diff --git a/src/mirror/stack_trace_element.cc b/src/mirror/stack_trace_element.cc new file mode 100644 index 0000000000..9d557ec9e8 --- /dev/null +++ b/src/mirror/stack_trace_element.cc @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "stack_trace_element.h" + +#include "class.h" +#include "gc/card_table-inl.h" +#include "object-inl.h" +#include "string.h" + +namespace art { +namespace mirror { + +Class* StackTraceElement::java_lang_StackTraceElement_ = NULL; + +void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) { + CHECK(java_lang_StackTraceElement_ == NULL); + CHECK(java_lang_StackTraceElement != NULL); + java_lang_StackTraceElement_ = java_lang_StackTraceElement; +} + +void StackTraceElement::ResetClass() { + CHECK(java_lang_StackTraceElement_ != NULL); + java_lang_StackTraceElement_ = NULL; +} + +StackTraceElement* StackTraceElement::Alloc(Thread* self, + String* declaring_class, + String* method_name, + String* file_name, + int32_t line_number) { + StackTraceElement* trace = + down_cast(GetStackTraceElement()->AllocObject(self)); + trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_), + const_cast(declaring_class), false); + trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_), + const_cast(method_name), false); + trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_), + const_cast(file_name), false); + trace->SetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), + line_number, false); + return trace; +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/stack_trace_element.h b/src/mirror/stack_trace_element.h new file mode 100644 index 0000000000..d53c8602dc --- /dev/null +++ b/src/mirror/stack_trace_element.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_MIRROR_STACK_TRACE_ELEMENT_H_ +#define ART_SRC_MIRROR_STACK_TRACE_ELEMENT_H_ + +#include "object.h" + +namespace art { + +struct StackTraceElementOffsets; + +namespace mirror { + +// C++ mirror of java.lang.StackTraceElement +class MANAGED StackTraceElement : public Object { + public: + const String* GetDeclaringClass() const { + return GetFieldObject( + OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_), false); + } + + const String* GetMethodName() const { + return GetFieldObject( + OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_), false); + } + + const String* GetFileName() const { + return GetFieldObject( + OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_), false); + } + + int32_t GetLineNumber() const { + return GetField32( + OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), false); + } + + static StackTraceElement* Alloc(Thread* self, + String* declaring_class, + String* method_name, + String* file_name, + int32_t line_number) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static void SetClass(Class* java_lang_StackTraceElement); + + static void ResetClass(); + + private: + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". 
+ String* declaring_class_; + String* file_name_; + String* method_name_; + int32_t line_number_; + + static Class* GetStackTraceElement() { + DCHECK(java_lang_StackTraceElement_ != NULL); + return java_lang_StackTraceElement_; + } + + static Class* java_lang_StackTraceElement_; + + friend struct art::StackTraceElementOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(StackTraceElement); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_STACK_TRACE_ELEMENT_H_ diff --git a/src/mirror/string.cc b/src/mirror/string.cc new file mode 100644 index 0000000000..f571fb828a --- /dev/null +++ b/src/mirror/string.cc @@ -0,0 +1,289 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "string.h" + +#include "array.h" +#include "gc/card_table-inl.h" +#include "intern_table.h" +#include "object-inl.h" +#include "runtime.h" +#include "sirt_ref.h" +#include "thread.h" +#include "utf.h" + +namespace art { +namespace mirror { + +const CharArray* String::GetCharArray() const { + return GetFieldObject(ValueOffset(), false); +} + +void String::ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength())); +} + +int32_t String::GetUtfLength() const { + return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength()); +} + +int32_t String::FastIndexOf(int32_t ch, int32_t start) const { + int32_t count = GetLength(); + if (start < 0) { + start = 0; + } else if (start > count) { + start = count; + } + const uint16_t* chars = GetCharArray()->GetData() + GetOffset(); + const uint16_t* p = chars + start; + const uint16_t* end = chars + count; + while (p < end) { + if (*p++ == ch) { + return (p - 1) - chars; + } + } + return -1; +} + +void String::SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(new_array != NULL); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array, false); +} + +// TODO: get global references for these +Class* String::java_lang_String_ = NULL; + +void String::SetClass(Class* java_lang_String) { + CHECK(java_lang_String_ == NULL); + CHECK(java_lang_String != NULL); + java_lang_String_ = java_lang_String; +} + +void String::ResetClass() { + CHECK(java_lang_String_ != NULL); + java_lang_String_ = NULL; +} + +String* String::Intern() { + return Runtime::Current()->GetInternTable()->InternWeak(this); +} + +int32_t String::GetHashCode() { + int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false); + if (result == 0) { + ComputeHashCode(); + } + result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false); + DCHECK(result != 0 || ComputeUtf16Hash(GetCharArray(), 
GetOffset(), GetLength()) == 0) + << ToModifiedUtf8() << " " << result; + return result; +} + +int32_t String::GetLength() const { + int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), false); + DCHECK(result >= 0 && result <= GetCharArray()->GetLength()); + return result; +} + +uint16_t String::CharAt(int32_t index) const { + // TODO: do we need this? Equals is the only caller, and could + // bounds check itself. + if (index < 0 || index >= count_) { + Thread* self = Thread::Current(); + self->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", + "length=%i; index=%i", count_, index); + return 0; + } + return GetCharArray()->Get(index + GetOffset()); +} + +String* String::AllocFromUtf16(Thread* self, + int32_t utf16_length, + const uint16_t* utf16_data_in, + int32_t hash_code) { + CHECK(utf16_data_in != NULL || utf16_length == 0); + String* string = Alloc(self, GetJavaLangString(), utf16_length); + if (string == NULL) { + return NULL; + } + // TODO: use 16-bit wide memset variant + CharArray* array = const_cast(string->GetCharArray()); + if (array == NULL) { + return NULL; + } + for (int i = 0; i < utf16_length; i++) { + array->Set(i, utf16_data_in[i]); + } + if (hash_code != 0) { + string->SetHashCode(hash_code); + } else { + string->ComputeHashCode(); + } + return string; +} + + String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) { + if (utf == NULL) { + return NULL; + } + size_t char_count = CountModifiedUtf8Chars(utf); + return AllocFromModifiedUtf8(self, char_count, utf); +} + +String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, + const char* utf8_data_in) { + String* string = Alloc(self, GetJavaLangString(), utf16_length); + if (string == NULL) { + return NULL; + } + uint16_t* utf16_data_out = + const_cast(string->GetCharArray()->GetData()); + ConvertModifiedUtf8ToUtf16(utf16_data_out, utf8_data_in); + string->ComputeHashCode(); + return string; +} + +String* String::Alloc(Thread* self, 
Class* java_lang_String, int32_t utf16_length) { + SirtRef array(self, CharArray::Alloc(self, utf16_length)); + if (array.get() == NULL) { + return NULL; + } + return Alloc(self, java_lang_String, array.get()); +} + +String* String::Alloc(Thread* self, Class* java_lang_String, CharArray* array) { + // Hold reference in case AllocObject causes GC. + SirtRef array_ref(self, array); + String* string = down_cast(java_lang_String->AllocObject(self)); + if (string == NULL) { + return NULL; + } + string->SetArray(array); + string->SetCount(array->GetLength()); + return string; +} + +bool String::Equals(const String* that) const { + if (this == that) { + // Quick reference equality test + return true; + } else if (that == NULL) { + // Null isn't an instanceof anything + return false; + } else if (this->GetLength() != that->GetLength()) { + // Quick length inequality test + return false; + } else { + // Note: don't short circuit on hash code as we're presumably here as the + // hash code was already equal + for (int32_t i = 0; i < that->GetLength(); ++i) { + if (this->CharAt(i) != that->CharAt(i)) { + return false; + } + } + return true; + } +} + +bool String::Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) const { + if (this->GetLength() != that_length) { + return false; + } else { + for (int32_t i = 0; i < that_length; ++i) { + if (this->CharAt(i) != that_chars[that_offset + i]) { + return false; + } + } + return true; + } +} + +bool String::Equals(const char* modified_utf8) const { + for (int32_t i = 0; i < GetLength(); ++i) { + uint16_t ch = GetUtf16FromUtf8(&modified_utf8); + if (ch == '\0' || ch != CharAt(i)) { + return false; + } + } + return *modified_utf8 == '\0'; +} + +bool String::Equals(const StringPiece& modified_utf8) const { + if (modified_utf8.size() != GetLength()) { + return false; + } + const char* p = modified_utf8.data(); + for (int32_t i = 0; i < GetLength(); ++i) { + uint16_t ch = GetUtf16FromUtf8(&p); + if (ch != 
CharAt(i)) { + return false; + } + } + return true; +} + +// Create a modified UTF-8 encoded std::string from a java/lang/String object. +std::string String::ToModifiedUtf8() const { + const uint16_t* chars = GetCharArray()->GetData() + GetOffset(); + size_t byte_count = GetUtfLength(); + std::string result(byte_count, static_cast(0)); + ConvertUtf16ToModifiedUtf8(&result[0], chars, GetLength()); + return result; +} + +#ifdef HAVE__MEMCMP16 +// "count" is in 16-bit units. +extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count); +#define MemCmp16 __memcmp16 +#else +static uint32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t count) { + for (size_t i = 0; i < count; i++) { + if (s0[i] != s1[i]) { + return static_cast(s0[i]) - static_cast(s1[i]); + } + } + return 0; +} +#endif + +int32_t String::CompareTo(String* rhs) const { + // Quick test for comparison of a string with itself. + const String* lhs = this; + if (lhs == rhs) { + return 0; + } + // TODO: is this still true? + // The annoying part here is that 0x00e9 - 0xffff != 0x00ea, + // because the interpreter converts the characters to 32-bit integers + // *without* sign extension before it subtracts them (which makes some + // sense since "char" is unsigned). So what we get is the result of + // 0x000000e9 - 0x0000ffff, which is 0xffff00ea. + int lhsCount = lhs->GetLength(); + int rhsCount = rhs->GetLength(); + int countDiff = lhsCount - rhsCount; + int minCount = (countDiff < 0) ? 
lhsCount : rhsCount; + const uint16_t* lhsChars = lhs->GetCharArray()->GetData() + lhs->GetOffset(); + const uint16_t* rhsChars = rhs->GetCharArray()->GetData() + rhs->GetOffset(); + int otherRes = MemCmp16(lhsChars, rhsChars, minCount); + if (otherRes != 0) { + return otherRes; + } + return countDiff; +} + +} // namespace mirror +} // namespace art + diff --git a/src/mirror/string.h b/src/mirror/string.h new file mode 100644 index 0000000000..ef74fed93c --- /dev/null +++ b/src/mirror/string.h @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_STRING_H_ +#define ART_SRC_MIRROR_STRING_H_ + +#include "class.h" +#include "gtest/gtest.h" + +namespace art { + +struct StringClassOffsets; +struct StringOffsets; +class StringPiece; + +namespace mirror { + +// C++ mirror of java.lang.String +class MANAGED String : public Object { + public: + static MemberOffset CountOffset() { + return OFFSET_OF_OBJECT_MEMBER(String, count_); + } + + static MemberOffset ValueOffset() { + return OFFSET_OF_OBJECT_MEMBER(String, array_); + } + + static MemberOffset OffsetOffset() { + return OFFSET_OF_OBJECT_MEMBER(String, offset_); + } + + const CharArray* GetCharArray() const; + + int32_t GetOffset() const { + int32_t result = GetField32(OffsetOffset(), false); + DCHECK_LE(0, result); + return result; + } + + int32_t GetLength() const; + + int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + int32_t GetUtfLength() const; + + uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static String* AllocFromUtf16(Thread* self, + int32_t utf16_length, + const uint16_t* utf16_data_in, + int32_t hash_code = 0) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static String* AllocFromModifiedUtf8(Thread* self, const char* utf) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, + const char* utf8_data_in) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static String* Alloc(Thread* self, Class* java_lang_String, int32_t utf16_length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static String* Alloc(Thread* self, Class* java_lang_String, CharArray* array) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool Equals(const char* modified_utf8) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // TODO: do we need this overload? 
give it a more intention-revealing name. + bool Equals(const StringPiece& modified_utf8) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Compare UTF-16 code point values not in a locale-sensitive manner + int Compare(int32_t utf16_length, const char* utf8_data_in); + + // TODO: do we need this overload? give it a more intention-revealing name. + bool Equals(const uint16_t* that_chars, int32_t that_offset, + int32_t that_length) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Create a modified UTF-8 encoded std::string from a java/lang/String object. + std::string ToModifiedUtf8() const; + + int32_t FastIndexOf(int32_t ch, int32_t start) const; + + int32_t CompareTo(String* other) const; + + static Class* GetJavaLangString() { + DCHECK(java_lang_String_ != NULL); + return java_lang_String_; + } + + static void SetClass(Class* java_lang_String); + static void ResetClass(); + + private: + void SetHashCode(int32_t new_hash_code) { + DCHECK_EQ(0u, + GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false)); + SetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), + new_hash_code, false); + } + + void SetCount(int32_t new_count) { + DCHECK_LE(0, new_count); + SetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count, false); + } + + void SetOffset(int32_t new_offset) { + DCHECK_LE(0, new_offset); + DCHECK_GE(GetLength(), new_offset); + SetField32(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset, false); + } + + void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". 
+ CharArray* array_; + + int32_t count_; + + uint32_t hash_code_; + + int32_t offset_; + + static Class* java_lang_String_; + + friend struct art::StringOffsets; // for verifying offset information + FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount + DISALLOW_IMPLICIT_CONSTRUCTORS(String); +}; + +// TODO: remove? only used in a unit test of itself. +struct StringHashCode { + int32_t operator()(String* string) const { + return string->GetHashCode(); + } +}; + +class MANAGED StringClass : public Class { + private: + CharArray* ASCII_; + Object* CASE_INSENSITIVE_ORDER_; + uint32_t REPLACEMENT_CHAR_; + int64_t serialVersionUID_; + friend struct art::StringClassOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(StringClass); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_STRING_H_ diff --git a/src/mirror/throwable.cc b/src/mirror/throwable.cc new file mode 100644 index 0000000000..bbab1dd60d --- /dev/null +++ b/src/mirror/throwable.cc @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "throwable.h" + +#include "abstract_method-inl.h" +#include "class-inl.h" +#include "gc/card_table-inl.h" +#include "object-inl.h" +#include "object_array.h" +#include "object_array-inl.h" +#include "object_utils.h" +#include "utils.h" +#include "well_known_classes.h" + +namespace art { +namespace mirror { + +Class* Throwable::java_lang_Throwable_ = NULL; + +void Throwable::SetCause(Throwable* cause) { + CHECK(cause != NULL); + CHECK(cause != this); + CHECK(GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false) == NULL); + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false); +} + +bool Throwable::IsCheckedException() const { + if (InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_Error))) { + return false; + } + return !InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_RuntimeException)); +} + +std::string Throwable::Dump() const { + std::string result(PrettyTypeOf(this)); + result += ": "; + String* msg = GetDetailMessage(); + if (msg != NULL) { + result += msg->ToModifiedUtf8(); + } + result += "\n"; + Object* stack_state = GetStackState(); + // check stack state isn't missing or corrupt + if (stack_state != NULL && stack_state->IsObjectArray()) { + // Decode the internal stack trace into the depth and method trace + ObjectArray* method_trace = down_cast*>(stack_state); + int32_t depth = method_trace->GetLength() - 1; + IntArray* pc_trace = down_cast(method_trace->Get(depth)); + MethodHelper mh; + for (int32_t i = 0; i < depth; ++i) { + AbstractMethod* method = down_cast(method_trace->Get(i)); + mh.ChangeMethod(method); + uint32_t dex_pc = pc_trace->Get(i); + int32_t line_number = mh.GetLineNumFromDexPC(dex_pc); + const char* source_file = mh.GetDeclaringClassSourceFile(); + result += StringPrintf(" at %s (%s:%d)\n", PrettyMethod(method, true).c_str(), + source_file, line_number); + } + } + Throwable* cause = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false); + if 
(cause != NULL && cause != this) { // Constructor makes cause == this by default. + result += "Caused by: "; + result += cause->Dump(); + } + return result; +} + +void Throwable::SetClass(Class* java_lang_Throwable) { + CHECK(java_lang_Throwable_ == NULL); + CHECK(java_lang_Throwable != NULL); + java_lang_Throwable_ = java_lang_Throwable; +} + +void Throwable::ResetClass() { + CHECK(java_lang_Throwable_ != NULL); + java_lang_Throwable_ = NULL; +} + +} // namespace mirror +} // namespace art diff --git a/src/mirror/throwable.h b/src/mirror/throwable.h new file mode 100644 index 0000000000..aafcc07d86 --- /dev/null +++ b/src/mirror/throwable.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_MIRROR_THROWABLE_H_ +#define ART_SRC_MIRROR_THROWABLE_H_ + +#include "object.h" +#include "string.h" + +namespace art { + +struct ThrowableOffsets; + +namespace mirror { + +// C++ mirror of java.lang.Throwable +class MANAGED Throwable : public Object { + public: + void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message, false); + } + String* GetDetailMessage() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false); + } + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // This is a runtime version of initCause, you shouldn't use it if initCause may have been + // overridden. Also it asserts rather than throwing exceptions. Currently this is only used + // in cases like the verifier where the checks cannot fail and initCause isn't overridden. + void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsCheckedException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + static Class* GetJavaLangThrowable() { + DCHECK(java_lang_Throwable_ != NULL); + return java_lang_Throwable_; + } + + static void SetClass(Class* java_lang_Throwable); + static void ResetClass(); + + private: + Object* GetStackState() const { + return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), true); + } + + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". 
+ Throwable* cause_; + String* detail_message_; + Object* stack_state_; // Note this is Java volatile: + Object* stack_trace_; + Object* suppressed_exceptions_; + + static Class* java_lang_Throwable_; + + friend struct art::ThrowableOffsets; // for verifying offset information + DISALLOW_IMPLICIT_CONSTRUCTORS(Throwable); +}; + +} // namespace mirror +} // namespace art + +#endif // ART_SRC_MIRROR_THROWABLE_H_ diff --git a/src/modifiers.h b/src/modifiers.h index ee2d4ff597..78ba1404ef 100644 --- a/src/modifiers.h +++ b/src/modifiers.h @@ -17,6 +17,8 @@ #ifndef ART_SRC_MODIFIERS_H_ #define ART_SRC_MODIFIERS_H_ +#include + static const uint32_t kAccPublic = 0x0001; // class, field, method, ic static const uint32_t kAccPrivate = 0x0002; // field, method, ic static const uint32_t kAccProtected = 0x0004; // field, method, ic diff --git a/src/monitor.cc b/src/monitor.cc index 47c87cb259..aa4e5acefe 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -22,7 +22,9 @@ #include "base/stl_util.h" #include "class_linker.h" #include "dex_instruction.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" @@ -111,7 +113,7 @@ void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread is_sensitive_thread_hook_ = is_sensitive_thread_hook; } -Monitor::Monitor(Thread* owner, Object* obj) +Monitor::Monitor(Thread* owner, mirror::Object* obj) : monitor_lock_("a monitor lock", kMonitorLock), owner_(owner), lock_count_(0), @@ -186,7 +188,7 @@ void Monitor::RemoveFromWaitSet(Thread *thread) { } } -Object* Monitor::GetObject() { +mirror::Object* Monitor::GetObject() { return obj_; } @@ -200,7 +202,7 @@ void Monitor::Lock(Thread* self) { uint64_t waitStart = 0; uint64_t waitEnd = 0; uint32_t wait_threshold = lock_profiling_threshold_; - const AbstractMethod* current_locking_method = NULL; + const 
mirror::AbstractMethod* current_locking_method = NULL; uint32_t current_locking_dex_pc = 0; { ScopedThreadStateChange tsc(self, kBlocked); @@ -270,7 +272,7 @@ static std::string ThreadToString(Thread* thread) { return oss.str(); } -void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner, +void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner, Monitor* monitor) { Thread* current_owner = NULL; std::string current_owner_string; @@ -426,7 +428,7 @@ void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, int prev_lock_count = lock_count_; lock_count_ = 0; owner_ = NULL; - const AbstractMethod* saved_method = locking_method_; + const mirror::AbstractMethod* saved_method = locking_method_; locking_method_ = NULL; uintptr_t saved_dex_pc = locking_dex_pc_; locking_dex_pc_ = 0; @@ -570,7 +572,7 @@ void Monitor::NotifyAllWithLock() { * Changes the shape of a monitor from thin to fat, preserving the * internal lock state. The calling thread must own the lock. 
*/ -void Monitor::Inflate(Thread* self, Object* obj) { +void Monitor::Inflate(Thread* self, mirror::Object* obj) { DCHECK(self != NULL); DCHECK(obj != NULL); DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN); @@ -583,7 +585,7 @@ void Monitor::Inflate(Thread* self, Object* obj) { Runtime::Current()->GetMonitorList()->Add(m); } -void Monitor::MonitorEnter(Thread* self, Object* obj) { +void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { volatile int32_t* thinp = obj->GetRawLockWordAddress(); uint32_t sleepDelayNs; uint32_t minSleepDelayNs = 1000000; /* 1 millisecond */ @@ -686,7 +688,7 @@ void Monitor::MonitorEnter(Thread* self, Object* obj) { } } -bool Monitor::MonitorExit(Thread* self, Object* obj) { +bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) { volatile int32_t* thinp = obj->GetRawLockWordAddress(); DCHECK(self != NULL); @@ -748,7 +750,7 @@ bool Monitor::MonitorExit(Thread* self, Object* obj) { /* * Object.wait(). Also called for class init. 
*/ -void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, +void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow, ThreadState why) { volatile int32_t* thinp = obj->GetRawLockWordAddress(); @@ -772,7 +774,7 @@ void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow, why); } -void Monitor::Notify(Thread* self, Object *obj) { +void Monitor::Notify(Thread* self, mirror::Object *obj) { uint32_t thin = *obj->GetRawLockWordAddress(); // If the lock is still thin, there aren't any waiters; @@ -791,7 +793,7 @@ void Monitor::Notify(Thread* self, Object *obj) { } } -void Monitor::NotifyAll(Thread* self, Object *obj) { +void Monitor::NotifyAll(Thread* self, mirror::Object *obj) { uint32_t thin = *obj->GetRawLockWordAddress(); // If the lock is still thin, there aren't any waiters; @@ -822,7 +824,7 @@ uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) { void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { ThreadState state = thread->GetState(); - Object* object = NULL; + mirror::Object* object = NULL; uint32_t lock_owner = ThreadList::kInvalidId; if (state == kWaiting || state == kTimedWaiting || state == kSleeping) { if (state == kSleeping) { @@ -860,10 +862,10 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { os << "\n"; } -Object* Monitor::GetContendedMonitor(Thread* thread) { +mirror::Object* Monitor::GetContendedMonitor(Thread* thread) { // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre // definition of contended that includes a monitor a thread is trying to enter... 
- Object* result = thread->monitor_enter_object_; + mirror::Object* result = thread->monitor_enter_object_; if (result != NULL) { return result; } @@ -878,15 +880,16 @@ Object* Monitor::GetContendedMonitor(Thread* thread) { return NULL; } -void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(Object*, void*), void* callback_context) { - AbstractMethod* m = stack_visitor->GetMethod(); +void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*), + void* callback_context) { + mirror::AbstractMethod* m = stack_visitor->GetMethod(); CHECK(m != NULL); // Native methods are an easy special case. // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too. if (m->IsNative()) { if (m->IsSynchronized()) { - Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0); + mirror::Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0); callback(jni_this, callback_context); } return; @@ -933,13 +936,13 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(Object*, } uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff); - Object* o = reinterpret_cast(stack_visitor->GetVReg(m, monitor_register, - kReferenceVReg)); + mirror::Object* o = reinterpret_cast(stack_visitor->GetVReg(m, monitor_register, + kReferenceVReg)); callback(o, callback_context); } } -void Monitor::TranslateLocation(const AbstractMethod* method, uint32_t dex_pc, +void Monitor::TranslateLocation(const mirror::AbstractMethod* method, uint32_t dex_pc, const char*& source_file, uint32_t& line_number) const { // If method is null, location is unknown if (method == NULL) { @@ -968,7 +971,7 @@ void MonitorList::Add(Monitor* m) { list_.push_front(m); } -void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) { +void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) { MutexLock mu(Thread::Current(), monitor_list_lock_); typedef 
std::list::iterator It; // TODO: C++0x auto It it = list_.begin(); @@ -984,7 +987,7 @@ void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) { } } -MonitorInfo::MonitorInfo(Object* o) : owner(NULL), entry_count(0) { +MonitorInfo::MonitorInfo(mirror::Object* o) : owner(NULL), entry_count(0) { uint32_t lock_word = *o->GetRawLockWordAddress(); if (LW_SHAPE(lock_word) == LW_SHAPE_THIN) { uint32_t owner_thin_lock_id = LW_LOCK_OWNER(lock_word); diff --git a/src/monitor.h b/src/monitor.h index 1b5ab763e2..9194c08ab4 100644 --- a/src/monitor.h +++ b/src/monitor.h @@ -25,8 +25,8 @@ #include #include "base/mutex.h" -#include "heap.h" -#include "thread.h" +#include "root_visitor.h" +#include "thread_state.h" namespace art { @@ -55,8 +55,10 @@ namespace art { #define LW_LOCK_OWNER_SHIFT 3 #define LW_LOCK_OWNER(x) (((x) >> LW_LOCK_OWNER_SHIFT) & LW_LOCK_OWNER_MASK) +namespace mirror { class AbstractMethod; class Object; +} // namespace mirror class Thread; class StackVisitor; @@ -70,18 +72,19 @@ class Monitor { static uint32_t GetThinLockId(uint32_t raw_lock_word) NO_THREAD_SAFETY_ANALYSIS; // Reading lock owner without holding lock is racy. 
- static void MonitorEnter(Thread* thread, Object* obj) + static void MonitorEnter(Thread* thread, mirror::Object* obj) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool MonitorExit(Thread* thread, Object* obj) + static bool MonitorExit(Thread* thread, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_); - static void Notify(Thread* self, Object* obj) + static void Notify(Thread* self, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void NotifyAll(Thread* self, Object* obj) + static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow, ThreadState why) + static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns, + bool interruptShouldThrow, ThreadState why) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DescribeWait(std::ostream& os, const Thread* thread) @@ -89,30 +92,31 @@ class Monitor { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Used to implement JDWP's ThreadReference.CurrentContendedMonitor. - static Object* GetContendedMonitor(Thread* thread); + static mirror::Object* GetContendedMonitor(Thread* thread); // Calls 'callback' once for each lock held in the single stack frame represented by // the current state of 'stack_visitor'. 
- static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(Object*, void*), void* callback_context) + static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*), + void* callback_context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* GetObject(); + mirror::Object* GetObject(); private: - explicit Monitor(Thread* owner, Object* obj) + explicit Monitor(Thread* owner, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); - static void Inflate(Thread* self, Object* obj) + static void Inflate(Thread* self, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) + static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) LOCKS_EXCLUDED(Locks::thread_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -137,7 +141,7 @@ class Monitor { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. - void TranslateLocation(const AbstractMethod* method, uint32_t pc, + void TranslateLocation(const mirror::AbstractMethod* method, uint32_t pc, const char*& source_file, uint32_t& line_number) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -153,7 +157,7 @@ class Monitor { int lock_count_ GUARDED_BY(monitor_lock_); // What object are we part of (for debugging). - Object* const obj_; + mirror::Object* const obj_; // Threads currently waiting on this monitor. 
Thread* wait_set_ GUARDED_BY(monitor_lock_); @@ -161,12 +165,12 @@ class Monitor { // Method and dex pc where the lock owner acquired the lock, used when lock // sampling is enabled. locking_method_ may be null if the lock is currently // unlocked, or if the lock is acquired by the system when the stack is empty. - const AbstractMethod* locking_method_ GUARDED_BY(monitor_lock_); + const mirror::AbstractMethod* locking_method_ GUARDED_BY(monitor_lock_); uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_); friend class MonitorInfo; friend class MonitorList; - friend class Object; + friend class mirror::Object; DISALLOW_COPY_AND_ASSIGN(Monitor); }; @@ -177,7 +181,7 @@ class MonitorList { void Add(Monitor* m); - void SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) + void SweepMonitorList(IsMarkedTester is_marked, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); private: @@ -192,7 +196,7 @@ class MonitorList { // For use only by the JDWP implementation. class MonitorInfo { public: - MonitorInfo(Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); Thread* owner; size_t entry_count; diff --git a/src/monitor_android.cc b/src/monitor_android.cc index 6e22237afb..d3ac14355a 100644 --- a/src/monitor_android.cc +++ b/src/monitor_android.cc @@ -15,7 +15,6 @@ */ #include "monitor.h" -#include "object.h" #include "thread.h" #include @@ -79,7 +78,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample // Emit the source code file name, <= 37 bytes. 
uintptr_t pc; - AbstractMethod* m = self->GetCurrentMethod(&pc); + mirror::AbstractMethod* m = self->GetCurrentMethod(&pc); const char* filename; uint32_t line_number; TranslateLocation(m, pc, filename, line_number); diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 7485600fe4..e549a8bf74 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -18,11 +18,12 @@ #include "base/logging.h" #include "class_linker.h" -#include "class_loader.h" #include "dex_file.h" #include "gc/space.h" #include "image.h" #include "jni_internal.h" +#include "mirror/class_loader.h" +#include "mirror/string.h" #include "oat.h" #include "os.h" #include "runtime.h" @@ -150,9 +151,8 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); class_linker->RegisterDexFile(*dex_file); - Object* class_loader_object = soa.Decode(javaLoader); - ClassLoader* class_loader = down_cast(class_loader_object); - Class* result = class_linker->DefineClass(descriptor, class_loader, *dex_file, *dex_class_def); + mirror::ClassLoader* class_loader = soa.Decode(javaLoader); + mirror::Class* result = class_linker->DefineClass(descriptor, class_loader, *dex_file, *dex_class_def); return soa.AddLocalReference(result); } diff --git a/src/native/dalvik_system_VMDebug.cc b/src/native/dalvik_system_VMDebug.cc index e5a398ab4b..dc07a31ee3 100644 --- a/src/native/dalvik_system_VMDebug.cc +++ b/src/native/dalvik_system_VMDebug.cc @@ -21,6 +21,7 @@ #include "debugger.h" #include "hprof/hprof.h" #include "jni_internal.h" +#include "mirror/class.h" #include "ScopedUtfChars.h" #include "scoped_thread_state_change.h" #include "toStringArray.h" @@ -220,11 +221,11 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) { static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass, jboolean countAssignable) { ScopedObjectAccess 
soa(env); - Class* c = soa.Decode(javaClass); + mirror::Class* c = soa.Decode(javaClass); if (c == NULL) { return 0; } - std::vector classes; + std::vector classes; classes.push_back(c); uint64_t count = 0; Runtime::Current()->GetHeap()->CountInstances(classes, countAssignable, &count); diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index bf518dcefb..5ce27fbcc0 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -19,7 +19,8 @@ #include "class_linker.h" #include "debugger.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "gc/space.h" @@ -52,7 +53,7 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle UNIMPLEMENTED(FATAL); #endif - Class* element_class = soa.Decode(javaElementClass); + mirror::Class* element_class = soa.Decode(javaElementClass); if (element_class == NULL) { soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "element class == null"); return NULL; @@ -66,8 +67,8 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle std::string descriptor; descriptor += "["; descriptor += ClassHelper(element_class).GetDescriptor(); - Class* array_class = class_linker->FindClass(descriptor.c_str(), NULL); - Array* result = Array::Alloc(soa.Self(), array_class, length); + mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), NULL); + mirror::Array* result = mirror::Array::Alloc(soa.Self(), array_class, length); if (result == NULL) { return NULL; } @@ -79,7 +80,7 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) { return 0; } ScopedObjectAccess soa(env); - Array* array = soa.Decode(javaArray); + mirror::Array* array = soa.Decode(javaArray); if (!array->IsArrayInstance()) { 
soa.Self()->ThrowNewException("Ljava/lang/IllegalArgumentException;", "not an array"); return 0; diff --git a/src/native/dalvik_system_VMStack.cc b/src/native/dalvik_system_VMStack.cc index 494f38d38a..bb2ed8889d 100644 --- a/src/native/dalvik_system_VMStack.cc +++ b/src/native/dalvik_system_VMStack.cc @@ -14,10 +14,11 @@ * limitations under the License. */ -#include "class_loader.h" #include "jni_internal.h" #include "nth_caller_visitor.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class.h" +#include "mirror/class_loader.h" #include "scoped_thread_state_change.h" #include "thread_list.h" @@ -26,7 +27,7 @@ namespace art { static jobject GetThreadStack(JNIEnv* env, jobject peer) { { ScopedObjectAccess soa(env); - if (soa.Decode(peer) == soa.Self()->GetPeer()) { + if (soa.Decode(peer) == soa.Self()->GetPeer()) { return soa.Self()->CreateInternalStackTrace(soa); } } @@ -73,13 +74,13 @@ static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) { static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap, jobject javaSystem) { struct ClosestUserClassLoaderVisitor : public StackVisitor { - ClosestUserClassLoaderVisitor(Thread* thread, Object* bootstrap, Object* system) + ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap, mirror::Object* system) : StackVisitor(thread, NULL), bootstrap(bootstrap), system(system), class_loader(NULL) {} bool VisitFrame() { DCHECK(class_loader == NULL); - Class* c = GetMethod()->GetDeclaringClass(); - Object* cl = c->GetClassLoader(); + mirror::Class* c = GetMethod()->GetDeclaringClass(); + mirror::Object* cl = c->GetClassLoader(); if (cl != NULL && cl != bootstrap && cl != system) { class_loader = cl; return false; @@ -87,13 +88,13 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject ja return true; } - Object* bootstrap; - Object* system; - Object* class_loader; + mirror::Object* bootstrap; + mirror::Object* 
system; + mirror::Object* class_loader; }; ScopedObjectAccess soa(env); - Object* bootstrap = soa.Decode(javaBootstrap); - Object* system = soa.Decode(javaSystem); + mirror::Object* bootstrap = soa.Decode(javaBootstrap); + mirror::Object* system = soa.Decode(javaSystem); ClosestUserClassLoaderVisitor visitor(soa.Self(), bootstrap, system); visitor.WalkStack(); return soa.AddLocalReference(visitor.class_loader); diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index 59c9bef0f1..dded787214 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -15,10 +15,12 @@ */ #include "class_linker.h" -#include "class_loader.h" #include "jni_internal.h" #include "nth_caller_visitor.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/class_loader.h" +#include "mirror/object-inl.h" +#include "mirror/proxy.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" @@ -27,9 +29,9 @@ namespace art { -static Class* DecodeClass(const ScopedObjectAccess& soa, jobject java_class) +static mirror::Class* DecodeClass(const ScopedObjectAccess& soa, jobject java_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* c = soa.Decode(java_class); + mirror::Class* c = soa.Decode(java_class); DCHECK(c != NULL); DCHECK(c->IsClass()); // TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke . 
@@ -56,9 +58,9 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean } std::string descriptor(DotToDescriptor(name.c_str())); - ClassLoader* class_loader = soa.Decode(javaLoader); + mirror::ClassLoader* class_loader = soa.Decode(javaLoader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* c = class_linker->FindClass(descriptor.c_str(), class_loader); + mirror::Class* c = class_linker->FindClass(descriptor.c_str(), class_loader); if (c == NULL) { ScopedLocalRef cause(env, env->ExceptionOccurred()); env->ExceptionClear(); @@ -76,7 +78,7 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { ScopedObjectAccess soa(env); - Class* c = DecodeClass(soa, javaClass); + mirror::Class* c = DecodeClass(soa, javaClass); if (c->IsPrimitive() || c->IsArrayClass() || c->IsProxyClass()) { return 0; // primitive, array and proxy classes don't have class definitions } @@ -90,9 +92,9 @@ static jint Class_getAnnotationDirectoryOffset(JNIEnv* env, jclass javaClass) { static jobject Class_getDex(JNIEnv* env, jobject javaClass) { ScopedObjectAccess soa(env); - Class* c = DecodeClass(soa, javaClass); + mirror::Class* c = DecodeClass(soa, javaClass); - DexCache* dex_cache = c->GetDexCache(); + mirror::DexCache* dex_cache = c->GetDexCache(); if (dex_cache == NULL) { return NULL; } @@ -105,13 +107,14 @@ static jobject Class_getDex(JNIEnv* env, jobject javaClass) { static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { ScopedObjectAccess soa(env); - Class* c = DecodeClass(soa, javaThis); + mirror::Class* c = DecodeClass(soa, javaThis); return soa.AddLocalReference(c->ComputeName()); } static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { ScopedObjectAccess soa(env); - SynthesizedProxyClass* c = down_cast(DecodeClass(soa, javaThis)); + mirror::SynthesizedProxyClass* c = + 
down_cast(DecodeClass(soa, javaThis)); return soa.AddLocalReference(c->GetInterfaces()->Clone(soa.Self())); } diff --git a/src/native/java_lang_Object.cc b/src/native/java_lang_Object.cc index f3c295e6dc..75d5f70bef 100644 --- a/src/native/java_lang_Object.cc +++ b/src/native/java_lang_Object.cc @@ -15,7 +15,7 @@ */ #include "jni_internal.h" -#include "object.h" +#include "mirror/object.h" #include "scoped_thread_state_change.h" // TODO: better support for overloading. @@ -27,31 +27,31 @@ namespace art { static jobject Object_internalClone(JNIEnv* env, jobject java_this) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(java_this); + mirror::Object* o = soa.Decode(java_this); return soa.AddLocalReference(o->Clone(soa.Self())); } static void Object_notify(JNIEnv* env, jobject java_this) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(java_this); + mirror::Object* o = soa.Decode(java_this); o->Notify(); } static void Object_notifyAll(JNIEnv* env, jobject java_this) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(java_this); + mirror::Object* o = soa.Decode(java_this); o->NotifyAll(); } static void Object_wait(JNIEnv* env, jobject java_this) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(java_this); + mirror::Object* o = soa.Decode(java_this); o->Wait(); } static void Object_waitJI(JNIEnv* env, jobject java_this, jlong ms, jint ns) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(java_this); + mirror::Object* o = soa.Decode(java_this); o->Wait(ms, ns); } diff --git a/src/native/java_lang_Runtime.cc b/src/native/java_lang_Runtime.cc index d197b7340f..54ccddc9f0 100644 --- a/src/native/java_lang_Runtime.cc +++ b/src/native/java_lang_Runtime.cc @@ -18,10 +18,9 @@ #include #include -#include "class_loader.h" #include "heap.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/class_loader.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" @@ -59,7 +58,7 @@ static jstring 
Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job } } - ClassLoader* classLoader = soa.Decode(javaLoader); + mirror::ClassLoader* classLoader = soa.Decode(javaLoader); std::string detail; JavaVMExt* vm = Runtime::Current()->GetJavaVM(); bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, detail); diff --git a/src/native/java_lang_String.cc b/src/native/java_lang_String.cc index 8b7a69162d..44ab1ca8be 100644 --- a/src/native/java_lang_String.cc +++ b/src/native/java_lang_String.cc @@ -15,7 +15,7 @@ */ #include "jni_internal.h" -#include "object.h" +#include "mirror/string.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" @@ -28,7 +28,7 @@ static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { return -1; } else { ScopedObjectAccess soa(env); - return soa.Decode(javaThis)->CompareTo(soa.Decode(javaRhs)); + return soa.Decode(javaThis)->CompareTo(soa.Decode(javaRhs)); } } @@ -37,14 +37,14 @@ static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint sta // This method does not handle supplementary characters. They're dealt with in managed code. DCHECK_LE(ch, 0xffff); - String* s = soa.Decode(java_this); + mirror::String* s = soa.Decode(java_this); return s->FastIndexOf(ch, start); } static jstring String_intern(JNIEnv* env, jobject javaThis) { ScopedObjectAccess soa(env); - String* s = soa.Decode(javaThis); - String* result = s->Intern(); + mirror::String* s = soa.Decode(javaThis); + mirror::String* result = s->Intern(); return soa.AddLocalReference(result); } diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index d74c9dbc7f..54ee2e914e 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -14,8 +14,12 @@ * limitations under the License. 
*/ +#include "gc/card_table-inl.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/array.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" #include "scoped_thread_state_change.h" /* @@ -101,7 +105,7 @@ static void move32(void* dst, const void* src, size_t n) { namespace art { -static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) +static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", @@ -122,8 +126,8 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, } // Make sure source and destination are both arrays. - Object* srcObject = soa.Decode(javaSrc); - Object* dstObject = soa.Decode(javaDst); + mirror::Object* srcObject = soa.Decode(javaSrc); + mirror::Object* dstObject = soa.Decode(javaDst); if (!srcObject->IsArrayInstance()) { ThrowArrayStoreException_NotAnArray("source", srcObject); return; @@ -132,10 +136,10 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, ThrowArrayStoreException_NotAnArray("destination", dstObject); return; } - Array* srcArray = srcObject->AsArray(); - Array* dstArray = dstObject->AsArray(); - Class* srcComponentType = srcArray->GetClass()->GetComponentType(); - Class* dstComponentType = dstArray->GetClass()->GetComponentType(); + mirror::Array* srcArray = srcObject->AsArray(); + mirror::Array* dstArray = dstObject->AsArray(); + mirror::Class* srcComponentType = srcArray->GetClass()->GetComponentType(); + mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType(); // Bounds checking. 
if (srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length) { @@ -182,7 +186,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, } // Neither class is primitive. Are the types trivially compatible? - const size_t width = sizeof(Object*); + const size_t width = sizeof(mirror::Object*); uint8_t* dstBytes = reinterpret_cast(dstArray->GetRawData(width)); const uint8_t* srcBytes = reinterpret_cast(srcArray->GetRawData(width)); if (dstArray == srcArray || dstComponentType->IsAssignableFrom(srcComponentType)) { @@ -202,20 +206,21 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, // We already dealt with overlapping copies, so we don't need to cope with that case below. CHECK_NE(dstArray, srcArray); - Object* const * srcObjects = reinterpret_cast(srcBytes + srcPos * width); - Object** dstObjects = reinterpret_cast(dstBytes + dstPos * width); - Class* dstClass = dstArray->GetClass()->GetComponentType(); + mirror::Object* const * srcObjects = + reinterpret_cast(srcBytes + srcPos * width); + mirror::Object** dstObjects = reinterpret_cast(dstBytes + dstPos * width); + mirror::Class* dstClass = dstArray->GetClass()->GetComponentType(); // We want to avoid redundant IsAssignableFrom checks where possible, so we cache a class that // we know is assignable to the destination array's component type. 
- Class* lastAssignableElementClass = dstClass; + mirror::Class* lastAssignableElementClass = dstClass; - Object* o = NULL; + mirror::Object* o = NULL; int i = 0; for (; i < length; ++i) { o = srcObjects[i]; if (o != NULL) { - Class* oClass = o->GetClass(); + mirror::Class* oClass = o->GetClass(); if (lastAssignableElementClass == oClass) { dstObjects[i] = o; } else if (dstClass->IsAssignableFrom(oClass)) { @@ -243,7 +248,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) { ScopedObjectAccess soa(env); - Object* o = soa.Decode(javaObject); + mirror::Object* o = soa.Decode(javaObject); return static_cast(o->IdentityHashCode()); } diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index 473369ef1a..ca4be9de9b 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -17,7 +17,7 @@ #include "debugger.h" #include "jni_internal.h" #include "monitor.h" -#include "object.h" +#include "mirror/object.h" #include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" #include "thread.h" @@ -88,7 +88,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) { ScopedObjectAccess soa(env); - Object* object = soa.Decode(java_object); + mirror::Object* object = soa.Decode(java_object); if (object == NULL) { Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); return JNI_FALSE; @@ -111,7 +111,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) { ScopedUtfChars name(env, java_name); { ScopedObjectAccess soa(env); - if (soa.Decode(peer) == soa.Self()->GetPeer()) { + if (soa.Decode(peer) == soa.Self()->GetPeer()) { soa.Self()->SetThreadName(name.c_str()); return; } @@ -149,7 +149,7 @@ static void Thread_nativeSetPriority(JNIEnv* 
env, jobject java_thread, jint new_ static void Thread_sleep(JNIEnv* env, jclass, jobject java_lock, jlong ms, jint ns) { ScopedObjectAccess soa(env); - Object* lock = soa.Decode(java_lock); + mirror::Object* lock = soa.Decode(java_lock); Monitor::Wait(Thread::Current(), lock, ms, ns, true, kSleeping); } diff --git a/src/native/java_lang_VMClassLoader.cc b/src/native/java_lang_VMClassLoader.cc index 4b5c31c64b..02b7c25523 100644 --- a/src/native/java_lang_VMClassLoader.cc +++ b/src/native/java_lang_VMClassLoader.cc @@ -15,8 +15,8 @@ */ #include "class_linker.h" -#include "class_loader.h" #include "jni_internal.h" +#include "mirror/class_loader.h" #include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" #include "zip_archive.h" @@ -25,14 +25,14 @@ namespace art { static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) { ScopedObjectAccess soa(env); - ClassLoader* loader = soa.Decode(javaLoader); + mirror::ClassLoader* loader = soa.Decode(javaLoader); ScopedUtfChars name(env, javaName); if (name.c_str() == NULL) { return NULL; } std::string descriptor(DotToDescriptor(name.c_str())); - Class* c = Runtime::Current()->GetClassLinker()->LookupClass(descriptor.c_str(), loader); + mirror::Class* c = Runtime::Current()->GetClassLinker()->LookupClass(descriptor.c_str(), loader); if (c != NULL && c->IsResolved()) { return soa.AddLocalReference(c); } else { diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index 15aeed211d..2833cb09cb 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -16,7 +16,8 @@ #include "class_linker.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/class.h" +#include "mirror/object-inl.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "sirt_ref.h" @@ -26,21 +27,21 @@ namespace art { static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, 
jobject javaDimArray) { ScopedObjectAccess soa(env); DCHECK(javaElementClass != NULL); - Class* element_class = soa.Decode(javaElementClass); + mirror::Class* element_class = soa.Decode(javaElementClass); DCHECK(element_class->IsClass()); DCHECK(javaDimArray != NULL); - Object* dimensions_obj = soa.Decode(javaDimArray); + mirror::Object* dimensions_obj = soa.Decode(javaDimArray); DCHECK(dimensions_obj->IsArrayInstance()); DCHECK_STREQ(ClassHelper(dimensions_obj->GetClass()).GetDescriptor(), "[I"); - IntArray* dimensions_array = down_cast(dimensions_obj); - Array* new_array = Array::CreateMultiArray(soa.Self(), element_class, dimensions_array); + mirror::IntArray* dimensions_array = down_cast(dimensions_obj); + mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(), element_class, dimensions_array); return soa.AddLocalReference(new_array); } static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) { ScopedObjectAccess soa(env); DCHECK(javaElementClass != NULL); - Class* element_class = soa.Decode(javaElementClass); + mirror::Class* element_class = soa.Decode(javaElementClass); if (UNLIKELY(length < 0)) { soa.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); return NULL; @@ -49,13 +50,13 @@ static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl descriptor += ClassHelper(element_class).GetDescriptor(); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); + mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); if (UNLIKELY(array_class == NULL)) { CHECK(soa.Self()->IsExceptionPending()); return NULL; } DCHECK(array_class->IsArrayClass()); - Array* new_array = Array::Alloc(soa.Self(), array_class, length); + mirror::Array* new_array = mirror::Array::Alloc(soa.Self(), array_class, length); 
return soa.AddLocalReference(new_array); } diff --git a/src/native/java_lang_reflect_Constructor.cc b/src/native/java_lang_reflect_Constructor.cc index ed0d1f127b..fb84dfd2bd 100644 --- a/src/native/java_lang_reflect_Constructor.cc +++ b/src/native/java_lang_reflect_Constructor.cc @@ -16,7 +16,10 @@ #include "class_linker.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" #include "object_utils.h" #include "reflection.h" #include "scoped_thread_state_change.h" @@ -32,8 +35,8 @@ namespace art { */ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) { ScopedObjectAccess soa(env); - AbstractMethod* m = soa.Decode(javaMethod)->AsMethod(); - Class* c = m->GetDeclaringClass(); + mirror::AbstractMethod* m = soa.Decode(javaMethod)->AsMethod(); + mirror::Class* c = m->GetDeclaringClass(); if (c->IsAbstract()) { soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Can't instantiate abstract class %s", PrettyDescriptor(c).c_str()); @@ -45,7 +48,7 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA return NULL; } - Object* receiver = c->AllocObject(soa.Self()); + mirror::Object* receiver = c->AllocObject(soa.Self()); if (receiver == NULL) { return NULL; } diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index fde8f94921..9a2671c167 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -15,15 +15,17 @@ */ #include "class_linker.h" +#include "class_linker-inl.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/field.h" +#include "mirror/field-inl.h" #include "object_utils.h" #include "reflection.h" #include "scoped_thread_state_change.h" namespace art { -static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, +static bool 
GetFieldValue(const ScopedObjectAccess& soa, mirror::Object* o, mirror::Field* f, JValue& value, bool allow_references) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(value.GetJ(), 0LL); @@ -72,16 +74,16 @@ static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, return false; } -static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, Field* f, - Object*& o) +static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, mirror::Field* f, + mirror::Object*& o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (f->IsStatic()) { o = f->GetDeclaringClass(); return true; } - o = soa.Decode(javaObj); - Class* declaringClass = f->GetDeclaringClass(); + o = soa.Decode(javaObj); + mirror::Class* declaringClass = f->GetDeclaringClass(); if (!VerifyObjectInClass(o, declaringClass)) { return false; } @@ -90,8 +92,8 @@ static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, Field* static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) { ScopedObjectAccess soa(env); - Field* f = soa.DecodeField(env->FromReflectedField(javaField)); - Object* o = NULL; + mirror::Field* f = soa.DecodeField(env->FromReflectedField(javaField)); + mirror::Object* o = NULL; if (!CheckReceiver(soa, javaObj, f, o)) { return NULL; } @@ -107,8 +109,8 @@ static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) { static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char dst_descriptor) { ScopedObjectAccess soa(env); - Field* f = soa.DecodeField(env->FromReflectedField(javaField)); - Object* o = NULL; + mirror::Field* f = soa.DecodeField(env->FromReflectedField(javaField)); + mirror::Object* o = NULL; if (!CheckReceiver(soa, javaObj, f, o)) { return JValue(); } @@ -121,7 +123,7 @@ static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, // Widen it if necessary (and possible). 
JValue wide_value; - Class* dst_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(dst_descriptor); + mirror::Class* dst_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(dst_descriptor); if (!ConvertPrimitiveValue(FieldHelper(f).GetTypeAsPrimitiveType(), dst_type->GetPrimitiveType(), field_value, wide_value)) { return JValue(); @@ -161,7 +163,8 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { return GetPrimitiveField(env, javaField, javaObj, 'S').GetS(); } -static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool allow_references) +static void SetFieldValue(mirror::Object* o, mirror::Field* f, const JValue& new_value, + bool allow_references) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { @@ -214,17 +217,17 @@ static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool all static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue) { ScopedObjectAccess soa(env); - Field* f = soa.DecodeField(env->FromReflectedField(javaField)); + mirror::Field* f = soa.DecodeField(env->FromReflectedField(javaField)); // Unbox the value, if necessary. - Object* boxed_value = soa.Decode(javaValue); + mirror::Object* boxed_value = soa.Decode(javaValue); JValue unboxed_value; if (!UnboxPrimitiveForField(boxed_value, FieldHelper(f).GetType(), unboxed_value, f)) { return; } // Check that the receiver is non-null and an instance of the field's declaring class. 
- Object* o = NULL; + mirror::Object* o = NULL; if (!CheckReceiver(soa, javaObj, f, o)) { return; } @@ -235,8 +238,8 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char src_descriptor, const JValue& new_value) { ScopedObjectAccess soa(env); - Field* f = soa.DecodeField(env->FromReflectedField(javaField)); - Object* o = NULL; + mirror::Field* f = soa.DecodeField(env->FromReflectedField(javaField)); + mirror::Object* o = NULL; if (!CheckReceiver(soa, javaObj, f, o)) { return; } @@ -249,7 +252,7 @@ static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, c // Widen the value if necessary (and possible). JValue wide_value; - Class* src_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(src_descriptor); + mirror::Class* src_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(src_descriptor); if (!ConvertPrimitiveValue(src_type->GetPrimitiveType(), fh.GetTypeAsPrimitiveType(), new_value, wide_value)) { return; diff --git a/src/native/java_lang_reflect_Method.cc b/src/native/java_lang_reflect_Method.cc index b1eb6e07d7..14dc6a44ee 100644 --- a/src/native/java_lang_reflect_Method.cc +++ b/src/native/java_lang_reflect_Method.cc @@ -16,7 +16,12 @@ #include "class_linker.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" #include "object_utils.h" #include "reflection.h" #include "scoped_thread_state_change.h" @@ -30,10 +35,10 @@ static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiv static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { ScopedObjectAccess soa(env); - AbstractMethod* proxy_method = soa.Decode(javaMethod)->AsMethod(); + mirror::AbstractMethod* 
proxy_method = soa.Decode(javaMethod)->AsMethod(); CHECK(proxy_method->GetDeclaringClass()->IsProxyClass()); - SynthesizedProxyClass* proxy_class = - down_cast(proxy_method->GetDeclaringClass()); + mirror::SynthesizedProxyClass* proxy_class = + down_cast(proxy_method->GetDeclaringClass()); int throws_index = -1; size_t num_virt_methods = proxy_class->NumVirtualMethods(); for (size_t i = 0; i < num_virt_methods; i++) { @@ -43,13 +48,13 @@ static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { } } CHECK_NE(throws_index, -1); - ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); + mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); return soa.AddLocalReference(declared_exceptions->Clone(soa.Self())); } static jobject Method_findOverriddenMethodNative(JNIEnv* env, jobject javaMethod) { ScopedObjectAccess soa(env); - AbstractMethod* method = soa.Decode(javaMethod)->AsMethod(); + mirror::AbstractMethod* method = soa.Decode(javaMethod)->AsMethod(); return soa.AddLocalReference(method->FindOverriddenMethod()); } diff --git a/src/native/java_lang_reflect_Proxy.cc b/src/native/java_lang_reflect_Proxy.cc index a56726824a..547ce7b38d 100644 --- a/src/native/java_lang_reflect_Proxy.cc +++ b/src/native/java_lang_reflect_Proxy.cc @@ -15,22 +15,28 @@ */ #include "class_linker.h" -#include "class_loader.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/class_loader.h" +#include "mirror/object_array.h" +#include "mirror/string.h" #include "scoped_thread_state_change.h" namespace art { -static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring javaName, jobjectArray javaInterfaces, jobject javaLoader, jobjectArray javaMethods, jobjectArray javaThrows) { +static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring javaName, + jobjectArray javaInterfaces, jobject javaLoader, + jobjectArray javaMethods, jobjectArray javaThrows) { ScopedObjectAccess soa(env); - String* name 
= soa.Decode(javaName); - ObjectArray* interfaces = soa.Decode*>(javaInterfaces); - ClassLoader* loader = soa.Decode(javaLoader); - ObjectArray* methods = soa.Decode*>(javaMethods); - ObjectArray >* throws = soa.Decode >*>(javaThrows); + mirror::String* name = soa.Decode(javaName); + mirror::ObjectArray* interfaces = + soa.Decode*>(javaInterfaces); + mirror::ClassLoader* loader = soa.Decode(javaLoader); + mirror::ObjectArray* methods = + soa.Decode*>(javaMethods); + mirror::ObjectArray >* throws = + soa.Decode >*>(javaThrows); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* result = class_linker->CreateProxyClass(name, interfaces, loader, methods, throws); + mirror::Class* result = class_linker->CreateProxyClass(name, interfaces, loader, methods, throws); return soa.AddLocalReference(result); } diff --git a/src/native/sun_misc_Unsafe.cc b/src/native/sun_misc_Unsafe.cc index cb06a0b944..abb0d5cd5c 100644 --- a/src/native/sun_misc_Unsafe.cc +++ b/src/native/sun_misc_Unsafe.cc @@ -14,15 +14,18 @@ * limitations under the License. */ +#include "atomic.h" +#include "gc/card_table-inl.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" #include "scoped_thread_state_change.h" namespace art { static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint expectedValue, jint newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_release_cas() returns 0 on success, not failure. 
@@ -32,7 +35,7 @@ static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong expectedValue, jlong newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); byte* raw_addr = reinterpret_cast(obj) + offset; volatile int64_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. @@ -42,9 +45,9 @@ static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaExpectedValue, jobject javaNewValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* expectedValue = soa.Decode(javaExpectedValue); - Object* newValue = soa.Decode(javaNewValue); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* expectedValue = soa.Decode(javaExpectedValue); + mirror::Object* newValue = soa.Decode(javaNewValue); byte* raw_addr = reinterpret_cast(obj) + offset; int32_t* address = reinterpret_cast(raw_addr); // Note: android_atomic_cmpxchg() returns 0 on success, not failure. 
@@ -58,98 +61,98 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb static jint Unsafe_getInt(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); return obj->GetField32(MemberOffset(offset), false); } static jint Unsafe_getIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); return obj->GetField32(MemberOffset(offset), true); } static void Unsafe_putInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); obj->SetField32(MemberOffset(offset), newValue, false); } static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); obj->SetField32(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); ANDROID_MEMBAR_STORE(); obj->SetField32(MemberOffset(offset), newValue, false); } static jlong Unsafe_getLong(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); return obj->GetField64(MemberOffset(offset), false); } static jlong Unsafe_getLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); return obj->GetField64(MemberOffset(offset), true); } static void Unsafe_putLong(JNIEnv* env, jobject, jobject 
javaObj, jlong offset, jlong newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, false); } static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); obj->SetField64(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); + mirror::Object* obj = soa.Decode(javaObj); ANDROID_MEMBAR_STORE(); obj->SetField64(MemberOffset(offset), newValue, false); } static jobject Unsafe_getObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* value = obj->GetFieldObject(MemberOffset(offset), true); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* value = obj->GetFieldObject(MemberOffset(offset), true); return soa.AddLocalReference(value); } static jobject Unsafe_getObject(JNIEnv* env, jobject, jobject javaObj, jlong offset) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* value = obj->GetFieldObject(MemberOffset(offset), false); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* value = obj->GetFieldObject(MemberOffset(offset), false); return soa.AddLocalReference(value); } static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* newValue = soa.Decode(javaNewValue); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* newValue = soa.Decode(javaNewValue); obj->SetFieldObject(MemberOffset(offset), newValue, false); } static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject 
javaObj, jlong offset, jobject javaNewValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* newValue = soa.Decode(javaNewValue); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* newValue = soa.Decode(javaNewValue); obj->SetFieldObject(MemberOffset(offset), newValue, true); } static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) { ScopedObjectAccess soa(env); - Object* obj = soa.Decode(javaObj); - Object* newValue = soa.Decode(javaNewValue); + mirror::Object* obj = soa.Decode(javaObj); + mirror::Object* newValue = soa.Decode(javaNewValue); ANDROID_MEMBAR_STORE(); obj->SetFieldObject(MemberOffset(offset), newValue, false); } diff --git a/src/nth_caller_visitor.h b/src/nth_caller_visitor.h index efed16316d..7d9feb622e 100644 --- a/src/nth_caller_visitor.h +++ b/src/nth_caller_visitor.h @@ -17,10 +17,11 @@ #ifndef ART_SRC_NTH_CALLER_VISITOR_H_ #define ART_SRC_NTH_CALLER_VISITOR_H_ -#include "object.h" -#include "thread.h" +#include "mirror/abstract_method.h" +#include "stack.h" namespace art { +class Thread; // Walks up the stack 'n' callers, when used with Thread::WalkStack. 
struct NthCallerVisitor : public StackVisitor { @@ -38,7 +39,7 @@ struct NthCallerVisitor : public StackVisitor { size_t n; size_t count; - AbstractMethod* caller; + mirror::AbstractMethod* caller; }; } // namespace art diff --git a/src/oat.cc b/src/oat.cc index 8032a11c91..b0a9aef4f0 100644 --- a/src/oat.cc +++ b/src/oat.cc @@ -15,6 +15,7 @@ */ #include "oat.h" +#include "utils.h" #include diff --git a/src/oat/jni/arm/jni_internal_arm.cc b/src/oat/jni/arm/jni_internal_arm.cc index 36d436f9b5..4af4f528f5 100644 --- a/src/oat/jni/arm/jni_internal_arm.cc +++ b/src/oat/jni/arm/jni_internal_arm.cc @@ -23,9 +23,9 @@ #include "compiler.h" #include "invoke_arg_array_builder.h" #include "jni_internal.h" +#include "mirror/abstract_method.h" #include "oat/utils/arm/assembler_arm.h" #include "oat/utils/assembler.h" -#include "object.h" namespace art { namespace arm { @@ -131,7 +131,7 @@ CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_ } // Load the code pointer we are about to call. - __ LoadFromOffset(kLoadWord, IP, R0, AbstractMethod::GetCodeOffset().Int32Value()); + __ LoadFromOffset(kLoadWord, IP, R0, mirror::AbstractMethod::GetCodeOffset().Int32Value()); // Do the call. 
__ blx(IP); diff --git a/src/oat/jni/jni_compiler.cc b/src/oat/jni/jni_compiler.cc index ac35143e73..c0298de023 100644 --- a/src/oat/jni/jni_compiler.cc +++ b/src/oat/jni/jni_compiler.cc @@ -118,7 +118,7 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, // Check sirt offset is within frame CHECK_LT(sirt_offset.Uint32Value(), frame_size); __ LoadRef(main_jni_conv->InterproceduralScratchRegister(), - mr_conv->MethodRegister(), AbstractMethod::DeclaringClassOffset()); + mr_conv->MethodRegister(), mirror::AbstractMethod::DeclaringClassOffset()); __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false); __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister()); main_jni_conv->Next(); // in SIRT so move to next argument @@ -269,7 +269,7 @@ CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler, } // 9. Plant call to native code associated with method. - __ Call(main_jni_conv->MethodStackOffset(), AbstractMethod::NativeMethodOffset(), + __ Call(main_jni_conv->MethodStackOffset(), mirror::AbstractMethod::NativeMethodOffset(), mr_conv->InterproceduralScratchRegister()); // 10. Fix differences in result widths. diff --git a/src/oat/jni/mips/jni_internal_mips.cc b/src/oat/jni/mips/jni_internal_mips.cc index 4cfeaa9267..69e86c357c 100644 --- a/src/oat/jni/mips/jni_internal_mips.cc +++ b/src/oat/jni/mips/jni_internal_mips.cc @@ -23,9 +23,9 @@ #include "compiler.h" #include "invoke_arg_array_builder.h" #include "jni_internal.h" +#include "mirror/abstract_method.h" #include "oat/utils/mips/assembler_mips.h" #include "oat/utils/assembler.h" -#include "object.h" namespace art { namespace mips { @@ -128,7 +128,7 @@ CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_ } // Load the code pointer we are about to call. 
- __ LoadFromOffset(kLoadWord, T9, A0, AbstractMethod::GetCodeOffset().Int32Value()); + __ LoadFromOffset(kLoadWord, T9, A0, mirror::AbstractMethod::GetCodeOffset().Int32Value()); // Do the call. __ Jalr(T9); diff --git a/src/oat/jni/x86/jni_internal_x86.cc b/src/oat/jni/x86/jni_internal_x86.cc index 9d41edaba7..fabd283a3d 100644 --- a/src/oat/jni/x86/jni_internal_x86.cc +++ b/src/oat/jni/x86/jni_internal_x86.cc @@ -18,9 +18,9 @@ #include "compiler.h" #include "invoke_arg_array_builder.h" #include "jni_internal.h" +#include "mirror/abstract_method.h" #include "oat/utils/assembler.h" #include "oat/utils/x86/assembler_x86.h" -#include "object.h" namespace art { namespace x86 { @@ -127,7 +127,7 @@ CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_ } } - __ call(Address(EAX, AbstractMethod::GetCodeOffset())); // Call code off of method + __ call(Address(EAX, mirror::AbstractMethod::GetCodeOffset())); // Call code off of method // Pop arguments up to EBX and the return address. 
__ addl(ESP, Immediate(frame_size + pad_size - (2 * kPointerSize))); diff --git a/src/oat/runtime/argument_visitor.h b/src/oat/runtime/argument_visitor.h index 06256caec5..df7bc122fd 100644 --- a/src/oat/runtime/argument_visitor.h +++ b/src/oat/runtime/argument_visitor.h @@ -46,7 +46,7 @@ class ArgumentVisitor { #define STACK_ARG_SKIP 0 #endif - ArgumentVisitor(MethodHelper& caller_mh, AbstractMethod** sp) + ArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : caller_mh_(caller_mh), args_in_regs_(ComputeArgsInRegs(caller_mh)), diff --git a/src/oat/runtime/arm/context_arm.cc b/src/oat/runtime/arm/context_arm.cc index 5bd4b3d02e..2e7605064e 100644 --- a/src/oat/runtime/arm/context_arm.cc +++ b/src/oat/runtime/arm/context_arm.cc @@ -16,7 +16,9 @@ #include "context_arm.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "stack.h" +#include "thread.h" namespace art { namespace arm { @@ -38,7 +40,7 @@ void ArmContext::Reset() { } void ArmContext::FillCalleeSaves(const StackVisitor& fr) { - AbstractMethod* method = fr.GetMethod(); + mirror::AbstractMethod* method = fr.GetMethod(); uint32_t core_spills = method->GetCoreSpillMask(); uint32_t fp_core_spills = method->GetFpSpillMask(); size_t spill_count = __builtin_popcount(core_spills); diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc index 15218b8013..c43b7e279e 100644 --- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc +++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc @@ -28,12 +28,13 @@ extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t); extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. 
-extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class); +extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); extern "C" void art_can_put_array_element_from_code(void*, void*); extern "C" void art_check_cast_from_code(void*, void*); // Debug entrypoints. -extern void DebugMe(AbstractMethod* method, uint32_t info); +extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); extern "C" void art_update_debugger(void*, void*, int32_t, void*); // DexCache entrypoints. @@ -69,11 +70,11 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self); -extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self); -extern Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self); +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self); +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self); // Lock entrypoints. extern "C" void art_lock_object_from_code(void*); @@ -113,7 +114,8 @@ extern "C" int32_t art_indexof(void*, uint32_t, uint32_t, uint32_t); extern "C" int32_t art_string_compareto(void*, void*); // Invoke entrypoints. 
-const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod*, AbstractMethod**, Thread*, +const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod*, + mirror::AbstractMethod**, Thread*, Runtime::TrampolineType); extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_invoke_interface_trampoline(uint32_t, void*); @@ -127,7 +129,8 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_test_suspend(); // Throw entrypoints. -extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thread, AbstractMethod** sp); +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, + mirror::AbstractMethod** sp); extern "C" void art_deliver_exception_from_code(void*); extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit); extern "C" void art_throw_div_zero_from_code(); diff --git a/src/oat/runtime/arm/stub_arm.cc b/src/oat/runtime/arm/stub_arm.cc index 4099ddbc98..90dfffc27f 100644 --- a/src/oat/runtime/arm/stub_arm.cc +++ b/src/oat/runtime/arm/stub_arm.cc @@ -15,10 +15,10 @@ */ #include "jni_internal.h" +#include "mirror/array.h" #include "oat/utils/arm/assembler_arm.h" #include "oat/runtime/oat_support_entrypoints.h" #include "oat/runtime/stub.h" -#include "object.h" #include "stack_indirect_reference_table.h" #include "sirt_ref.h" @@ -27,7 +27,7 @@ namespace art { namespace arm { -ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) { +mirror::ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) { UniquePtr assembler(static_cast(Assembler::Create(kArm))); #if !defined(ART_USE_LLVM_COMPILER) // | Out args | @@ -83,7 +83,7 @@ ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) { assembler->EmitSlowPaths(); size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef resolution_trampoline(self, ByteArray::Alloc(self, cs)); + SirtRef 
resolution_trampoline(self, mirror::ByteArray::Alloc(self, cs)); CHECK(resolution_trampoline.get() != NULL); MemoryRegion code(resolution_trampoline->GetData(), resolution_trampoline->GetLength()); assembler->FinalizeInstructions(code); @@ -91,9 +91,9 @@ ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) { return resolution_trampoline.get(); } -typedef void (*ThrowAme)(AbstractMethod*, Thread*); +typedef void (*ThrowAme)(mirror::AbstractMethod*, Thread*); -ByteArray* CreateAbstractMethodErrorStub() { +mirror::ByteArray* CreateAbstractMethodErrorStub() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); #if !defined(ART_USE_LLVM_COMPILER) // Save callee saves and ready frame for exception delivery @@ -130,7 +130,7 @@ ByteArray* CreateAbstractMethodErrorStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef abstract_stub(self, ByteArray::Alloc(self, cs)); + SirtRef abstract_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(abstract_stub.get() != NULL); MemoryRegion code(abstract_stub->GetData(), abstract_stub->GetLength()); assembler->FinalizeInstructions(code); @@ -138,7 +138,7 @@ ByteArray* CreateAbstractMethodErrorStub() { return abstract_stub.get(); } -ByteArray* CreateJniDlsymLookupStub() { +mirror::ByteArray* CreateJniDlsymLookupStub() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); // Build frame and save argument registers and LR. 
RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR); @@ -159,7 +159,7 @@ ByteArray* CreateJniDlsymLookupStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef jni_stub(self, ByteArray::Alloc(self, cs)); + SirtRef jni_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(jni_stub.get() != NULL); MemoryRegion code(jni_stub->GetData(), jni_stub->GetLength()); assembler->FinalizeInstructions(code); diff --git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h index fe66b91f64..08cf9d86be 100644 --- a/src/oat/runtime/callee_save_frame.h +++ b/src/oat/runtime/callee_save_frame.h @@ -21,11 +21,12 @@ #include "thread.h" namespace art { - +namespace mirror { class AbstractMethod; +} // namespace mirror // Place a special frame at the TOS that will save the callee saves for the given type. -static void FinishCalleeSaveFrameSetup(Thread* self, AbstractMethod** sp, +static void FinishCalleeSaveFrameSetup(Thread* self, mirror::AbstractMethod** sp, Runtime::CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Be aware the store below may well stomp on an incoming argument. 
diff --git a/src/oat/runtime/mips/context_mips.cc b/src/oat/runtime/mips/context_mips.cc index 0c2f9159d0..36eb5b9d9a 100644 --- a/src/oat/runtime/mips/context_mips.cc +++ b/src/oat/runtime/mips/context_mips.cc @@ -16,7 +16,7 @@ #include "context_mips.h" -#include "object.h" +#include "mirror/object.h" namespace art { namespace mips { diff --git a/src/oat/runtime/mips/stub_mips.cc b/src/oat/runtime/mips/stub_mips.cc index 4cb8813800..fda9941de8 100644 --- a/src/oat/runtime/mips/stub_mips.cc +++ b/src/oat/runtime/mips/stub_mips.cc @@ -15,10 +15,10 @@ */ #include "jni_internal.h" +#include "mirror/array.h" #include "oat/runtime/oat_support_entrypoints.h" #include "oat/runtime/stub.h" #include "oat/utils/mips/assembler_mips.h" -#include "object.h" #include "stack_indirect_reference_table.h" #include "sirt_ref.h" @@ -27,7 +27,7 @@ namespace art { namespace mips { -ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) { +mirror::ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) { UniquePtr assembler(static_cast(Assembler::Create(kMips))); #if !defined(ART_USE_LLVM_COMPILER) // | Out args | @@ -113,7 +113,7 @@ ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef resolution_trampoline(self, ByteArray::Alloc(self, cs)); + SirtRef resolution_trampoline(self, mirror::ByteArray::Alloc(self, cs)); CHECK(resolution_trampoline.get() != NULL); MemoryRegion code(resolution_trampoline->GetData(), resolution_trampoline->GetLength()); assembler->FinalizeInstructions(code); @@ -121,9 +121,9 @@ ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) { return resolution_trampoline.get(); } -typedef void (*ThrowAme)(AbstractMethod*, Thread*); +typedef void (*ThrowAme)(mirror::AbstractMethod*, Thread*); -ByteArray* CreateAbstractMethodErrorStub() { +mirror::ByteArray* CreateAbstractMethodErrorStub() { UniquePtr 
assembler(static_cast(Assembler::Create(kMips))); #if !defined(ART_USE_LLVM_COMPILER) // Save callee saves and ready frame for exception delivery @@ -161,7 +161,7 @@ ByteArray* CreateAbstractMethodErrorStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef abstract_stub(self, ByteArray::Alloc(self, cs)); + SirtRef abstract_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(abstract_stub.get() != NULL); MemoryRegion code(abstract_stub->GetData(), abstract_stub->GetLength()); assembler->FinalizeInstructions(code); @@ -169,7 +169,7 @@ ByteArray* CreateAbstractMethodErrorStub() { return abstract_stub.get(); } -ByteArray* CreateJniDlsymLookupStub() { +mirror::ByteArray* CreateJniDlsymLookupStub() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); // Build frame and save argument registers and RA. @@ -203,7 +203,7 @@ ByteArray* CreateJniDlsymLookupStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef jni_stub(self, ByteArray::Alloc(self, cs)); + SirtRef jni_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(jni_stub.get() != NULL); MemoryRegion code(jni_stub->GetData(), jni_stub->GetLength()); assembler->FinalizeInstructions(code); diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h index 113a56c08b..a08a584660 100644 --- a/src/oat/runtime/oat_support_entrypoints.h +++ b/src/oat/runtime/oat_support_entrypoints.h @@ -24,10 +24,12 @@ static_cast(OFFSETOF_MEMBER(EntryPoints, x))) namespace art { - +namespace mirror { +class AbstractMethod; class Class; +class Object; +} // namespace mirror class DvmDex; -class AbstractMethod; class Thread; struct PACKED(4) EntryPoints { @@ -40,12 +42,12 @@ struct PACKED(4) EntryPoints { void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); // Cast - uint32_t (*pInstanceofNonTrivialFromCode)(const Class*, const Class*); + uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, 
const mirror::Class*); void (*pCanPutArrayElementFromCode)(void*, void*); void (*pCheckCastFromCode)(void*, void*); // Debug - void (*pDebugMe)(AbstractMethod*, uint32_t); + void (*pDebugMe)(mirror::AbstractMethod*, uint32_t); void (*pUpdateDebuggerFromCode)(void*, void*, int32_t, void*); // DexCache @@ -77,8 +79,8 @@ struct PACKED(4) EntryPoints { uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self); void (*pJniMethodEnd)(uint32_t cookie, Thread* self); void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self); - Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self); - Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie, + mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self); + mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie, jobject locked, Thread* self); // Locks @@ -114,7 +116,8 @@ struct PACKED(4) EntryPoints { void* (*pMemcpy)(void*, const void*, size_t); // Invocation - const void* (*pUnresolvedDirectMethodTrampolineFromCode)(AbstractMethod*, AbstractMethod**, Thread*, + const void* (*pUnresolvedDirectMethodTrampolineFromCode)(mirror::AbstractMethod*, + mirror::AbstractMethod**, Thread*, Runtime::TrampolineType); void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); void (*pInvokeInterfaceTrampoline)(uint32_t, void*); @@ -129,7 +132,8 @@ struct PACKED(4) EntryPoints { // Throws void (*pDeliverException)(void*); - void (*pThrowAbstractMethodErrorFromCode)(AbstractMethod* m, Thread* thread, AbstractMethod** sp); + void (*pThrowAbstractMethodErrorFromCode)(mirror::AbstractMethod* m, Thread* thread, + mirror::AbstractMethod** sp); void (*pThrowArrayBoundsFromCode)(int32_t, int32_t); void (*pThrowDivZeroFromCode)(); void (*pThrowNoSuchMethodFromCode)(int32_t); diff --git a/src/oat/runtime/stub.h b/src/oat/runtime/stub.h index 0e5f0dd67d..2679793332 100644 --- 
a/src/oat/runtime/stub.h +++ b/src/oat/runtime/stub.h @@ -20,31 +20,35 @@ #include "runtime.h" namespace art { +namespace mirror { +template class PrimitiveArray; +typedef PrimitiveArray ByteArray; +} // namespace mirror namespace arm { - ByteArray* CreateAbstractMethodErrorStub() + mirror::ByteArray* CreateAbstractMethodErrorStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) + mirror::ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* CreateJniDlsymLookupStub() + mirror::ByteArray* CreateJniDlsymLookupStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } namespace mips { - ByteArray* CreateAbstractMethodErrorStub() + mirror::ByteArray* CreateAbstractMethodErrorStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) + mirror::ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* CreateJniDlsymLookupStub() + mirror::ByteArray* CreateJniDlsymLookupStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } namespace x86 { - ByteArray* CreateAbstractMethodErrorStub() + mirror::ByteArray* CreateAbstractMethodErrorStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) + mirror::ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ByteArray* CreateJniDlsymLookupStub() + mirror::ByteArray* CreateJniDlsymLookupStub() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc index fb70285073..5e3af78c94 100644 --- a/src/oat/runtime/support_alloc.cc +++ b/src/oat/runtime/support_alloc.cc @@ -15,51 +15,61 @@ */ #include "callee_save_frame.h" +#include "mirror/class-inl.h" +#include 
"mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "runtime_support.h" namespace art { -extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, AbstractMethod* method, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, false); } -extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, AbstractMethod* method, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, true); } -extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, int32_t component_count, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, self, false); } -extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, AbstractMethod* method, - int32_t component_count, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return 
AllocArrayFromCode(type_idx, method, component_count, self, true); } -extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, - int32_t component_count, Thread* self, - AbstractMethod** sp) +extern "C" mirror::Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false); } -extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, - AbstractMethod* method, - int32_t component_count, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true); diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index 0db743b16c..71a37efe2c 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -15,12 +15,15 @@ */ #include "callee_save_frame.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" #include "runtime_support.h" namespace art { // Assignable test for code, won't throw. 
Null and equality tests already performed -extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) +extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); DCHECK(ref_class != NULL); @@ -28,8 +31,8 @@ extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref } // Check whether it is safe to cast one class to the other, throw exception and return -1 on failure -extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, - AbstractMethod** sp) +extern "C" int artCheckCastFromCode(const mirror::Class* a, const mirror::Class* b, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(a->IsClass()) << PrettyClass(a); DCHECK(b->IsClass()) << PrettyClass(b); @@ -47,13 +50,14 @@ extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self // Tests whether 'element' can be assigned into an array of type 'array_class'. // Returns 0 on success and -1 if an exception is pending. 
-extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class, - Thread* self, AbstractMethod** sp) +extern "C" int artCanPutArrayElementFromCode(const mirror::Object* element, + const mirror::Class* array_class, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(array_class != NULL); // element can't be NULL as we catch this is screened in runtime_support - Class* element_class = element->GetClass(); - Class* component_type = array_class->GetComponentType(); + mirror::Class* element_class = element->GetClass(); + mirror::Class* component_type = array_class->GetComponentType(); if (LIKELY(component_type->IsAssignableFrom(element_class))) { return 0; // Success } else { diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc index e2ca493c05..0d67dd92c3 100644 --- a/src/oat/runtime/support_debug.cc +++ b/src/oat/runtime/support_debug.cc @@ -25,14 +25,14 @@ namespace art { * method entry and offset 0 within the method, we'll use an offset of -1 * to denote method entry. */ -extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, AbstractMethod** sp) +extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); Dbg::UpdateDebugger(dex_pc, self); } // Temporary debugging hook for compiler. 
-extern void DebugMe(AbstractMethod* method, uint32_t info) +extern void DebugMe(mirror::AbstractMethod* method, uint32_t info) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { LOG(INFO) << "DebugMe"; if (method != NULL) { diff --git a/src/oat/runtime/support_deoptimize.cc b/src/oat/runtime/support_deoptimize.cc index 75e671febd..0d88c520d2 100644 --- a/src/oat/runtime/support_deoptimize.cc +++ b/src/oat/runtime/support_deoptimize.cc @@ -16,7 +16,8 @@ #include "callee_save_frame.h" #include "interpreter/interpreter.h" -#include "object.h" // for JValue +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "stack.h" #include "thread.h" @@ -24,7 +25,7 @@ namespace art { -extern "C" uint64_t artDeoptimize(JValue ret_val, Thread* self, AbstractMethod** sp) +extern "C" uint64_t artDeoptimize(JValue ret_val, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); // Return value may hold Object* so avoid suspension. 
@@ -36,7 +37,7 @@ extern "C" uint64_t artDeoptimize(JValue ret_val, Thread* self, AbstractMethod** : StackVisitor(thread, context), shadow_frame_(NULL), runtime_frames_(0) { } virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { if (runtime_frames_ == 0) { runtime_frames_++; @@ -84,7 +85,7 @@ extern "C" uint64_t artDeoptimize(JValue ret_val, Thread* self, AbstractMethod** } -extern "C" JValue artEnterInterpreterFromDeoptimize(Thread* self, AbstractMethod** sp) +extern "C" JValue artEnterInterpreterFromDeoptimize(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); JValue return_value; diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc index 10c7930f3c..da15917ed8 100644 --- a/src/oat/runtime/support_dexcache.cc +++ b/src/oat/runtime/support_dexcache.cc @@ -15,12 +15,17 @@ */ #include "callee_save_frame.h" +#include "class_linker-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "runtime_support.h" namespace art { -extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const AbstractMethod* referrer, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. 
// A class may be accessing another class' fields when it doesn't have access, as access has been @@ -29,18 +34,19 @@ extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Ab return ResolveVerifyAndClinit(type_idx, referrer, self, true, false); } -extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const AbstractMethod* referrer, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); } -extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, - const AbstractMethod* referrer, - Thread* self, - AbstractMethod** sp) +extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
@@ -48,8 +54,9 @@ extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, return ResolveVerifyAndClinit(type_idx, referrer, self, false, true); } -extern "C" String* artResolveStringFromCode(AbstractMethod* referrer, int32_t string_idx, - Thread* self, AbstractMethod** sp) +extern "C" mirror::String* artResolveStringFromCode(mirror::AbstractMethod* referrer, + int32_t string_idx, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveStringFromCode(referrer, string_idx); diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc index 93362473c1..a564fa95df 100644 --- a/src/oat/runtime/support_field.cc +++ b/src/oat/runtime/support_field.cc @@ -15,16 +15,20 @@ */ #include "callee_save_frame.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" #include "runtime_support.h" #include namespace art { -extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const AbstractMethod* referrer, - Thread* self, AbstractMethod** sp) +extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL)) { return field->Get32(field->GetDeclaringClass()); } @@ -36,10 +40,11 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const AbstractMet return 0; // Will throw exception by checking with Thread::Current } -extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const AbstractMethod* referrer, - Thread* self, AbstractMethod** sp) +extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, + const 
mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL)) { return field->Get64(field->GetDeclaringClass()); } @@ -51,26 +56,28 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const AbstractMet return 0; // Will throw exception by checking with Thread::Current } -extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const AbstractMethod* referrer, - Thread* self, AbstractMethod** sp) +extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, + sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(field->GetDeclaringClass()); } FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(Object*)); + field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { return field->GetObj(field->GetDeclaringClass()); } return NULL; // Will throw exception by checking with Thread::Current } -extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, 
referrer, InstancePrimitiveRead, sizeof(int32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get32(obj); } @@ -86,11 +93,11 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, return 0; // Will throw exception by checking with Thread::Current } -extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get64(obj); } @@ -106,16 +113,17 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, return 0; // Will throw exception by checking with Thread::Current } -extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL && obj != NULL)) { return field->GetObj(obj); } FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(Object*)); + 
field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { ThrowNullPointerExceptionForFieldAccess(field, true); @@ -127,10 +135,10 @@ extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, } extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL)) { field->Set32(field->GetDeclaringClass(), new_value); return 0; // success @@ -144,10 +152,10 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, return -1; // failure } -extern "C" int artSet64StaticFromCode(uint32_t field_idx, const AbstractMethod* referrer, - uint64_t new_value, Thread* self, AbstractMethod** sp) +extern "C" int artSet64StaticFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + uint64_t new_value, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != NULL)) { field->Set64(field->GetDeclaringClass(), new_value); return 0; // success @@ -161,11 +169,12 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, const AbstractMethod* return -1; // failure } -extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" int artSetObjStaticFromCode(uint32_t 
field_idx, mirror::Object* new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, + sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { field->SetObj(field->GetDeclaringClass(), new_value); @@ -173,7 +182,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, } } FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(Object*)); + field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { field->SetObj(field->GetDeclaringClass(), new_value); return 0; // success @@ -181,11 +190,11 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, return -1; // failure } -extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { field->Set32(obj, new_value); return 0; // success @@ -203,12 +212,14 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_ return -1; // failure } -extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value, - 
Thread* self, AbstractMethod** sp) +extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); - AbstractMethod* referrer = sp[callee_save->GetFrameSizeInBytes() / sizeof(AbstractMethod*)]; - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); + mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); + mirror::AbstractMethod* referrer = + sp[callee_save->GetFrameSizeInBytes() / sizeof(mirror::AbstractMethod*)]; + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, + sizeof(int64_t)); if (LIKELY(field != NULL && obj != NULL)) { field->Set64(obj, new_value); return 0; // success @@ -227,17 +238,20 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_ return -1; // failure } -extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value, - const AbstractMethod* referrer, Thread* self, - AbstractMethod** sp) +extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::Object* new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, + sizeof(mirror::Object*)); if (LIKELY(field != NULL && obj != NULL)) { field->SetObj(obj, new_value); return 0; // success } FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite, sizeof(Object*)); + field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite, + 
sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { ThrowNullPointerExceptionForFieldAccess(field, false); diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc index 9c6231f1fe..73f832aa35 100644 --- a/src/oat/runtime/support_fillarray.cc +++ b/src/oat/runtime/support_fillarray.cc @@ -16,7 +16,8 @@ #include "callee_save_frame.h" #include "dex_instruction.h" -#include "object.h" +#include "mirror/array.h" +#include "mirror/object-inl.h" namespace art { @@ -35,9 +36,9 @@ namespace art { * ubyte data[size*width] table of data values (may contain a single-byte * padding at the end) */ -extern "C" int artHandleFillArrayDataFromCode(Array* array, +extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, const Instruction::ArrayDataPayload* payload, - Thread* self, AbstractMethod** sp) + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); diff --git a/src/oat/runtime/support_instrumentation.cc b/src/oat/runtime/support_instrumentation.cc index 73e43717a3..f65717aa73 100644 --- a/src/oat/runtime/support_instrumentation.cc +++ b/src/oat/runtime/support_instrumentation.cc @@ -22,8 +22,10 @@ namespace art { -extern "C" const void* artInstrumentationMethodEntryFromCode(AbstractMethod* method, Thread* self, - AbstractMethod** sp, uintptr_t lr) +extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMethod* method, + Thread* self, + mirror::AbstractMethod** sp, + uintptr_t lr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { self->SetTopOfStack(sp, lr); self->VerifyStack(); @@ -41,7 +43,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(AbstractMethod* met return instrumentation->GetSavedCodeFromMap(method); } -extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, AbstractMethod** sp) 
+extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { self->SetTopOfStack(sp, 0); self->VerifyStack(); diff --git a/src/oat/runtime/support_interpreter.cc b/src/oat/runtime/support_interpreter.cc index 98751cc7c5..a02ef2719a 100644 --- a/src/oat/runtime/support_interpreter.cc +++ b/src/oat/runtime/support_interpreter.cc @@ -17,7 +17,9 @@ #include "argument_visitor.h" #include "callee_save_frame.h" #include "interpreter/interpreter.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" namespace art { @@ -25,7 +27,7 @@ namespace art { // Visits arguments on the stack placing them into the shadow frame. class BuildShadowFrameVisitor : public ArgumentVisitor { public: - BuildShadowFrameVisitor(MethodHelper& caller_mh, AbstractMethod** sp, + BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, ShadowFrame& sf, size_t first_arg_reg) : ArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {} @@ -41,7 +43,7 @@ class BuildShadowFrameVisitor : public ArgumentVisitor { } break; case Primitive::kPrimNot: - sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); + sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); break; case Primitive::kPrimBoolean: // Fall-through. case Primitive::kPrimByte: // Fall-through. @@ -65,7 +67,8 @@ class BuildShadowFrameVisitor : public ArgumentVisitor { DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor); }; -extern "C" uint64_t artInterpreterEntry(AbstractMethod* method, Thread* self, AbstractMethod** sp) +extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Ensure we don't get thread suspension until the object arguments are safely in the shadow // frame. 
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc index 7a49489f8d..438ac8f4c3 100644 --- a/src/oat/runtime/support_invoke.cc +++ b/src/oat/runtime/support_invoke.cc @@ -15,16 +15,22 @@ */ #include "callee_save_frame.h" +#include "dex_instruction.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "runtime_support.h" namespace art { // Determine target of interface dispatch. This object is known non-null. -extern "C" uint64_t artInvokeInterfaceTrampoline(AbstractMethod* interface_method, - Object* this_object, AbstractMethod* caller_method, - Thread* self, AbstractMethod** sp) +extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::AbstractMethod* interface_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method; + mirror::AbstractMethod* method; if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex16)) { method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method); if (UNLIKELY(method == NULL)) { @@ -131,10 +137,13 @@ extern "C" uint64_t artInvokeInterfaceTrampoline(AbstractMethod* interface_metho } -static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, AbstractMethod* caller_method, - Thread* self, AbstractMethod** sp, bool access_check, InvokeType type) +static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, mirror::AbstractMethod** sp, bool access_check, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); + mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, + access_check, type); if 
(UNLIKELY(method == NULL)) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type); @@ -163,47 +172,47 @@ static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Abstra // See comments in runtime_support_asm.S extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx, - Object* this_object, - AbstractMethod* caller_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); } extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx, - Object* this_object, - AbstractMethod* caller_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); } extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx, - Object* this_object, - AbstractMethod* caller_method, - Thread* self, - AbstractMethod** sp) + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); } extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, - Object* this_object, - AbstractMethod* caller_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, 
kSuper); } extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx, - Object* this_object, - AbstractMethod* caller_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); } diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index e1ae530b9c..0e21700cac 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -14,7 +14,11 @@ * limitations under the License. */ -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "runtime_support.h" #include "scoped_thread_state_change.h" @@ -28,7 +32,7 @@ extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_) DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); - AbstractMethod* method = self->GetCurrentMethod(); + mirror::AbstractMethod* method = self->GetCurrentMethod(); DCHECK(method != NULL); // Lookup symbol address for method, on failure we'll return NULL with an @@ -81,11 +85,11 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject lo PopLocalReferences(saved_local_ref_cookie, self); } -extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self) +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); - Object* o = self->DecodeJObject(result); // Must decode before pop. + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. 
PopLocalReferences(saved_local_ref_cookie, self); // Process result. if (UNLIKELY(self->GetJniEnv()->check_jni)) { @@ -97,13 +101,13 @@ extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_re return o; } -extern Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self) +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - Object* o = self->DecodeJObject(result); + mirror::Object* o = self->DecodeJObject(result); PopLocalReferences(saved_local_ref_cookie, self); // Process result. if (UNLIKELY(self->GetJniEnv()->check_jni)) { @@ -117,9 +121,10 @@ extern Object* JniMethodEndWithReferenceSynchronized(jobject result, static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { intptr_t value = *arg_ptr; - Object** value_as_jni_rep = reinterpret_cast(value); - Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL; - CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) << value_as_work_around_rep; + mirror::Object** value_as_jni_rep = reinterpret_cast(value); + mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? 
*value_as_jni_rep : NULL; + CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) + << value_as_work_around_rep; *arg_ptr = reinterpret_cast(value_as_work_around_rep); } @@ -137,7 +142,7 @@ extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) // | unused | // | unused | // | unused | <- sp - AbstractMethod* jni_method = self->GetCurrentMethod(); + mirror::AbstractMethod* jni_method = self->GetCurrentMethod(); DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack // Fix up this/jclass argument diff --git a/src/oat/runtime/support_locks.cc b/src/oat/runtime/support_locks.cc index f3a31067dc..38fc48cba0 100644 --- a/src/oat/runtime/support_locks.cc +++ b/src/oat/runtime/support_locks.cc @@ -15,11 +15,12 @@ */ #include "callee_save_frame.h" -#include "object.h" +#include "mirror/object.h" namespace art { -extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, AbstractMethod** sp) +extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, + mirror::AbstractMethod** sp) UNLOCK_FUNCTION(monitor_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK(obj != NULL); // Assumed to have been checked before entry @@ -27,7 +28,8 @@ extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, AbstractMethod return obj->MonitorExit(self) ? 
0 /* Success */ : -1 /* Failure */; } -extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, AbstractMethod** sp) +extern "C" void artLockObjectFromCode(mirror::Object* obj, Thread* thread, + mirror::AbstractMethod** sp) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); DCHECK(obj != NULL); // Assumed to have been checked before entry diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index f0f07a11c2..65e404ab55 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -15,7 +15,9 @@ */ #include "argument_visitor.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "reflection.h" #include "runtime_support.h" @@ -31,7 +33,7 @@ namespace art { // to jobjects. class BuildArgumentVisitor : public ArgumentVisitor { public: - BuildArgumentVisitor(MethodHelper& caller_mh, AbstractMethod** sp, + BuildArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, ScopedObjectAccessUnchecked& soa, std::vector& args) : ArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} @@ -40,7 +42,7 @@ class BuildArgumentVisitor : public ArgumentVisitor { Primitive::Type type = GetParamPrimitiveType(); switch (type) { case Primitive::kPrimNot: { - Object* obj = *reinterpret_cast(GetParamAddress()); + mirror::Object* obj = *reinterpret_cast(GetParamAddress()); val.l = soa_.AddLocalReference(obj); break; } @@ -79,8 +81,9 @@ class BuildArgumentVisitor : public ArgumentVisitor { // which is responsible for recording callee save registers. We explicitly place into jobjects the // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a // field within the proxy object, which will box the primitive arguments and deal with error cases. 
-extern "C" uint64_t artProxyInvokeHandler(AbstractMethod* proxy_method, Object* receiver, - Thread* self, AbstractMethod** sp) +extern "C" uint64_t artProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Ensure we don't get thread suspension until the object arguments are safely in jobjects. const char* old_cause = @@ -106,7 +109,7 @@ extern "C" uint64_t artProxyInvokeHandler(AbstractMethod* proxy_method, Object* args.erase(args.begin()); // Convert proxy method into expected interface method. - AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); DCHECK(interface_method != NULL); DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); jobject interface_method_jobj = soa.AddLocalReference(interface_method); diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index ac188a82f8..dcfea3aae8 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -17,8 +17,12 @@ #if !defined(ART_USE_LLVM_COMPILER) #include "callee_save_frame.h" #endif +#include "class_linker-inl.h" #include "dex_instruction.h" -#include "object.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #if defined(ART_USE_LLVM_COMPILER) #include "nth_caller_visitor.h" @@ -32,7 +36,8 @@ namespace art { #if !defined(ART_USE_LLVM_COMPILER) // Lazily resolve a method. Called by stub code. 
-const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, AbstractMethod** sp, Thread* thread, +const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod* called, + mirror::AbstractMethod** sp, Thread* thread, Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__arm__) @@ -52,7 +57,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // | R0 | // | Method* | <- sp DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); uint32_t pc_offset = 10; uintptr_t caller_pc = regs[pc_offset]; @@ -72,7 +77,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // | ECX | arg1 // | EAX/Method* | <- sp DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 32); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 32); uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); uintptr_t caller_pc = regs[7]; #elif defined(__mips__) @@ -91,13 +96,13 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // | A1 | arg1 // | A0/Method* | <- sp DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); uint32_t pc_offset = 11; uintptr_t caller_pc = regs[pc_offset]; #else UNIMPLEMENTED(FATAL); - AbstractMethod** caller_sp = NULL; + 
mirror::AbstractMethod** caller_sp = NULL; uintptr_t* regs = NULL; uintptr_t caller_pc = 0; #endif @@ -109,7 +114,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // Compute details about the called method (avoid GCs) ClassLinker* linker = Runtime::Current()->GetClassLinker(); - AbstractMethod* caller = *caller_sp; + mirror::AbstractMethod* caller = *caller_sp; InvokeType invoke_type; uint32_t dex_method_idx; #if !defined(__i386__) @@ -173,7 +178,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // Place into local references incoming arguments from the caller's register arguments size_t cur_arg = 1; // skip method_idx in R0, first arg is in R1 if (invoke_type != kStatic) { - Object* obj = reinterpret_cast(regs[cur_arg]); + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); cur_arg++; if (args_in_regs < 3) { // If we thought we had fewer than 3 arguments in registers, account for the receiver @@ -188,7 +193,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs char c = shorty[shorty_index]; shorty_index++; if (c == 'L') { - Object* obj = reinterpret_cast(regs[cur_arg]); + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); soa.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); @@ -199,7 +204,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs char c = shorty[shorty_index]; shorty_index++; if (c == 'L') { - Object* obj = reinterpret_cast(regs[cur_arg]); + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); soa.AddLocalReference(obj); } cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); @@ -214,7 +219,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // Incompatible class change should have been handled in resolve method. CHECK(!called->CheckIncompatibleClassChange(invoke_type)); // Ensure that the called method's class is initialized. 
- Class* called_class = called->GetDeclaringClass(); + mirror::Class* called_class = called->GetDeclaringClass(); linker->EnsureInitialized(called_class, true, true); if (LIKELY(called_class->IsInitialized())) { code = called->GetCode(); @@ -252,7 +257,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs Thread* thread, Runtime::TrampolineType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t dex_pc; - AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); + mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); ClassLinker* linker = Runtime::Current()->GetClassLinker(); InvokeType invoke_type; @@ -299,7 +304,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs // Incompatible class change should have been handled in resolve method. CHECK(!called->CheckIncompatibleClassChange(invoke_type)); // Ensure that the called method's class is initialized. - Class* called_class = called->GetDeclaringClass(); + mirror::Class* called_class = called->GetDeclaringClass(); linker->EnsureInitialized(called_class, true, true); if (LIKELY(called_class->IsInitialized())) { code = called->GetCode(); @@ -342,7 +347,8 @@ const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod* called, Abs #if !defined(ART_USE_LLVM_COMPILER) // Called by the AbstractMethodError. Called by stub code. 
-extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thread, AbstractMethod** sp) +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", @@ -350,7 +356,8 @@ extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thr thread->QuickDeliverException(); } #else // ART_USE_LLVM_COMPILER -extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thread, AbstractMethod**) +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, + mirror::AbstractMethod**) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc index 04038ab2e1..e7117147a9 100644 --- a/src/oat/runtime/support_thread.cc +++ b/src/oat/runtime/support_thread.cc @@ -28,7 +28,7 @@ void CheckSuspendFromCode(Thread* thread) CheckSuspend(thread); } -extern "C" void artTestSuspendFromCode(Thread* thread, AbstractMethod** sp) +extern "C" void artTestSuspendFromCode(Thread* thread, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc index 5bf48e4329..80ba118d96 100644 --- a/src/oat/runtime/support_throw.cc +++ b/src/oat/runtime/support_throw.cc @@ -15,7 +15,7 @@ */ #include "callee_save_frame.h" -#include "object.h" +#include "mirror/object.h" #include "object_utils.h" #include "runtime_support.h" #include "thread.h" @@ -26,21 +26,21 @@ namespace art { // 
Used to implement MOVE_EXCEPTION. extern "C" void* GetAndClearException(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(self->IsExceptionPending()); - Throwable* exception = self->GetException(); + mirror::Throwable* exception = self->GetException(); self->ClearException(); return exception; } // Deliver an exception that's pending on thread helping set up a callee save frame on the way. -extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, AbstractMethod** sp) +extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->QuickDeliverException(); } // Called by generated call to throw an exception. -extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, - AbstractMethod** sp) +extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* thread, + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* * exception may be NULL, in which case this routine should @@ -56,18 +56,18 @@ extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread // Called by generated call to throw a NPE exception. extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); uint32_t dex_pc; - AbstractMethod* throw_method = self->GetCurrentMethod(&dex_pc); + mirror::AbstractMethod* throw_method = self->GetCurrentMethod(&dex_pc); ThrowNullPointerExceptionFromDexPC(throw_method, dex_pc); self->QuickDeliverException(); } // Called by generated call to throw an arithmetic divide by zero exception. 
extern "C" void artThrowDivZeroFromCode(Thread* thread, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); @@ -76,7 +76,7 @@ extern "C" void artThrowDivZeroFromCode(Thread* thread, // Called by generated call to throw an array index out of bounds exception. extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", @@ -84,7 +84,7 @@ extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread thread->QuickDeliverException(); } -extern "C" void artThrowStackOverflowFromCode(Thread* self, AbstractMethod** sp) +extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); ThrowStackOverflowError(self); @@ -92,10 +92,10 @@ extern "C" void artThrowStackOverflowFromCode(Thread* self, AbstractMethod** sp) } extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, - AbstractMethod** sp) + mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - AbstractMethod* method = self->GetCurrentMethod(); + mirror::AbstractMethod* method = self->GetCurrentMethod(); ThrowNoSuchMethodError(method_idx, method); self->QuickDeliverException(); } diff --git a/src/oat/runtime/x86/context_x86.cc b/src/oat/runtime/x86/context_x86.cc index 4efdf81d5e..9d930ca5a8 100644 --- a/src/oat/runtime/x86/context_x86.cc +++ b/src/oat/runtime/x86/context_x86.cc @@ -16,7 +16,8 @@ #include "context_x86.h" -#include "object.h" 
+#include "mirror/abstract_method.h" +#include "stack.h" namespace art { namespace x86 { @@ -34,7 +35,7 @@ void X86Context::Reset() { } void X86Context::FillCalleeSaves(const StackVisitor& fr) { - AbstractMethod* method = fr.GetMethod(); + mirror::AbstractMethod* method = fr.GetMethod(); uint32_t core_spills = method->GetCoreSpillMask(); size_t spill_count = __builtin_popcount(core_spills); DCHECK_EQ(method->GetFpSpillMask(), 0u); diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc index fce2251908..445ae2a5a7 100644 --- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc +++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc @@ -28,12 +28,13 @@ extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t); extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. -extern "C" uint32_t art_is_assignable_from_code(const Class* klass, const Class* ref_class); +extern "C" uint32_t art_is_assignable_from_code(const mirror::Class* klass, + const mirror::Class* ref_class); extern "C" void art_can_put_array_element_from_code(void*, void*); extern "C" void art_check_cast_from_code(void*, void*); // Debug entrypoints. -extern void DebugMe(AbstractMethod* method, uint32_t info); +extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); extern "C" void art_update_debugger(void*, void*, int32_t, void*); // DexCache entrypoints. 
@@ -66,11 +67,11 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self); -extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self); -extern Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self); +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self); +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self); // Lock entrypoints. extern "C" void art_lock_object_from_code(void*); @@ -98,7 +99,8 @@ extern "C" int32_t art_string_compareto(void*, void*); extern "C" void* art_memcpy(void*, const void*, size_t); // Invoke entrypoints. -const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod*, AbstractMethod**, Thread*, +const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod*, + mirror::AbstractMethod**, Thread*, Runtime::TrampolineType); extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_invoke_interface_trampoline(uint32_t, void*); @@ -112,7 +114,8 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_test_suspend(); // Throw entrypoints. 
-extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thread, AbstractMethod** sp); +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, + mirror::AbstractMethod** sp); extern "C" void art_deliver_exception_from_code(void*); extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit); extern "C" void art_throw_div_zero_from_code(); diff --git a/src/oat/runtime/x86/stub_x86.cc b/src/oat/runtime/x86/stub_x86.cc index cade99deb8..92f3c5f238 100644 --- a/src/oat/runtime/x86/stub_x86.cc +++ b/src/oat/runtime/x86/stub_x86.cc @@ -15,10 +15,10 @@ */ #include "jni_internal.h" +#include "mirror/array.h" #include "oat/runtime/oat_support_entrypoints.h" #include "oat/runtime/stub.h" #include "oat/utils/x86/assembler_x86.h" -#include "object.h" #include "stack_indirect_reference_table.h" #include "sirt_ref.h" @@ -27,7 +27,7 @@ namespace art { namespace x86 { -ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) { +mirror::ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) { UniquePtr assembler(static_cast(Assembler::Create(kX86))); #if !defined(ART_USE_LLVM_COMPILER) @@ -93,7 +93,7 @@ ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) { assembler->EmitSlowPaths(); size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef resolution_trampoline(self, ByteArray::Alloc(self, cs)); + SirtRef resolution_trampoline(self, mirror::ByteArray::Alloc(self, cs)); CHECK(resolution_trampoline.get() != NULL); MemoryRegion code(resolution_trampoline->GetData(), resolution_trampoline->GetLength()); assembler->FinalizeInstructions(code); @@ -101,9 +101,9 @@ ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) { return resolution_trampoline.get(); } -typedef void (*ThrowAme)(AbstractMethod*, Thread*); +typedef void (*ThrowAme)(mirror::AbstractMethod*, Thread*); -ByteArray* CreateAbstractMethodErrorStub() { 
+mirror::ByteArray* CreateAbstractMethodErrorStub() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); #if !defined(ART_USE_LLVM_COMPILER) @@ -148,7 +148,7 @@ ByteArray* CreateAbstractMethodErrorStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef abstract_stub(self, ByteArray::Alloc(self, cs)); + SirtRef abstract_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(abstract_stub.get() != NULL); MemoryRegion code(abstract_stub->GetData(), abstract_stub->GetLength()); assembler->FinalizeInstructions(code); @@ -156,7 +156,7 @@ ByteArray* CreateAbstractMethodErrorStub() { return abstract_stub.get(); } -ByteArray* CreateJniDlsymLookupStub() { +mirror::ByteArray* CreateJniDlsymLookupStub() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); // Pad stack to ensure 16-byte alignment @@ -182,7 +182,7 @@ ByteArray* CreateJniDlsymLookupStub() { size_t cs = assembler->CodeSize(); Thread* self = Thread::Current(); - SirtRef jni_stub(self, ByteArray::Alloc(self, cs)); + SirtRef jni_stub(self, mirror::ByteArray::Alloc(self, cs)); CHECK(jni_stub.get() != NULL); MemoryRegion code(jni_stub->GetData(), jni_stub->GetLength()); assembler->FinalizeInstructions(code); diff --git a/src/oat_compilation_unit.h b/src/oat_compilation_unit.h index ec7c9a30e8..7eac3223a3 100644 --- a/src/oat_compilation_unit.h +++ b/src/oat_compilation_unit.h @@ -22,11 +22,12 @@ #include namespace art { - +namespace mirror { class ClassLoader; +class DexCache; +} // namespace mirror class ClassLinker; class DexFile; -class DexCache; class OatCompilationUnit { public: diff --git a/src/oat_file.cc b/src/oat_file.cc index 8229f630b1..b806df82d6 100644 --- a/src/oat_file.cc +++ b/src/oat_file.cc @@ -22,7 +22,11 @@ #include "base/unix_file/fd_file.h" #include "elf_file.h" #include "oat.h" +#include "mirror/class.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" #include "os.h" +#include "utils.h" namespace art { @@ -296,7 +300,7 
@@ const OatFile::OatClass* OatFile::OatDexFile::GetOatClass(uint32_t class_def_ind const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset; CHECK_LT(oat_class_pointer, oat_file_->End()); - Class::Status status = *reinterpret_cast(oat_class_pointer); + mirror::Class::Status status = *reinterpret_cast(oat_class_pointer); const byte* methods_pointer = oat_class_pointer + sizeof(status); CHECK_LT(methods_pointer, oat_file_->End()); @@ -307,13 +311,13 @@ const OatFile::OatClass* OatFile::OatDexFile::GetOatClass(uint32_t class_def_ind } OatFile::OatClass::OatClass(const OatFile* oat_file, - Class::Status status, + mirror::Class::Status status, const OatMethodOffsets* methods_pointer) : oat_file_(oat_file), status_(status), methods_pointer_(methods_pointer) {} OatFile::OatClass::~OatClass() {} -Class::Status OatFile::OatClass::GetStatus() const { +mirror::Class::Status OatFile::OatClass::GetStatus() const { return status_; } @@ -392,9 +396,9 @@ uint32_t OatFile::OatMethod::GetCodeSize() const { return reinterpret_cast(code)[-1]; } -AbstractMethod::InvokeStub* OatFile::OatMethod::GetInvokeStub() const { +mirror::AbstractMethod::InvokeStub* OatFile::OatMethod::GetInvokeStub() const { const byte* stub = GetOatPointer(invoke_stub_offset_); - return reinterpret_cast(const_cast(stub)); + return reinterpret_cast(const_cast(stub)); } uint32_t OatFile::OatMethod::GetInvokeStubSize() const { @@ -413,7 +417,7 @@ const void* OatFile::OatMethod::GetProxyStub() const { } #endif -void OatFile::OatMethod::LinkMethodPointers(AbstractMethod* method) const { +void OatFile::OatMethod::LinkMethodPointers(mirror::AbstractMethod* method) const { CHECK(method != NULL); method->SetCode(GetCode()); method->SetFrameSizeInBytes(frame_size_in_bytes_); @@ -425,7 +429,7 @@ void OatFile::OatMethod::LinkMethodPointers(AbstractMethod* method) const { method->SetInvokeStub(GetInvokeStub()); } -void OatFile::OatMethod::LinkMethodOffsets(AbstractMethod* method) const { +void 
OatFile::OatMethod::LinkMethodOffsets(mirror::AbstractMethod* method) const { CHECK(method != NULL); method->SetOatCodeOffset(GetCodeOffset()); method->SetFrameSizeInBytes(GetFrameSizeInBytes()); diff --git a/src/oat_file.h b/src/oat_file.h index ff5cd80ebf..bff48faa7f 100644 --- a/src/oat_file.h +++ b/src/oat_file.h @@ -20,8 +20,11 @@ #include #include -#include "globals.h" -#include "object.h" +#include "dex_file.h" +#include "invoke_type.h" +#include "mem_map.h" +#include "mirror/abstract_method.h" +#include "oat.h" #include "os.h" namespace art { @@ -65,10 +68,10 @@ class OatFile { class OatMethod { public: // Link Method for execution using the contents of this OatMethod - void LinkMethodPointers(AbstractMethod* method) const; + void LinkMethodPointers(mirror::AbstractMethod* method) const; // Link Method for image writing using the contents of this OatMethod - void LinkMethodOffsets(AbstractMethod* method) const; + void LinkMethodOffsets(mirror::AbstractMethod* method) const; uint32_t GetCodeOffset() const { return code_offset_; @@ -108,7 +111,7 @@ class OatFile { return GetOatPointer(native_gc_map_offset_); } - AbstractMethod::InvokeStub* GetInvokeStub() const; + mirror::AbstractMethod::InvokeStub* GetInvokeStub() const; uint32_t GetInvokeStubSize() const; #if defined(ART_USE_LLVM_COMPILER) @@ -161,7 +164,7 @@ class OatFile { class OatClass { public: - Class::Status GetStatus() const; + mirror::Class::Status GetStatus() const; // get the OatMethod entry based on its index into the class // defintion. 
direct methods come first, followed by virtual @@ -172,11 +175,11 @@ class OatFile { private: OatClass(const OatFile* oat_file, - Class::Status status, + mirror::Class::Status status, const OatMethodOffsets* methods_pointer); const OatFile* oat_file_; - const Class::Status status_; + const mirror::Class::Status status_; const OatMethodOffsets* methods_pointer_; friend class OatDexFile; diff --git a/src/oat_test.cc b/src/oat_test.cc index ec0fa7da0f..8da3626e8c 100644 --- a/src/oat_test.cc +++ b/src/oat_test.cc @@ -14,6 +14,9 @@ * limitations under the License. */ +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object_array-inl.h" #include "oat_file.h" #include "oat_writer.h" #include "vector_output_stream.h" @@ -24,7 +27,7 @@ namespace art { class OatTest : public CommonTest { protected: - void CheckMethod(AbstractMethod* method, + void CheckMethod(mirror::AbstractMethod* method, const OatFile::OatMethod& oat_method, const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -119,7 +122,7 @@ TEST_F(OatTest, WriteRead) { UniquePtr oat_class(oat_dex_file->GetOatClass(i)); - Class* klass = class_linker->FindClass(descriptor, NULL); + mirror::Class* klass = class_linker->FindClass(descriptor, NULL); size_t method_index = 0; for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) { diff --git a/src/oat_writer.cc b/src/oat_writer.cc index 113bebaa53..6fcea5d8e2 100644 --- a/src/oat_writer.cc +++ b/src/oat_writer.cc @@ -21,7 +21,9 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" -#include "class_loader.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/array.h" +#include "mirror/class_loader.h" #include "os.h" #include "output_stream.h" #include "safe_map.h" @@ -134,13 +136,13 @@ size_t OatWriter::InitOatClasses(size_t offset) { Compiler::ClassReference class_ref = Compiler::ClassReference(dex_file, class_def_index); CompiledClass* compiled_class = 
compiler_->GetCompiledClass(class_ref); - Class::Status status; + mirror::Class::Status status; if (compiled_class != NULL) { status = compiled_class->GetStatus(); } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) { - status = Class::kStatusError; + status = mirror::Class::kStatusError; } else { - status = Class::kStatusNotReady; + status = mirror::Class::kStatusNotReady; } OatClass* oat_class = new OatClass(status, num_methods); @@ -307,16 +309,18 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, // We expect GC maps except when the class hasn't been verified or the method is native Compiler::ClassReference class_ref = Compiler::ClassReference(dex_file, class_def_index); CompiledClass* compiled_class = compiler_->GetCompiledClass(class_ref); - Class::Status status; + mirror::Class::Status status; if (compiled_class != NULL) { status = compiled_class->GetStatus(); } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) { - status = Class::kStatusError; + status = mirror::Class::kStatusError; } else { - status = Class::kStatusNotReady; + status = mirror::Class::kStatusNotReady; } - CHECK(gc_map_size != 0 || is_native || status < Class::kStatusVerified) - << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " " << (status < Class::kStatusVerified) << " " << status << " " << PrettyMethod(method_idx, *dex_file); + CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified) + << &gc_map << " " << gc_map_size << " " << (is_native ? 
"true" : "false") << " " + << (status < mirror::Class::kStatusVerified) << " " << status << " " + << PrettyMethod(method_idx, *dex_file); #endif // Deduplicate GC maps @@ -396,11 +400,11 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, if (compiler_->IsImage()) { ClassLinker* linker = Runtime::Current()->GetClassLinker(); - DexCache* dex_cache = linker->FindDexCache(*dex_file); + mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file); // Unchecked as we hold mutator_lock_ on entry. ScopedObjectAccessUnchecked soa(Thread::Current()); - AbstractMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, - NULL, NULL, type); + mirror::AbstractMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, + NULL, NULL, type); CHECK(method != NULL); method->SetFrameSizeInBytes(frame_size_in_bytes); method->SetCoreSpillMask(core_spill_mask); @@ -836,7 +840,7 @@ bool OatWriter::OatDexFile::Write(OutputStream& out) const { return true; } -OatWriter::OatClass::OatClass(Class::Status status, uint32_t methods_count) { +OatWriter::OatClass::OatClass(mirror::Class::Status status, uint32_t methods_count) { status_ = status; method_offsets_.resize(methods_count); } diff --git a/src/oat_writer.h b/src/oat_writer.h index 2bcbbc526e..e1638d0fc6 100644 --- a/src/oat_writer.h +++ b/src/oat_writer.h @@ -22,10 +22,9 @@ #include #include "compiler.h" -#include "dex_cache.h" #include "mem_map.h" #include "oat.h" -#include "object.h" +#include "mirror/class.h" #include "safe_map.h" #include "UniquePtr.h" @@ -136,13 +135,13 @@ class OatWriter { class OatClass { public: - explicit OatClass(Class::Status status, uint32_t methods_count); + explicit OatClass(mirror::Class::Status status, uint32_t methods_count); size_t SizeOf() const; void UpdateChecksum(OatHeader& oat_header) const; bool Write(OutputStream& out) const; // data to write - Class::Status status_; + mirror::Class::Status status_; std::vector method_offsets_; private: diff 
--git a/src/oatdump.cc b/src/oatdump.cc index 5ee433c4e1..3fe62bce24 100644 --- a/src/oatdump.cc +++ b/src/oatdump.cc @@ -25,6 +25,7 @@ #include "base/stringpiece.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" +#include "class_linker-inl.h" #include "dex_instruction.h" #include "disassembler.h" #include "gc_map.h" @@ -32,6 +33,12 @@ #include "gc/space.h" #include "image.h" #include "indenter.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "oat.h" #include "object_utils.h" #include "os.h" @@ -162,7 +169,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetOatCode(AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const void* GetOatCode(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(m); for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; @@ -580,8 +587,8 @@ class OatDumper { uint32_t method_access_flags, uint32_t dex_pc) { bool first = true; ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file); - ClassLoader* class_loader = NULL; + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file); + mirror::ClassLoader* class_loader = NULL; std::vector kinds = verifier::MethodVerifier::DescribeVRegs(dex_method_idx, dex_file, dex_cache, class_loader, class_def_idx, code_item, NULL, @@ -633,8 +640,8 @@ class OatDumper { uint32_t method_access_flags) { if ((method_access_flags & kAccNative) == 0) { ScopedObjectAccess soa(Thread::Current()); - DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file); - ClassLoader* class_loader = NULL; + mirror::DexCache* dex_cache = 
Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file); + mirror::ClassLoader* class_loader = NULL; verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache, class_loader, class_def_idx, code_item, NULL, method_access_flags); @@ -713,17 +720,17 @@ class ImageDumper { for (int i = 0; i < ImageHeader::kImageRootsMax; i++) { ImageHeader::ImageRoot image_root = static_cast(i); const char* image_root_description = image_roots_descriptions_[i]; - Object* image_root_object = image_header_.GetImageRoot(image_root); + mirror::Object* image_root_object = image_header_.GetImageRoot(image_root); indent1_os << StringPrintf("%s: %p\n", image_root_description, image_root_object); if (image_root_object->IsObjectArray()) { Indenter indent2_filter(indent1_os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent2_os(&indent2_filter); // TODO: replace down_cast with AsObjectArray (g++ currently has a problem with this) - ObjectArray* image_root_object_array - = down_cast*>(image_root_object); + mirror::ObjectArray* image_root_object_array + = down_cast*>(image_root_object); // = image_root_object->AsObjectArray(); for (int i = 0; i < image_root_object_array->GetLength(); i++) { - Object* value = image_root_object_array->Get(i); + mirror::Object* value = image_root_object_array->Get(i); if (value != NULL) { indent2_os << i << ": "; PrettyObjectValue(indent2_os, value->GetClass(), value); @@ -737,7 +744,7 @@ class ImageDumper { os << "\n"; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Object* oat_location_object = image_header_.GetImageRoot(ImageHeader::kOatLocation); + mirror::Object* oat_location_object = image_header_.GetImageRoot(ImageHeader::kOatLocation); std::string oat_location(oat_location_object->AsString()->ToModifiedUtf8()); os << "OAT LOCATION: " << oat_location; if (!host_prefix_.empty()) { @@ -811,36 +818,36 @@ class ImageDumper { } private: - static void PrettyObjectValue(std::ostream& os, Class* type, 
Object* value) + static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(type != NULL); if (value == NULL) { os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str()); } else if (type->IsStringClass()) { - String* string = value->AsString(); + mirror::String* string = value->AsString(); os << StringPrintf("%p String: %s\n", string, PrintableString(string->ToModifiedUtf8()).c_str()); } else if (type->IsClassClass()) { - Class* klass = value->AsClass(); + mirror::Class* klass = value->AsClass(); os << StringPrintf("%p Class: %s\n", klass, PrettyDescriptor(klass).c_str()); } else if (type->IsFieldClass()) { - Field* field = value->AsField(); + mirror::Field* field = value->AsField(); os << StringPrintf("%p Field: %s\n", field, PrettyField(field).c_str()); } else if (type->IsMethodClass()) { - AbstractMethod* method = value->AsMethod(); + mirror::AbstractMethod* method = value->AsMethod(); os << StringPrintf("%p Method: %s\n", method, PrettyMethod(method).c_str()); } else { os << StringPrintf("%p %s\n", value, PrettyDescriptor(type).c_str()); } } - static void PrintField(std::ostream& os, Field* field, Object* obj) + static void PrintField(std::ostream& os, mirror::Field* field, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FieldHelper fh(field); const char* descriptor = fh.GetTypeDescriptor(); os << StringPrintf("%s: ", fh.GetName()); if (descriptor[0] != 'L' && descriptor[0] != '[') { - Class* type = fh.GetType(); + mirror::Class* type = fh.GetType(); if (type->IsPrimitiveLong()) { os << StringPrintf("%lld (0x%llx)\n", field->Get64(obj), field->Get64(obj)); } else if (type->IsPrimitiveDouble()) { @@ -854,7 +861,7 @@ class ImageDumper { } else { // Get the value, don't compute the type unless it is non-null as we don't want // to cause class loading. 
- Object* value = field->GetObj(obj); + mirror::Object* value = field->GetObj(obj); if (value == NULL) { os << StringPrintf("null %s\n", PrettyDescriptor(descriptor).c_str()); } else { @@ -863,26 +870,26 @@ class ImageDumper { } } - static void DumpFields(std::ostream& os, Object* obj, Class* klass) + static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* super = klass->GetSuperClass(); + mirror::Class* super = klass->GetSuperClass(); if (super != NULL) { DumpFields(os, obj, super); } - ObjectArray* fields = klass->GetIFields(); + mirror::ObjectArray* fields = klass->GetIFields(); if (fields != NULL) { for (int32_t i = 0; i < fields->GetLength(); i++) { - Field* field = fields->Get(i); + mirror::Field* field = fields->Get(i); PrintField(os, field, obj); } } } - bool InDumpSpace(const Object* object) { + bool InDumpSpace(const mirror::Object* object) { return image_space_.Contains(object); } - const void* GetOatCodeBegin(AbstractMethod* m) + const void* GetOatCodeBegin(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); @@ -895,7 +902,7 @@ class ImageDumper { return code; } - uint32_t GetOatCodeSize(AbstractMethod* m) + uint32_t GetOatCodeSize(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { @@ -904,7 +911,7 @@ class ImageDumper { return oat_code_begin[-1]; } - const void* GetOatCodeEnd(AbstractMethod* m) + const void* GetOatCodeEnd(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { @@ -913,7 +920,7 @@ class ImageDumper { return oat_code_begin + GetOatCodeSize(m); } - static void Callback(Object* obj, void* arg) + static void Callback(mirror::Object* 
obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(obj != NULL); DCHECK(arg != NULL); @@ -928,12 +935,12 @@ class ImageDumper { state->stats_.alignment_bytes += alignment_bytes; std::ostream& os = *state->os_; - Class* obj_class = obj->GetClass(); + mirror::Class* obj_class = obj->GetClass(); if (obj_class->IsArrayClass()) { os << StringPrintf("%p: %s length:%d\n", obj, PrettyDescriptor(obj_class).c_str(), obj->AsArray()->GetLength()); } else if (obj->IsClass()) { - Class* klass = obj->AsClass(); + mirror::Class* klass = obj->AsClass(); os << StringPrintf("%p: java.lang.Class \"%s\" (", obj, PrettyDescriptor(klass).c_str()) << klass->GetStatus() << ")\n"; } else if (obj->IsField()) { @@ -952,10 +959,10 @@ class ImageDumper { std::ostream indent_os(&indent_filter); DumpFields(indent_os, obj, obj_class); if (obj->IsObjectArray()) { - ObjectArray* obj_array = obj->AsObjectArray(); + mirror::ObjectArray* obj_array = obj->AsObjectArray(); int32_t length = obj_array->GetLength(); for (int32_t i = 0; i < length; i++) { - Object* value = obj_array->Get(i); + mirror::Object* value = obj_array->Get(i); size_t run = 0; for (int32_t j = i + 1; j < length; j++) { if (value == obj_array->Get(j)) { @@ -970,22 +977,22 @@ class ImageDumper { indent_os << StringPrintf("%d to %zd: ", i, i + run); i = i + run; } - Class* value_class = value == NULL ? obj_class->GetComponentType() : value->GetClass(); + mirror::Class* value_class = value == NULL ? 
obj_class->GetComponentType() : value->GetClass(); PrettyObjectValue(indent_os, value_class, value); } } else if (obj->IsClass()) { - ObjectArray* sfields = obj->AsClass()->GetSFields(); + mirror::ObjectArray* sfields = obj->AsClass()->GetSFields(); if (sfields != NULL) { indent_os << "STATICS:\n"; Indenter indent2_filter(indent_os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent2_os(&indent2_filter); for (int32_t i = 0; i < sfields->GetLength(); i++) { - Field* field = sfields->Get(i); + mirror::Field* field = sfields->Get(i); PrintField(indent2_os, field, field->GetDeclaringClass()); } } } else if (obj->IsMethod()) { - AbstractMethod* method = obj->AsMethod(); + mirror::AbstractMethod* method = obj->AsMethod(); if (method->IsNative()) { DCHECK(method->GetNativeGcMap() == NULL) << PrettyMethod(method); DCHECK(method->GetMappingTable() == NULL) << PrettyMethod(method); @@ -1110,7 +1117,7 @@ class ImageDumper { size_t dex_instruction_bytes; - std::vector method_outlier; + std::vector method_outlier; std::vector method_outlier_size; std::vector method_outlier_expansion; std::vector > oat_dex_file_sizes; @@ -1163,7 +1170,7 @@ class ImageDumper { return (static_cast(size) / static_cast(object_bytes)) * 100; } - void ComputeOutliers(size_t total_size, double expansion, AbstractMethod* method) { + void ComputeOutliers(size_t total_size, double expansion, mirror::AbstractMethod* method) { method_outlier_size.push_back(total_size); method_outlier_expansion.push_back(expansion); method_outlier.push_back(method); diff --git a/src/oatexec.cc b/src/oatexec.cc index ede4799877..c05266ca52 100644 --- a/src/oatexec.cc +++ b/src/oatexec.cc @@ -23,7 +23,7 @@ #include "base/logging.h" #include "jni.h" -#include "object.h" +#include "modifiers.h" #include "ScopedLocalRef.h" #include "toStringArray.h" #include "UniquePtr.h" diff --git a/src/object.cc b/src/object.cc deleted file mode 100644 index 10bf672655..0000000000 --- a/src/object.cc +++ /dev/null @@ -1,1828 +0,0 @@ 
-/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "object.h" - -#include - -#include -#include -#include -#include - -#include "base/logging.h" -#include "class_linker.h" -#include "class_loader.h" -#include "dex_cache.h" -#include "dex_file.h" -#include "globals.h" -#include "heap.h" -#include "intern_table.h" -#include "interpreter/interpreter.h" -#include "monitor.h" -#include "object_utils.h" -#include "runtime.h" -#include "runtime_support.h" -#include "sirt_ref.h" -#include "stack.h" -#include "utils.h" -#include "well_known_classes.h" - -namespace art { - -BooleanArray* Object::AsBooleanArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveBoolean()); - return down_cast(this); -} - -ByteArray* Object::AsByteArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte()); - return down_cast(this); -} - -CharArray* Object::AsCharArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveChar()); - return down_cast(this); -} - -ShortArray* Object::AsShortArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort()); - return down_cast(this); -} - -IntArray* Object::AsIntArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveInt() || - 
GetClass()->GetComponentType()->IsPrimitiveFloat()); - return down_cast(this); -} - -LongArray* Object::AsLongArray() { - DCHECK(GetClass()->IsArrayClass()); - DCHECK(GetClass()->GetComponentType()->IsPrimitiveLong() || - GetClass()->GetComponentType()->IsPrimitiveDouble()); - return down_cast(this); -} - -String* Object::AsString() { - DCHECK(GetClass()->IsStringClass()); - return down_cast(this); -} - -Throwable* Object::AsThrowable() { - DCHECK(GetClass()->IsThrowableClass()); - return down_cast(this); -} - -Object* Object::Clone(Thread* self) { - Class* c = GetClass(); - DCHECK(!c->IsClassClass()); - - // Object::SizeOf gets the right size even if we're an array. - // Using c->AllocObject() here would be wrong. - size_t num_bytes = SizeOf(); - Heap* heap = Runtime::Current()->GetHeap(); - SirtRef copy(self, heap->AllocObject(self, c, num_bytes)); - if (copy.get() == NULL) { - return NULL; - } - - // Copy instance data. We assume memcpy copies by words. - // TODO: expose and use move32. - byte* src_bytes = reinterpret_cast(this); - byte* dst_bytes = reinterpret_cast(copy.get()); - size_t offset = sizeof(Object); - memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset); - - // Perform write barriers on copied object references. 
- if (c->IsArrayClass()) { - if (!c->GetComponentType()->IsPrimitive()) { - const ObjectArray* array = copy->AsObjectArray(); - heap->WriteBarrierArray(copy.get(), 0, array->GetLength()); - } - } else { - for (const Class* klass = c; klass != NULL; klass = klass->GetSuperClass()) { - size_t num_reference_fields = klass->NumReferenceInstanceFields(); - for (size_t i = 0; i < num_reference_fields; ++i) { - Field* field = klass->GetInstanceField(i); - MemberOffset field_offset = field->GetOffset(); - const Object* ref = copy->GetFieldObject(field_offset, false); - heap->WriteBarrierField(copy.get(), field_offset, ref); - } - } - } - - if (c->IsFinalizable()) { - heap->AddFinalizerReference(Thread::Current(), copy.get()); - } - - return copy.get(); -} - -uint32_t Object::GetThinLockId() { - return Monitor::GetThinLockId(monitor_); -} - -void Object::MonitorEnter(Thread* thread) { - Monitor::MonitorEnter(thread, this); -} - -bool Object::MonitorExit(Thread* thread) { - return Monitor::MonitorExit(thread, this); -} - -void Object::Notify() { - Monitor::Notify(Thread::Current(), this); -} - -void Object::NotifyAll() { - Monitor::NotifyAll(Thread::Current(), this); -} - -void Object::Wait() { - Monitor::Wait(Thread::Current(), this, 0, 0, true, kWaiting); -} - -void Object::Wait(int64_t ms, int32_t ns) { - Monitor::Wait(Thread::Current(), this, ms, ns, true, kTimedWaiting); -} - -#if VERIFY_OBJECT_ENABLED -void Object::CheckFieldAssignment(MemberOffset field_offset, const Object* new_value) { - const Class* c = GetClass(); - if (Runtime::Current()->GetClassLinker() == NULL || - !Runtime::Current()->GetHeap()->IsObjectValidationEnabled() || - !c->IsResolved()) { - return; - } - for (const Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) { - ObjectArray* fields = cur->GetIFields(); - if (fields != NULL) { - size_t num_ref_ifields = cur->NumReferenceInstanceFields(); - for (size_t i = 0; i < num_ref_ifields; ++i) { - Field* field = fields->Get(i); - if 
(field->GetOffset().Int32Value() == field_offset.Int32Value()) { - FieldHelper fh(field); - CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass())); - return; - } - } - } - } - if (c->IsArrayClass()) { - // Bounds and assign-ability done in the array setter. - return; - } - if (IsClass()) { - ObjectArray* fields = AsClass()->GetSFields(); - if (fields != NULL) { - size_t num_ref_sfields = AsClass()->NumReferenceStaticFields(); - for (size_t i = 0; i < num_ref_sfields; ++i) { - Field* field = fields->Get(i); - if (field->GetOffset().Int32Value() == field_offset.Int32Value()) { - FieldHelper fh(field); - CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass())); - return; - } - } - } - } - LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast(this) - << " of type " << PrettyDescriptor(c) << " at offset " << field_offset; -} -#endif - -// TODO: get global references for these -Class* Field::java_lang_reflect_Field_ = NULL; - -void Field::SetClass(Class* java_lang_reflect_Field) { - CHECK(java_lang_reflect_Field_ == NULL); - CHECK(java_lang_reflect_Field != NULL); - java_lang_reflect_Field_ = java_lang_reflect_Field; -} - -void Field::ResetClass() { - CHECK(java_lang_reflect_Field_ != NULL); - java_lang_reflect_Field_ = NULL; -} - -void Field::SetOffset(MemberOffset num_bytes) { - DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); -#if 0 // TODO enable later in boot and under !NDEBUG - FieldHelper fh(this); - Primitive::Type type = fh.GetTypeAsPrimitiveType(); - if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) { - DCHECK_ALIGNED(num_bytes.Uint32Value(), 8); - } -#endif - SetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), num_bytes.Uint32Value(), false); -} - -uint32_t Field::Get32(const Object* object) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - return object->GetField32(GetOffset(), 
IsVolatile()); -} - -void Field::Set32(Object* object, uint32_t new_value) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - object->SetField32(GetOffset(), new_value, IsVolatile()); -} - -uint64_t Field::Get64(const Object* object) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - return object->GetField64(GetOffset(), IsVolatile()); -} - -void Field::Set64(Object* object, uint64_t new_value) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - object->SetField64(GetOffset(), new_value, IsVolatile()); -} - -Object* Field::GetObj(const Object* object) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - return object->GetFieldObject(GetOffset(), IsVolatile()); -} - -void Field::SetObj(Object* object, const Object* new_value) const { - DCHECK(object != NULL) << PrettyField(this); - DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); - object->SetFieldObject(GetOffset(), new_value, IsVolatile()); -} - -bool Field::GetBoolean(const Object* object) const { - DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - return Get32(object); -} - -void Field::SetBoolean(Object* object, bool z) const { - DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Set32(object, z); -} - -int8_t Field::GetByte(const Object* object) const { - DCHECK_EQ(Primitive::kPrimByte, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - return Get32(object); -} - -void Field::SetByte(Object* object, int8_t b) const { - DCHECK_EQ(Primitive::kPrimByte, 
FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Set32(object, b); -} - -uint16_t Field::GetChar(const Object* object) const { - DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - return Get32(object); -} - -void Field::SetChar(Object* object, uint16_t c) const { - DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Set32(object, c); -} - -int16_t Field::GetShort(const Object* object) const { - DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - return Get32(object); -} - -void Field::SetShort(Object* object, int16_t s) const { - DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Set32(object, s); -} - -int32_t Field::GetInt(const Object* object) const { -#ifndef NDEBUG - Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); - CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this); -#endif - return Get32(object); -} - -void Field::SetInt(Object* object, int32_t i) const { -#ifndef NDEBUG - Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); - CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this); -#endif - Set32(object, i); -} - -int64_t Field::GetLong(const Object* object) const { -#ifndef NDEBUG - Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); - CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this); -#endif - return Get64(object); -} - -void Field::SetLong(Object* object, int64_t j) const { -#ifndef NDEBUG - Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType(); - CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this); -#endif - Set64(object, j); -} - -union Bits { - jdouble d; - jfloat f; - jint i; - jlong j; -}; - -float Field::GetFloat(const 
Object* object) const { - DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Bits bits; - bits.i = Get32(object); - return bits.f; -} - -void Field::SetFloat(Object* object, float f) const { - DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Bits bits; - bits.f = f; - Set32(object, bits.i); -} - -double Field::GetDouble(const Object* object) const { - DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Bits bits; - bits.j = Get64(object); - return bits.d; -} - -void Field::SetDouble(Object* object, double d) const { - DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - Bits bits; - bits.d = d; - Set64(object, bits.j); -} - -Object* Field::GetObject(const Object* object) const { - DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - return GetObj(object); -} - -void Field::SetObject(Object* object, const Object* l) const { - DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType()) - << PrettyField(this); - SetObj(object, l); -} - -// TODO: get global references for these -Class* AbstractMethod::java_lang_reflect_Constructor_ = NULL; -Class* AbstractMethod::java_lang_reflect_Method_ = NULL; - -InvokeType AbstractMethod::GetInvokeType() const { - // TODO: kSuper? 
- if (GetDeclaringClass()->IsInterface()) { - return kInterface; - } else if (IsStatic()) { - return kStatic; - } else if (IsDirect()) { - return kDirect; - } else { - return kVirtual; - } -} - -void AbstractMethod::SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method) { - CHECK(java_lang_reflect_Constructor_ == NULL); - CHECK(java_lang_reflect_Constructor != NULL); - java_lang_reflect_Constructor_ = java_lang_reflect_Constructor; - - CHECK(java_lang_reflect_Method_ == NULL); - CHECK(java_lang_reflect_Method != NULL); - java_lang_reflect_Method_ = java_lang_reflect_Method; -} - -void AbstractMethod::ResetClasses() { - CHECK(java_lang_reflect_Constructor_ != NULL); - java_lang_reflect_Constructor_ = NULL; - - CHECK(java_lang_reflect_Method_ != NULL); - java_lang_reflect_Method_ = NULL; -} - -ObjectArray* AbstractMethod::GetDexCacheStrings() const { - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_), false); -} - -void AbstractMethod::SetDexCacheStrings(ObjectArray* new_dex_cache_strings) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_), - new_dex_cache_strings, false); -} - -ObjectArray* AbstractMethod::GetDexCacheResolvedMethods() const { - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_), false); -} - -void AbstractMethod::SetDexCacheResolvedMethods(ObjectArray* new_dex_cache_methods) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_), - new_dex_cache_methods, false); -} - -ObjectArray* AbstractMethod::GetDexCacheResolvedTypes() const { - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_), false); -} - -void AbstractMethod::SetDexCacheResolvedTypes(ObjectArray* new_dex_cache_classes) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_), - new_dex_cache_classes, false); -} - -ObjectArray* 
AbstractMethod::GetDexCacheInitializedStaticStorage() const { - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_initialized_static_storage_), - false); -} - -void AbstractMethod::SetDexCacheInitializedStaticStorage(ObjectArray* new_value) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_initialized_static_storage_), - new_value, false); -} - -size_t AbstractMethod::NumArgRegisters(const StringPiece& shorty) { - CHECK_LE(1, shorty.length()); - uint32_t num_registers = 0; - for (int i = 1; i < shorty.length(); ++i) { - char ch = shorty[i]; - if (ch == 'D' || ch == 'J') { - num_registers += 2; - } else { - num_registers += 1; - } - } - return num_registers; -} - -bool AbstractMethod::IsProxyMethod() const { - return GetDeclaringClass()->IsProxyClass(); -} - -AbstractMethod* AbstractMethod::FindOverriddenMethod() const { - if (IsStatic()) { - return NULL; - } - Class* declaring_class = GetDeclaringClass(); - Class* super_class = declaring_class->GetSuperClass(); - uint16_t method_index = GetMethodIndex(); - ObjectArray* super_class_vtable = super_class->GetVTable(); - AbstractMethod* result = NULL; - // Did this method override a super class method? 
If so load the result from the super class' - // vtable - if (super_class_vtable != NULL && method_index < super_class_vtable->GetLength()) { - result = super_class_vtable->Get(method_index); - } else { - // Method didn't override superclass method so search interfaces - if (IsProxyMethod()) { - result = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex()); - CHECK_EQ(result, - Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this)); - } else { - MethodHelper mh(this); - MethodHelper interface_mh; - IfTable* iftable = GetDeclaringClass()->GetIfTable(); - for (size_t i = 0; i < iftable->Count() && result == NULL; i++) { - Class* interface = iftable->GetInterface(i); - for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { - AbstractMethod* interface_method = interface->GetVirtualMethod(j); - interface_mh.ChangeMethod(interface_method); - if (mh.HasSameNameAndSignature(&interface_mh)) { - result = interface_method; - break; - } - } - } - } - } -#ifndef NDEBUG - MethodHelper result_mh(result); - DCHECK(result == NULL || MethodHelper(this).HasSameNameAndSignature(&result_mh)); -#endif - return result; -} - -static const void* GetOatCode(const AbstractMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Runtime* runtime = Runtime::Current(); - const void* code = m->GetCode(); - // Peel off any method tracing trampoline. - if (runtime->IsMethodTracingActive() && runtime->GetInstrumentation()->GetSavedCodeFromMap(m) != NULL) { - code = runtime->GetInstrumentation()->GetSavedCodeFromMap(m); - } - // Peel off any resolution stub. 
- if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { - code = runtime->GetClassLinker()->GetOatCodeFor(m); - } - return code; -} - -uintptr_t AbstractMethod::NativePcOffset(const uintptr_t pc) const { - return pc - reinterpret_cast(GetOatCode(this)); -} - -// Find the lowest-address native safepoint pc for a given dex pc -uintptr_t AbstractMethod::ToFirstNativeSafepointPc(const uint32_t dex_pc) const { -#if !defined(ART_USE_LLVM_COMPILER) - const uint32_t* mapping_table = GetPcToDexMappingTable(); - if (mapping_table == NULL) { - DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this); - return DexFile::kDexNoIndex; // Special no mapping case - } - size_t mapping_table_length = GetPcToDexMappingTableLength(); - for (size_t i = 0; i < mapping_table_length; i += 2) { - if (mapping_table[i + 1] == dex_pc) { - return mapping_table[i] + reinterpret_cast(GetOatCode(this)); - } - } - LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc - << " in " << PrettyMethod(this); - return 0; -#else - // Compiler LLVM doesn't use the machine pc, we just use dex pc instead. 
- return static_cast(dex_pc); -#endif -} - -uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const { -#if !defined(ART_USE_LLVM_COMPILER) - const uint32_t* mapping_table = GetPcToDexMappingTable(); - if (mapping_table == NULL) { - DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this); - return DexFile::kDexNoIndex; // Special no mapping case - } - size_t mapping_table_length = GetPcToDexMappingTableLength(); - uint32_t sought_offset = pc - reinterpret_cast(GetOatCode(this)); - for (size_t i = 0; i < mapping_table_length; i += 2) { - if (mapping_table[i] == sought_offset) { - return mapping_table[i + 1]; - } - } - LOG(ERROR) << "Failed to find Dex offset for PC offset " << reinterpret_cast(sought_offset) - << "(PC " << reinterpret_cast(pc) << ") in " << PrettyMethod(this); - return DexFile::kDexNoIndex; -#else - // Compiler LLVM doesn't use the machine pc, we just use dex pc instead. - return static_cast(pc); -#endif -} - -uintptr_t AbstractMethod::ToNativePc(const uint32_t dex_pc) const { - const uint32_t* mapping_table = GetDexToPcMappingTable(); - if (mapping_table == NULL) { - DCHECK_EQ(dex_pc, 0U); - return 0; // Special no mapping/pc == 0 case - } - size_t mapping_table_length = GetDexToPcMappingTableLength(); - for (size_t i = 0; i < mapping_table_length; i += 2) { - uint32_t map_offset = mapping_table[i]; - uint32_t map_dex_offset = mapping_table[i + 1]; - if (map_dex_offset == dex_pc) { - return reinterpret_cast(GetOatCode(this)) + map_offset; - } - } - LOG(FATAL) << "Looking up Dex PC not contained in method, 0x" << std::hex << dex_pc - << " in " << PrettyMethod(this); - return 0; -} - -uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { - MethodHelper mh(this); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - // Iterate over the catch handlers associated with dex_pc - for (CatchHandlerIterator it(*code_item, dex_pc); it.HasNext(); it.Next()) { - uint16_t iter_type_idx = 
it.GetHandlerTypeIndex(); - // Catch all case - if (iter_type_idx == DexFile::kDexNoIndex16) { - return it.GetHandlerAddress(); - } - // Does this catch exception type apply? - Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); - if (iter_exception_type == NULL) { - // The verifier should take care of resolving all exception classes early - LOG(WARNING) << "Unresolved exception class when finding catch block: " - << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); - } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - return it.GetHandlerAddress(); - } - } - // Handler not found - return DexFile::kDexNoIndex; -} - -void AbstractMethod::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) { - if (kIsDebugBuild) { - self->AssertThreadSuspensionIsAllowable(); - CHECK_EQ(kRunnable, self->GetState()); - } - - // Push a transition back into managed code onto the linked list in thread. - ManagedStack fragment; - self->PushManagedStackFragment(&fragment); - - // Call the invoke stub associated with the method. - // Pass everything as arguments. 
- AbstractMethod::InvokeStub* stub = GetInvokeStub(); - - if (UNLIKELY(!Runtime::Current()->IsStarted())){ - LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started"; - if (result != NULL) { - result->SetJ(0); - } - } else { - bool interpret = self->ReadFlag(kEnterInterpreter) && !IsNative() && !IsProxyMethod(); - const bool kLogInvocationStartAndReturn = false; - if (!interpret && GetCode() != NULL && stub != NULL) { - if (kLogInvocationStartAndReturn) { - LOG(INFO) << StringPrintf("Invoking '%s' code=%p stub=%p", - PrettyMethod(this).c_str(), GetCode(), stub); - } - (*stub)(this, receiver, self, args, result); - if (kLogInvocationStartAndReturn) { - LOG(INFO) << StringPrintf("Returned '%s' code=%p stub=%p", - PrettyMethod(this).c_str(), GetCode(), stub); - } - } else { - const bool kInterpretMethodsWithNoCode = false; - if (interpret || kInterpretMethodsWithNoCode) { - if (kLogInvocationStartAndReturn) { - LOG(INFO) << "Interpreting " << PrettyMethod(this) << "'"; - } - art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args, result); - if (kLogInvocationStartAndReturn) { - LOG(INFO) << "Returned '" << PrettyMethod(this) << "'"; - } - } else { - LOG(INFO) << "Not invoking '" << PrettyMethod(this) - << "' code=" << reinterpret_cast(GetCode()) - << " stub=" << reinterpret_cast(stub); - if (result != NULL) { - result->SetJ(0); - } - } - } - } - - // Pop transition. 
- self->PopManagedStackFragment(fragment); -} - -bool AbstractMethod::IsRegistered() const { - void* native_method = GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), false); - CHECK(native_method != NULL); - void* jni_stub = Runtime::Current()->GetJniDlsymLookupStub()->GetData(); - return native_method != jni_stub; -} - -void AbstractMethod::RegisterNative(Thread* self, const void* native_method) { - DCHECK(Thread::Current() == self); - CHECK(IsNative()) << PrettyMethod(this); - CHECK(native_method != NULL) << PrettyMethod(this); -#if defined(ART_USE_LLVM_COMPILER) - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), - native_method, false); -#else - if (!self->GetJniEnv()->vm->work_around_app_jni_bugs) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), - native_method, false); - } else { - // We've been asked to associate this method with the given native method but are working - // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct - // the native method to runtime support and store the target somewhere runtime support will - // find it. 
-#if defined(__arm__) - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_), - reinterpret_cast(art_work_around_app_jni_bugs), false); -#else - UNIMPLEMENTED(FATAL); -#endif - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), - reinterpret_cast(native_method), false); - } -#endif -} - -void AbstractMethod::UnregisterNative(Thread* self) { - CHECK(IsNative()) << PrettyMethod(this); - // restore stub to lookup native pointer via dlsym - RegisterNative(self, Runtime::Current()->GetJniDlsymLookupStub()->GetData()); -} - -Class* Class::java_lang_Class_ = NULL; - -void Class::SetClassClass(Class* java_lang_Class) { - CHECK(java_lang_Class_ == NULL) << java_lang_Class_ << " " << java_lang_Class; - CHECK(java_lang_Class != NULL); - java_lang_Class_ = java_lang_Class; -} - -void Class::ResetClass() { - CHECK(java_lang_Class_ != NULL); - java_lang_Class_ = NULL; -} - -void Class::SetStatus(Status new_status) { - CHECK(new_status > GetStatus() || new_status == kStatusError || !Runtime::Current()->IsStarted()) - << PrettyClass(this) << " " << GetStatus() << " -> " << new_status; - CHECK(sizeof(Status) == sizeof(uint32_t)) << PrettyClass(this); - if (new_status > kStatusResolved) { - CHECK_EQ(GetThinLockId(), Thread::Current()->GetThinLockId()) << PrettyClass(this); - } - if (new_status == kStatusError) { - CHECK_NE(GetStatus(), kStatusError) << PrettyClass(this); - - // stash current exception - Thread* self = Thread::Current(); - SirtRef exception(self, self->GetException()); - CHECK(exception.get() != NULL); - - // clear exception to call FindSystemClass - self->ClearException(); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* eiie_class = class_linker->FindSystemClass("Ljava/lang/ExceptionInInitializerError;"); - CHECK(!self->IsExceptionPending()); - - // only verification errors, not initialization problems, should set a verify error. 
- // this is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that case. - Class* exception_class = exception->GetClass(); - if (!eiie_class->IsAssignableFrom(exception_class)) { - SetVerifyErrorClass(exception_class); - } - - // restore exception - self->SetException(exception.get()); - } - return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status, false); -} - -DexCache* Class::GetDexCache() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false); -} - -void Class::SetDexCache(DexCache* new_dex_cache) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false); -} - -Object* Class::AllocObject(Thread* self) { - DCHECK(!IsArrayClass()) << PrettyClass(this); - DCHECK(IsInstantiable()) << PrettyClass(this); - // TODO: decide whether we want this check. It currently fails during bootstrap. - // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this); - DCHECK_GE(this->object_size_, sizeof(Object)); - return Runtime::Current()->GetHeap()->AllocObject(self, this, this->object_size_); -} - -void Class::SetClassSize(size_t new_class_size) { - DCHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this); - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size, false); -} - -// Return the class' name. The exact format is bizarre, but it's the specified behavior for -// Class.getName: keywords for primitive types, regular "[I" form for primitive arrays (so "int" -// but "[I"), and arrays of reference types written between "L" and ";" but with dots rather than -// slashes (so "java.lang.String" but "[Ljava.lang.String;"). Madness. 
-String* Class::ComputeName() { - String* name = GetName(); - if (name != NULL) { - return name; - } - std::string descriptor(ClassHelper(this).GetDescriptor()); - if ((descriptor[0] != 'L') && (descriptor[0] != '[')) { - // The descriptor indicates that this is the class for - // a primitive type; special-case the return value. - const char* c_name = NULL; - switch (descriptor[0]) { - case 'Z': c_name = "boolean"; break; - case 'B': c_name = "byte"; break; - case 'C': c_name = "char"; break; - case 'S': c_name = "short"; break; - case 'I': c_name = "int"; break; - case 'J': c_name = "long"; break; - case 'F': c_name = "float"; break; - case 'D': c_name = "double"; break; - case 'V': c_name = "void"; break; - default: - LOG(FATAL) << "Unknown primitive type: " << PrintableChar(descriptor[0]); - } - name = String::AllocFromModifiedUtf8(Thread::Current(), c_name); - } else { - // Convert the UTF-8 name to a java.lang.String. The name must use '.' to separate package - // components. - if (descriptor.size() > 2 && descriptor[0] == 'L' && descriptor[descriptor.size() - 1] == ';') { - descriptor.erase(0, 1); - descriptor.erase(descriptor.size() - 1); - } - std::replace(descriptor.begin(), descriptor.end(), '/', '.'); - name = String::AllocFromModifiedUtf8(Thread::Current(), descriptor.c_str()); - } - SetName(name); - return name; -} - -void Class::DumpClass(std::ostream& os, int flags) const { - if ((flags & kDumpClassFullDetail) == 0) { - os << PrettyClass(this); - if ((flags & kDumpClassClassLoader) != 0) { - os << ' ' << GetClassLoader(); - } - if ((flags & kDumpClassInitialized) != 0) { - os << ' ' << GetStatus(); - } - os << "\n"; - return; - } - - Class* super = GetSuperClass(); - ClassHelper kh(this); - os << "----- " << (IsInterface() ? "interface" : "class") << " " - << "'" << kh.GetDescriptor() << "' cl=" << GetClassLoader() << " -----\n", - os << " objectSize=" << SizeOf() << " " - << "(" << (super != NULL ? 
super->SizeOf() : -1) << " from super)\n", - os << StringPrintf(" access=0x%04x.%04x\n", - GetAccessFlags() >> 16, GetAccessFlags() & kAccJavaFlagsMask); - if (super != NULL) { - os << " super='" << PrettyClass(super) << "' (cl=" << super->GetClassLoader() << ")\n"; - } - if (IsArrayClass()) { - os << " componentType=" << PrettyClass(GetComponentType()) << "\n"; - } - if (kh.NumDirectInterfaces() > 0) { - os << " interfaces (" << kh.NumDirectInterfaces() << "):\n"; - for (size_t i = 0; i < kh.NumDirectInterfaces(); ++i) { - Class* interface = kh.GetDirectInterface(i); - const ClassLoader* cl = interface->GetClassLoader(); - os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl); - } - } - os << " vtable (" << NumVirtualMethods() << " entries, " - << (super != NULL ? super->NumVirtualMethods() : 0) << " in super):\n"; - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(GetVirtualMethodDuringLinking(i)).c_str()); - } - os << " direct methods (" << NumDirectMethods() << " entries):\n"; - for (size_t i = 0; i < NumDirectMethods(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(GetDirectMethod(i)).c_str()); - } - if (NumStaticFields() > 0) { - os << " static fields (" << NumStaticFields() << " entries):\n"; - if (IsResolved() || IsErroneous()) { - for (size_t i = 0; i < NumStaticFields(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, PrettyField(GetStaticField(i)).c_str()); - } - } else { - os << " "; - } - } - if (NumInstanceFields() > 0) { - os << " instance fields (" << NumInstanceFields() << " entries):\n"; - if (IsResolved() || IsErroneous()) { - for (size_t i = 0; i < NumInstanceFields(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, PrettyField(GetInstanceField(i)).c_str()); - } - } else { - os << " "; - } - } -} - -void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) { - if (new_reference_offsets != CLASS_WALK_SUPER) { - // Sanity check that the number 
of bits set in the reference offset bitmap - // agrees with the number of references - size_t count = 0; - for (Class* c = this; c != NULL; c = c->GetSuperClass()) { - count += c->NumReferenceInstanceFieldsDuringLinking(); - } - CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets), count); - } - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), - new_reference_offsets, false); -} - -void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) { - if (new_reference_offsets != CLASS_WALK_SUPER) { - // Sanity check that the number of bits set in the reference offset bitmap - // agrees with the number of references - CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets), - NumReferenceStaticFieldsDuringLinking()); - } - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), - new_reference_offsets, false); -} - -bool Class::Implements(const Class* klass) const { - DCHECK(klass != NULL); - DCHECK(klass->IsInterface()) << PrettyClass(this); - // All interfaces implemented directly and by our superclass, and - // recursively all super-interfaces of those interfaces, are listed - // in iftable_, so we can just do a linear scan through that. - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - if (iftable->GetInterface(i) == klass) { - return true; - } - } - return false; -} - -// Determine whether "this" is assignable from "src", where both of these -// are array classes. -// -// Consider an array class, e.g. Y[][], where Y is a subclass of X. 
-// Y[][] = Y[][] --> true (identity) -// X[][] = Y[][] --> true (element superclass) -// Y = Y[][] --> false -// Y[] = Y[][] --> false -// Object = Y[][] --> true (everything is an object) -// Object[] = Y[][] --> true -// Object[][] = Y[][] --> true -// Object[][][] = Y[][] --> false (too many []s) -// Serializable = Y[][] --> true (all arrays are Serializable) -// Serializable[] = Y[][] --> true -// Serializable[][] = Y[][] --> false (unless Y is Serializable) -// -// Don't forget about primitive types. -// Object[] = int[] --> false -// -bool Class::IsArrayAssignableFromArray(const Class* src) const { - DCHECK(IsArrayClass()) << PrettyClass(this); - DCHECK(src->IsArrayClass()) << PrettyClass(src); - return GetComponentType()->IsAssignableFrom(src->GetComponentType()); -} - -bool Class::IsAssignableFromArray(const Class* src) const { - DCHECK(!IsInterface()) << PrettyClass(this); // handled first in IsAssignableFrom - DCHECK(src->IsArrayClass()) << PrettyClass(src); - if (!IsArrayClass()) { - // If "this" is not also an array, it must be Object. - // src's super should be java_lang_Object, since it is an array. 
- Class* java_lang_Object = src->GetSuperClass(); - DCHECK(java_lang_Object != NULL) << PrettyClass(src); - DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src); - return this == java_lang_Object; - } - return IsArrayAssignableFromArray(src); -} - -bool Class::IsSubClass(const Class* klass) const { - DCHECK(!IsInterface()) << PrettyClass(this); - DCHECK(!IsArrayClass()) << PrettyClass(this); - const Class* current = this; - do { - if (current == klass) { - return true; - } - current = current->GetSuperClass(); - } while (current != NULL); - return false; -} - -bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) { - size_t i = 0; - while (descriptor1[i] != '\0' && descriptor1[i] == descriptor2[i]) { - ++i; - } - if (descriptor1.find('/', i) != StringPiece::npos || - descriptor2.find('/', i) != StringPiece::npos) { - return false; - } else { - return true; - } -} - -bool Class::IsInSamePackage(const Class* that) const { - const Class* klass1 = this; - const Class* klass2 = that; - if (klass1 == klass2) { - return true; - } - // Class loaders must match. - if (klass1->GetClassLoader() != klass2->GetClassLoader()) { - return false; - } - // Arrays are in the same package when their element classes are. - while (klass1->IsArrayClass()) { - klass1 = klass1->GetComponentType(); - } - while (klass2->IsArrayClass()) { - klass2 = klass2->GetComponentType(); - } - // Compare the package part of the descriptor string. 
- ClassHelper kh(klass1); - std::string descriptor1(kh.GetDescriptor()); - kh.ChangeClass(klass2); - std::string descriptor2(kh.GetDescriptor()); - return IsInSamePackage(descriptor1, descriptor2); -} - -bool Class::IsClassClass() const { - Class* java_lang_Class = GetClass()->GetClass(); - return this == java_lang_Class; -} - -bool Class::IsStringClass() const { - return this == String::GetJavaLangString(); -} - -bool Class::IsThrowableClass() const { - return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this); -} - -bool Class::IsFieldClass() const { - Class* java_lang_Class = GetClass(); - Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass(); - return this == java_lang_reflect_Field; - -} - -bool Class::IsMethodClass() const { - return (this == AbstractMethod::GetMethodClass()) || - (this == AbstractMethod::GetConstructorClass()); - -} - -ClassLoader* Class::GetClassLoader() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false); -} - -void Class::SetClassLoader(ClassLoader* new_class_loader) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false); -} - -AbstractMethod* Class::FindVirtualMethodForInterface(AbstractMethod* method) { - Class* declaring_class = method->GetDeclaringClass(); - DCHECK(declaring_class != NULL) << PrettyClass(this); - DCHECK(declaring_class->IsInterface()) << PrettyMethod(method); - // TODO cache to improve lookup speed - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - if (iftable->GetInterface(i) == declaring_class) { - return iftable->GetMethodArray(i)->Get(method->GetMethodIndex()); - } - } - return NULL; -} - -AbstractMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) const { - // Check the current class before checking the interfaces. 
- AbstractMethod* method = FindDeclaredVirtualMethod(name, signature); - if (method != NULL) { - return method; - } - - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature); - if (method != NULL) { - return method; - } - } - return NULL; -} - -AbstractMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { - // Check the current class before checking the interfaces. - AbstractMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx); - if (method != NULL) { - return method; - } - - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); - if (method != NULL) { - return method; - } - } - return NULL; -} - - -AbstractMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const { - MethodHelper mh; - for (size_t i = 0; i < NumDirectMethods(); ++i) { - AbstractMethod* method = GetDirectMethod(i); - mh.ChangeMethod(method); - if (name == mh.GetName() && signature == mh.GetSignature()) { - return method; - } - } - return NULL; -} - -AbstractMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { - if (GetDexCache() == dex_cache) { - for (size_t i = 0; i < NumDirectMethods(); ++i) { - AbstractMethod* method = GetDirectMethod(i); - if (method->GetDexMethodIndex() == dex_method_idx) { - return method; - } - } - } - return NULL; -} - -AbstractMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) const { - for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { - AbstractMethod* method = klass->FindDeclaredDirectMethod(name, signature); - if (method != NULL) { - return method; - 
} - } - return NULL; -} - -AbstractMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { - for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { - AbstractMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx); - if (method != NULL) { - return method; - } - } - return NULL; -} - -AbstractMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, - const StringPiece& signature) const { - MethodHelper mh; - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - AbstractMethod* method = GetVirtualMethod(i); - mh.ChangeMethod(method); - if (name == mh.GetName() && signature == mh.GetSignature()) { - return method; - } - } - return NULL; -} - -AbstractMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { - if (GetDexCache() == dex_cache) { - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - AbstractMethod* method = GetVirtualMethod(i); - if (method->GetDexMethodIndex() == dex_method_idx) { - return method; - } - } - } - return NULL; -} - -AbstractMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) const { - for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { - AbstractMethod* method = klass->FindDeclaredVirtualMethod(name, signature); - if (method != NULL) { - return method; - } - } - return NULL; -} - -AbstractMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const { - for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) { - AbstractMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); - if (method != NULL) { - return method; - } - } - return NULL; -} - -Field* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) { - // Is the field in this class? - // Interfaces are not relevant because they can't contain instance fields. 
- FieldHelper fh; - for (size_t i = 0; i < NumInstanceFields(); ++i) { - Field* f = GetInstanceField(i); - fh.ChangeField(f); - if (name == fh.GetName() && type == fh.GetTypeDescriptor()) { - return f; - } - } - return NULL; -} - -Field* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) { - if (GetDexCache() == dex_cache) { - for (size_t i = 0; i < NumInstanceFields(); ++i) { - Field* f = GetInstanceField(i); - if (f->GetDexFieldIndex() == dex_field_idx) { - return f; - } - } - } - return NULL; -} - -Field* Class::FindInstanceField(const StringPiece& name, const StringPiece& type) { - // Is the field in this class, or any of its superclasses? - // Interfaces are not relevant because they can't contain instance fields. - for (Class* c = this; c != NULL; c = c->GetSuperClass()) { - Field* f = c->FindDeclaredInstanceField(name, type); - if (f != NULL) { - return f; - } - } - return NULL; -} - -Field* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) { - // Is the field in this class, or any of its superclasses? - // Interfaces are not relevant because they can't contain instance fields. 
- for (Class* c = this; c != NULL; c = c->GetSuperClass()) { - Field* f = c->FindDeclaredInstanceField(dex_cache, dex_field_idx); - if (f != NULL) { - return f; - } - } - return NULL; -} - -Field* Class::FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) { - DCHECK(type != NULL); - FieldHelper fh; - for (size_t i = 0; i < NumStaticFields(); ++i) { - Field* f = GetStaticField(i); - fh.ChangeField(f); - if (name == fh.GetName() && type == fh.GetTypeDescriptor()) { - return f; - } - } - return NULL; -} - -Field* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) { - if (dex_cache == GetDexCache()) { - for (size_t i = 0; i < NumStaticFields(); ++i) { - Field* f = GetStaticField(i); - if (f->GetDexFieldIndex() == dex_field_idx) { - return f; - } - } - } - return NULL; -} - -Field* Class::FindStaticField(const StringPiece& name, const StringPiece& type) { - // Is the field in this class (or its interfaces), or any of its - // superclasses (or their interfaces)? - ClassHelper kh; - for (Class* k = this; k != NULL; k = k->GetSuperClass()) { - // Is the field in this class? - Field* f = k->FindDeclaredStaticField(name, type); - if (f != NULL) { - return f; - } - // Is this field in any of this class' interfaces? - kh.ChangeClass(k); - for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { - Class* interface = kh.GetDirectInterface(i); - f = interface->FindStaticField(name, type); - if (f != NULL) { - return f; - } - } - } - return NULL; -} - -Field* Class::FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) { - ClassHelper kh; - for (Class* k = this; k != NULL; k = k->GetSuperClass()) { - // Is the field in this class? - Field* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx); - if (f != NULL) { - return f; - } - // Is this field in any of this class' interfaces? 
- kh.ChangeClass(k); - for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { - Class* interface = kh.GetDirectInterface(i); - f = interface->FindStaticField(dex_cache, dex_field_idx); - if (f != NULL) { - return f; - } - } - } - return NULL; -} - -Field* Class::FindField(const StringPiece& name, const StringPiece& type) { - // Find a field using the JLS field resolution order - ClassHelper kh; - for (Class* k = this; k != NULL; k = k->GetSuperClass()) { - // Is the field in this class? - Field* f = k->FindDeclaredInstanceField(name, type); - if (f != NULL) { - return f; - } - f = k->FindDeclaredStaticField(name, type); - if (f != NULL) { - return f; - } - // Is this field in any of this class' interfaces? - kh.ChangeClass(k); - for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) { - Class* interface = kh.GetDirectInterface(i); - f = interface->FindStaticField(name, type); - if (f != NULL) { - return f; - } - } - } - return NULL; -} - -Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size) { - DCHECK(array_class != NULL); - DCHECK_GE(component_count, 0); - DCHECK(array_class->IsArrayClass()); - - size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); - size_t data_size = component_count * component_size; - size_t size = header_size + data_size; - - // Check for overflow and throw OutOfMemoryError if this was an unreasonable request. 
- size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size); - if (data_size >> component_shift != size_t(component_count) || size < data_size) { - self->ThrowNewExceptionF("Ljava/lang/OutOfMemoryError;", - "%s of length %d would overflow", - PrettyDescriptor(array_class).c_str(), component_count); - return NULL; - } - - Heap* heap = Runtime::Current()->GetHeap(); - Array* array = down_cast(heap->AllocObject(self, array_class, size)); - if (array != NULL) { - DCHECK(array->IsArrayInstance()); - array->SetLength(component_count); - } - return array; -} - -Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) { - DCHECK(array_class->IsArrayClass()); - return Alloc(self, array_class, component_count, array_class->GetComponentSize()); -} - -// Create a multi-dimensional array of Objects or primitive types. -// -// We have to generate the names for X[], X[][], X[][][], and so on. The -// easiest way to deal with that is to create the full name once and then -// subtract pieces off. Besides, we want to start with the outermost -// piece and work our way in. -// Recursively create an array with multiple dimensions. Elements may be -// Objects or primitive types. -static Array* RecursiveCreateMultiArray(Thread* self, Class* array_class, int current_dimension, - IntArray* dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - int32_t array_length = dimensions->Get(current_dimension); - SirtRef new_array(self, Array::Alloc(self, array_class, array_length)); - if (UNLIKELY(new_array.get() == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; - } - if ((current_dimension + 1) < dimensions->GetLength()) { - // Create a new sub-array in every element of the array. 
- for (int32_t i = 0; i < array_length; i++) { - Array* sub_array = RecursiveCreateMultiArray(self, array_class->GetComponentType(), - current_dimension + 1, dimensions); - if (UNLIKELY(sub_array == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; - } - new_array->AsObjectArray()->Set(i, sub_array); - } - } - return new_array.get(); -} - -Array* Array::CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions) { - // Verify dimensions. - // - // The caller is responsible for verifying that "dimArray" is non-null - // and has a length > 0 and <= 255. - int num_dimensions = dimensions->GetLength(); - DCHECK_GT(num_dimensions, 0); - DCHECK_LE(num_dimensions, 255); - - for (int i = 0; i < num_dimensions; i++) { - int dimension = dimensions->Get(i); - if (UNLIKELY(dimension < 0)) { - self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", - "Dimension %d: %d", i, dimension); - return NULL; - } - } - - // Generate the full name of the array class. - std::string descriptor(num_dimensions, '['); - descriptor += ClassHelper(element_class).GetDescriptor(); - - // Find/generate the array class. 
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader()); - if (UNLIKELY(array_class == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; - } - // create the array - Array* new_array = RecursiveCreateMultiArray(self, array_class, 0, dimensions); - if (UNLIKELY(new_array == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; - } - return new_array; -} - -bool Array::ThrowArrayIndexOutOfBoundsException(int32_t index) const { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "length=%i; index=%i", length_, index); - return false; -} - -bool Array::ThrowArrayStoreException(Object* object) const { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "%s cannot be stored in an array of type %s", - PrettyTypeOf(object).c_str(), PrettyTypeOf(this).c_str()); - return false; -} - -template -PrimitiveArray* PrimitiveArray::Alloc(Thread* self, size_t length) { - DCHECK(array_class_ != NULL); - Array* raw_array = Array::Alloc(self, array_class_, length, sizeof(T)); - return down_cast*>(raw_array); -} - -template Class* PrimitiveArray::array_class_ = NULL; - -// Explicitly instantiate all the primitive array types. 
-template class PrimitiveArray; // BooleanArray -template class PrimitiveArray; // ByteArray -template class PrimitiveArray; // CharArray -template class PrimitiveArray; // DoubleArray -template class PrimitiveArray; // FloatArray -template class PrimitiveArray; // IntArray -template class PrimitiveArray; // LongArray -template class PrimitiveArray; // ShortArray - -// Explicitly instantiate Class[][] -template class ObjectArray >; - -// TODO: get global references for these -Class* String::java_lang_String_ = NULL; - -void String::SetClass(Class* java_lang_String) { - CHECK(java_lang_String_ == NULL); - CHECK(java_lang_String != NULL); - java_lang_String_ = java_lang_String; -} - -void String::ResetClass() { - CHECK(java_lang_String_ != NULL); - java_lang_String_ = NULL; -} - -String* String::Intern() { - return Runtime::Current()->GetInternTable()->InternWeak(this); -} - -int32_t String::GetHashCode() { - int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false); - if (result == 0) { - ComputeHashCode(); - } - result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false); - DCHECK(result != 0 || ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()) == 0) - << ToModifiedUtf8() << " " << result; - return result; -} - -int32_t String::GetLength() const { - int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), false); - DCHECK(result >= 0 && result <= GetCharArray()->GetLength()); - return result; -} - -uint16_t String::CharAt(int32_t index) const { - // TODO: do we need this? Equals is the only caller, and could - // bounds check itself. 
- if (index < 0 || index >= count_) { - Thread* self = Thread::Current(); - self->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", - "length=%i; index=%i", count_, index); - return 0; - } - return GetCharArray()->Get(index + GetOffset()); -} - -String* String::AllocFromUtf16(Thread* self, - int32_t utf16_length, - const uint16_t* utf16_data_in, - int32_t hash_code) { - CHECK(utf16_data_in != NULL || utf16_length == 0); - String* string = Alloc(self, GetJavaLangString(), utf16_length); - if (string == NULL) { - return NULL; - } - // TODO: use 16-bit wide memset variant - CharArray* array = const_cast(string->GetCharArray()); - if (array == NULL) { - return NULL; - } - for (int i = 0; i < utf16_length; i++) { - array->Set(i, utf16_data_in[i]); - } - if (hash_code != 0) { - string->SetHashCode(hash_code); - } else { - string->ComputeHashCode(); - } - return string; -} - - String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) { - if (utf == NULL) { - return NULL; - } - size_t char_count = CountModifiedUtf8Chars(utf); - return AllocFromModifiedUtf8(self, char_count, utf); -} - -String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, - const char* utf8_data_in) { - String* string = Alloc(self, GetJavaLangString(), utf16_length); - if (string == NULL) { - return NULL; - } - uint16_t* utf16_data_out = - const_cast(string->GetCharArray()->GetData()); - ConvertModifiedUtf8ToUtf16(utf16_data_out, utf8_data_in); - string->ComputeHashCode(); - return string; -} - -String* String::Alloc(Thread* self, Class* java_lang_String, int32_t utf16_length) { - SirtRef array(self, CharArray::Alloc(self, utf16_length)); - if (array.get() == NULL) { - return NULL; - } - return Alloc(self, java_lang_String, array.get()); -} - -String* String::Alloc(Thread* self, Class* java_lang_String, CharArray* array) { - // Hold reference in case AllocObject causes GC. 
- SirtRef array_ref(self, array); - String* string = down_cast(java_lang_String->AllocObject(self)); - if (string == NULL) { - return NULL; - } - string->SetArray(array); - string->SetCount(array->GetLength()); - return string; -} - -bool String::Equals(const String* that) const { - if (this == that) { - // Quick reference equality test - return true; - } else if (that == NULL) { - // Null isn't an instanceof anything - return false; - } else if (this->GetLength() != that->GetLength()) { - // Quick length inequality test - return false; - } else { - // Note: don't short circuit on hash code as we're presumably here as the - // hash code was already equal - for (int32_t i = 0; i < that->GetLength(); ++i) { - if (this->CharAt(i) != that->CharAt(i)) { - return false; - } - } - return true; - } -} - -bool String::Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) const { - if (this->GetLength() != that_length) { - return false; - } else { - for (int32_t i = 0; i < that_length; ++i) { - if (this->CharAt(i) != that_chars[that_offset + i]) { - return false; - } - } - return true; - } -} - -bool String::Equals(const char* modified_utf8) const { - for (int32_t i = 0; i < GetLength(); ++i) { - uint16_t ch = GetUtf16FromUtf8(&modified_utf8); - if (ch == '\0' || ch != CharAt(i)) { - return false; - } - } - return *modified_utf8 == '\0'; -} - -bool String::Equals(const StringPiece& modified_utf8) const { - if (modified_utf8.size() != GetLength()) { - return false; - } - const char* p = modified_utf8.data(); - for (int32_t i = 0; i < GetLength(); ++i) { - uint16_t ch = GetUtf16FromUtf8(&p); - if (ch != CharAt(i)) { - return false; - } - } - return true; -} - -// Create a modified UTF-8 encoded std::string from a java/lang/String object. 
-std::string String::ToModifiedUtf8() const { - const uint16_t* chars = GetCharArray()->GetData() + GetOffset(); - size_t byte_count = GetUtfLength(); - std::string result(byte_count, static_cast(0)); - ConvertUtf16ToModifiedUtf8(&result[0], chars, GetLength()); - return result; -} - -#ifdef HAVE__MEMCMP16 -// "count" is in 16-bit units. -extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count); -#define MemCmp16 __memcmp16 -#else -static uint32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t count) { - for (size_t i = 0; i < count; i++) { - if (s0[i] != s1[i]) { - return static_cast(s0[i]) - static_cast(s1[i]); - } - } - return 0; -} -#endif - -int32_t String::CompareTo(String* rhs) const { - // Quick test for comparison of a string with itself. - const String* lhs = this; - if (lhs == rhs) { - return 0; - } - // TODO: is this still true? - // The annoying part here is that 0x00e9 - 0xffff != 0x00ea, - // because the interpreter converts the characters to 32-bit integers - // *without* sign extension before it subtracts them (which makes some - // sense since "char" is unsigned). So what we get is the result of - // 0x000000e9 - 0x0000ffff, which is 0xffff00ea. - int lhsCount = lhs->GetLength(); - int rhsCount = rhs->GetLength(); - int countDiff = lhsCount - rhsCount; - int minCount = (countDiff < 0) ? 
lhsCount : rhsCount; - const uint16_t* lhsChars = lhs->GetCharArray()->GetData() + lhs->GetOffset(); - const uint16_t* rhsChars = rhs->GetCharArray()->GetData() + rhs->GetOffset(); - int otherRes = MemCmp16(lhsChars, rhsChars, minCount); - if (otherRes != 0) { - return otherRes; - } - return countDiff; -} - -void Throwable::SetCause(Throwable* cause) { - CHECK(cause != NULL); - CHECK(cause != this); - CHECK(GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false) == NULL); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false); -} - -bool Throwable::IsCheckedException() const { - if (InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_Error))) { - return false; - } - return !InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_RuntimeException)); -} - -std::string Throwable::Dump() const { - std::string result(PrettyTypeOf(this)); - result += ": "; - String* msg = GetDetailMessage(); - if (msg != NULL) { - result += msg->ToModifiedUtf8(); - } - result += "\n"; - Object* stack_state = GetStackState(); - // check stack state isn't missing or corrupt - if (stack_state != NULL && stack_state->IsObjectArray()) { - // Decode the internal stack trace into the depth and method trace - ObjectArray* method_trace = down_cast*>(stack_state); - int32_t depth = method_trace->GetLength() - 1; - IntArray* pc_trace = down_cast(method_trace->Get(depth)); - MethodHelper mh; - for (int32_t i = 0; i < depth; ++i) { - AbstractMethod* method = down_cast(method_trace->Get(i)); - mh.ChangeMethod(method); - uint32_t dex_pc = pc_trace->Get(i); - int32_t line_number = mh.GetLineNumFromDexPC(dex_pc); - const char* source_file = mh.GetDeclaringClassSourceFile(); - result += StringPrintf(" at %s (%s:%d)\n", PrettyMethod(method, true).c_str(), - source_file, line_number); - } - } - Throwable* cause = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false); - if (cause != NULL && cause != this) { // Constructor makes cause == this by 
default. - result += "Caused by: "; - result += cause->Dump(); - } - return result; -} - - -Class* Throwable::java_lang_Throwable_ = NULL; - -void Throwable::SetClass(Class* java_lang_Throwable) { - CHECK(java_lang_Throwable_ == NULL); - CHECK(java_lang_Throwable != NULL); - java_lang_Throwable_ = java_lang_Throwable; -} - -void Throwable::ResetClass() { - CHECK(java_lang_Throwable_ != NULL); - java_lang_Throwable_ = NULL; -} - -Class* StackTraceElement::java_lang_StackTraceElement_ = NULL; - -void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) { - CHECK(java_lang_StackTraceElement_ == NULL); - CHECK(java_lang_StackTraceElement != NULL); - java_lang_StackTraceElement_ = java_lang_StackTraceElement; -} - -void StackTraceElement::ResetClass() { - CHECK(java_lang_StackTraceElement_ != NULL); - java_lang_StackTraceElement_ = NULL; -} - -StackTraceElement* StackTraceElement::Alloc(Thread* self, - String* declaring_class, - String* method_name, - String* file_name, - int32_t line_number) { - StackTraceElement* trace = - down_cast(GetStackTraceElement()->AllocObject(self)); - trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_), - const_cast(declaring_class), false); - trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_), - const_cast(method_name), false); - trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_), - const_cast(file_name), false); - trace->SetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), - line_number, false); - return trace; -} - -} // namespace art diff --git a/src/object.h b/src/object.h deleted file mode 100644 index f02e312dd6..0000000000 --- a/src/object.h +++ /dev/null @@ -1,2748 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_SRC_OBJECT_H_ -#define ART_SRC_OBJECT_H_ - -#include -#include - -#include "atomic.h" -#include "base/casts.h" -#include "base/logging.h" -#include "base/macros.h" -#include "base/stringpiece.h" -#include "globals.h" -#include "heap.h" -#include "invoke_type.h" -#include "modifiers.h" -#include "offsets.h" -#include "primitive.h" -#include "runtime.h" -#include "thread.h" -#include "UniquePtr.h" -#include "utf.h" - -namespace art { - -class Array; -class Class; -class ClassLoader; -class CodeAndDirectMethods; -class DexCache; -class Field; -class IfTable; -class Monitor; -class Member; -class AbstractMethod; -class Object; -class StaticStorageBase; -class String; -template class ObjectArray; -template class PrimitiveArray; -typedef PrimitiveArray BooleanArray; -typedef PrimitiveArray ByteArray; -typedef PrimitiveArray CharArray; -typedef PrimitiveArray DoubleArray; -typedef PrimitiveArray FloatArray; -typedef PrimitiveArray IntArray; -typedef PrimitiveArray LongArray; -typedef PrimitiveArray ShortArray; -union JValue; - -#if defined(ART_USE_LLVM_COMPILER) -namespace compiler_llvm { - class InferredRegCategoryMap; -} // namespace compiler_llvm -#endif - -/* - * Definitions for packing refOffsets in Class. - */ -/* - * A magic value for refOffsets. Ignore the bits and walk the super - * chain when this is the value. - * [This is an unlikely "natural" value, since it would be 30 non-ref instance - * fields followed by 2 ref instance fields.] 
- */ -#define CLASS_WALK_SUPER ((unsigned int)(3)) -#define CLASS_BITS_PER_WORD (sizeof(unsigned long int) * 8) -#define CLASS_OFFSET_ALIGNMENT 4 -#define CLASS_HIGH_BIT ((unsigned int)1 << (CLASS_BITS_PER_WORD - 1)) -/* - * Given an offset, return the bit number which would encode that offset. - * Local use only. - */ -#define _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) \ - ((unsigned int)(byteOffset) / \ - CLASS_OFFSET_ALIGNMENT) -/* - * Is the given offset too large to be encoded? - */ -#define CLASS_CAN_ENCODE_OFFSET(byteOffset) \ - (_CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) < CLASS_BITS_PER_WORD) -/* - * Return a single bit, encoding the offset. - * Undefined if the offset is too large, as defined above. - */ -#define CLASS_BIT_FROM_OFFSET(byteOffset) \ - (CLASS_HIGH_BIT >> _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset)) -/* - * Return an offset, given a bit number as returned from CLZ. - */ -#define CLASS_OFFSET_FROM_CLZ(rshift) \ - MemberOffset((static_cast(rshift) * CLASS_OFFSET_ALIGNMENT)) - -#define OFFSET_OF_OBJECT_MEMBER(type, field) \ - MemberOffset(OFFSETOF_MEMBER(type, field)) - -// Classes shared with the managed side of the world need to be packed -// so that they don't have extra platform specific padding. 
-#define MANAGED PACKED(4) - -// C++ mirror of java.lang.Object -class MANAGED Object { - public: - static MemberOffset ClassOffset() { - return OFFSET_OF_OBJECT_MEMBER(Object, klass_); - } - - Class* GetClass() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false); - } - - void SetClass(Class* new_klass); - - bool InstanceOf(const Class* klass) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - int32_t IdentityHashCode() const { - #ifdef MOVING_GARBAGE_COLLECTOR - // TODO: we'll need to use the Object's internal concept of identity - UNIMPLEMENTED(FATAL); - #endif - return reinterpret_cast(this); - } - - static MemberOffset MonitorOffset() { - return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); - } - - volatile int32_t* GetRawLockWordAddress() { - byte* raw_addr = reinterpret_cast(this) + - OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value(); - int32_t* word_addr = reinterpret_cast(raw_addr); - return const_cast(word_addr); - } - - uint32_t GetThinLockId(); - - void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); - - bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_); - - void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Wait() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Wait(int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsClass() const; - - Class* AsClass() { - DCHECK(IsClass()); - return down_cast(this); - } - - const Class* AsClass() const { - DCHECK(IsClass()); - return down_cast(this); - } - - bool IsObjectArray() const; - - template - ObjectArray* 
AsObjectArray(); - - template - const ObjectArray* AsObjectArray() const; - - bool IsArrayInstance() const; - - Array* AsArray() { - DCHECK(IsArrayInstance()); - return down_cast(this); - } - - const Array* AsArray() const { - DCHECK(IsArrayInstance()); - return down_cast(this); - } - - BooleanArray* AsBooleanArray(); - ByteArray* AsByteArray(); - CharArray* AsCharArray(); - ShortArray* AsShortArray(); - IntArray* AsIntArray(); - LongArray* AsLongArray(); - - String* AsString(); - - Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsMethod() const; - - AbstractMethod* AsMethod() { - DCHECK(IsMethod()); - return down_cast(this); - } - - const AbstractMethod* AsMethod() const { - DCHECK(IsMethod()); - return down_cast(this); - } - - bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(IsField()); - return down_cast(this); - } - - const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(IsField()); - return down_cast(this); - } - - bool IsReferenceInstance() const; - - bool IsWeakReferenceInstance() const; - - bool IsSoftReferenceInstance() const; - - bool IsFinalizerReferenceInstance() const; - - bool IsPhantomReferenceInstance() const; - - // Accessors for Java type fields - template - T GetFieldObject(MemberOffset field_offset, bool is_volatile) const { - T result = reinterpret_cast(GetField32(field_offset, is_volatile)); - Runtime::Current()->GetHeap()->VerifyObject(result); - return result; - } - - void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile, - bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Runtime::Current()->GetHeap()->VerifyObject(new_value); - SetField32(field_offset, reinterpret_cast(new_value), is_volatile, this_is_valid); - if (new_value != NULL) { - CheckFieldAssignment(field_offset, new_value); - 
Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value); - } - } - - uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const { - Runtime::Current()->GetHeap()->VerifyObject(this); - const byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); - const int32_t* word_addr = reinterpret_cast(raw_addr); - if (UNLIKELY(is_volatile)) { - return android_atomic_acquire_load(word_addr); - } else { - return *word_addr; - } - } - - void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile, bool this_is_valid = true) { - if (this_is_valid) { - Runtime::Current()->GetHeap()->VerifyObject(this); - } - byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); - uint32_t* word_addr = reinterpret_cast(raw_addr); - if (UNLIKELY(is_volatile)) { - /* - * TODO: add an android_atomic_synchronization_store() function and - * use it in the 32-bit volatile set handlers. On some platforms we - * can use a fast atomic instruction and avoid the barriers. - */ - ANDROID_MEMBAR_STORE(); - *word_addr = new_value; - ANDROID_MEMBAR_FULL(); - } else { - *word_addr = new_value; - } - } - - uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const { - Runtime::Current()->GetHeap()->VerifyObject(this); - const byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); - const int64_t* addr = reinterpret_cast(raw_addr); - if (UNLIKELY(is_volatile)) { - uint64_t result = QuasiAtomic::Read64(addr); - ANDROID_MEMBAR_FULL(); - return result; - } else { - return *addr; - } - } - - void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile) { - Runtime::Current()->GetHeap()->VerifyObject(this); - byte* raw_addr = reinterpret_cast(this) + field_offset.Int32Value(); - int64_t* addr = reinterpret_cast(raw_addr); - if (UNLIKELY(is_volatile)) { - ANDROID_MEMBAR_STORE(); - QuasiAtomic::Write64(addr, new_value); - // Post-store barrier not required due to use of atomic op or mutex. 
- } else { - *addr = new_value; - } - } - - protected: - // Accessors for non-Java type fields - template - T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const { - return reinterpret_cast(GetField32(field_offset, is_volatile)); - } - - template - void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) { - SetField32(field_offset, reinterpret_cast(new_value), is_volatile, this_is_valid); - } - - private: -#if VERIFY_OBJECT_ENABLED - void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -#else - void CheckFieldAssignment(MemberOffset, const Object*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {} -#endif - - Class* klass_; - - uint32_t monitor_; - - friend class ImageWriter; // for abusing monitor_ directly - friend struct ObjectOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(Object); -}; - -// C++ mirror of java.lang.reflect.Field -class MANAGED Field : public Object { - public: - Class* GetDeclaringClass() const; - - void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - uint32_t GetAccessFlags() const; - - void SetAccessFlags(uint32_t new_access_flags) { - SetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), new_access_flags, false); - } - - bool IsPublic() const { - return (GetAccessFlags() & kAccPublic) != 0; - } - - bool IsStatic() const { - return (GetAccessFlags() & kAccStatic) != 0; - } - - bool IsFinal() const { - return (GetAccessFlags() & kAccFinal) != 0; - } - - uint32_t GetDexFieldIndex() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, field_dex_idx_), false); - } - - void SetDexFieldIndex(uint32_t new_idx) { - SetField32(OFFSET_OF_OBJECT_MEMBER(Field, field_dex_idx_), new_idx, false); - } - - // Offset to field within an Object - MemberOffset GetOffset() const; - - static MemberOffset OffsetOffset() { - return 
MemberOffset(OFFSETOF_MEMBER(Field, offset_)); - } - - MemberOffset GetOffsetDuringLinking() const; - - void SetOffset(MemberOffset num_bytes); - - // field access, null object for static fields - bool GetBoolean(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetBoolean(Object* object, bool z) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int8_t GetByte(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetByte(Object* object, int8_t b) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - uint16_t GetChar(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetChar(Object* object, uint16_t c) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int16_t GetShort(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetShort(Object* object, int16_t s) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int32_t GetInt(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetInt(Object* object, int32_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int64_t GetLong(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetLong(Object* object, int64_t j) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - float GetFloat(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetFloat(Object* object, float f) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - double GetDouble(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDouble(Object* object, double d) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* GetObject(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetObject(Object* object, const Object* l) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // raw field accesses - uint32_t Get32(const Object* object) const - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Set32(Object* object, uint32_t new_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - uint64_t Get64(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Set64(Object* object, uint64_t new_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* GetObj(const Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetObj(Object* object, const Object* new_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static Class* GetJavaLangReflectField() { - DCHECK(java_lang_reflect_Field_ != NULL); - return java_lang_reflect_Field_; - } - - static void SetClass(Class* java_lang_reflect_Field); - static void ResetClass(); - - bool IsVolatile() const { - return (GetAccessFlags() & kAccVolatile) != 0; - } - - private: - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". - // The class we are a part of - Class* declaring_class_; - - uint32_t access_flags_; - - // Dex cache index of field id - uint32_t field_dex_idx_; - - // Offset of field within an instance or in the Class' static fields - uint32_t offset_; - - static Class* java_lang_reflect_Field_; - - friend struct FieldOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(Field); -}; - -// C++ mirror of java.lang.reflect.Method and java.lang.reflect.Constructor -class MANAGED AbstractMethod : public Object { - public: - // A function that invokes a method with an array of its arguments. 
- typedef void InvokeStub(const AbstractMethod* method, - Object* obj, - Thread* thread, - JValue* args, - JValue* result); - - Class* GetDeclaringClass() const; - - void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static MemberOffset DeclaringClassOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_)); - } - - uint32_t GetAccessFlags() const; - - void SetAccessFlags(uint32_t new_access_flags) { - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, access_flags_), new_access_flags, false); - } - - // Approximate what kind of method call would be used for this method. - InvokeType GetInvokeType() const; - - // Returns true if the method is declared public. - bool IsPublic() const { - return (GetAccessFlags() & kAccPublic) != 0; - } - - // Returns true if the method is declared private. - bool IsPrivate() const { - return (GetAccessFlags() & kAccPrivate) != 0; - } - - // Returns true if the method is declared static. - bool IsStatic() const { - return (GetAccessFlags() & kAccStatic) != 0; - } - - // Returns true if the method is a constructor. - bool IsConstructor() const { - return (GetAccessFlags() & kAccConstructor) != 0; - } - - // Returns true if the method is static, private, or a constructor. - bool IsDirect() const { - return IsDirect(GetAccessFlags()); - } - - static bool IsDirect(uint32_t access_flags) { - return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0; - } - - // Returns true if the method is declared synchronized. 
- bool IsSynchronized() const { - uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized; - return (GetAccessFlags() & synchonized) != 0; - } - - bool IsFinal() const { - return (GetAccessFlags() & kAccFinal) != 0; - } - - bool IsMiranda() const { - return (GetAccessFlags() & kAccMiranda) != 0; - } - - bool IsNative() const { - return (GetAccessFlags() & kAccNative) != 0; - } - - bool IsAbstract() const { - return (GetAccessFlags() & kAccAbstract) != 0; - } - - bool IsSynthetic() const { - return (GetAccessFlags() & kAccSynthetic) != 0; - } - - bool IsProxyMethod() const; - - bool CheckIncompatibleClassChange(InvokeType type); - - uint16_t GetMethodIndex() const; - - size_t GetVtableIndex() const { - return GetMethodIndex(); - } - - void SetMethodIndex(uint16_t new_method_index) { - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_), new_method_index, false); - } - - static MemberOffset MethodIndexOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_); - } - - uint32_t GetCodeItemOffset() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_item_offset_), false); - } - - void SetCodeItemOffset(uint32_t new_code_off) { - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_item_offset_), new_code_off, false); - } - - // Number of 32bit registers that would be required to hold all the arguments - static size_t NumArgRegisters(const StringPiece& shorty); - - uint32_t GetDexMethodIndex() const; - - void SetDexMethodIndex(uint32_t new_idx) { - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_dex_index_), new_idx, false); - } - - ObjectArray* GetDexCacheStrings() const; - void SetDexCacheStrings(ObjectArray* new_dex_cache_strings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static MemberOffset DexCacheStringsOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_strings_); - } - - static MemberOffset DexCacheResolvedMethodsOffset() { - return 
OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_methods_); - } - - static MemberOffset DexCacheResolvedTypesOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, dex_cache_resolved_types_); - } - - static MemberOffset DexCacheInitializedStaticStorageOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, - dex_cache_initialized_static_storage_); - } - - ObjectArray* GetDexCacheResolvedMethods() const; - void SetDexCacheResolvedMethods(ObjectArray* new_dex_cache_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ObjectArray* GetDexCacheResolvedTypes() const; - void SetDexCacheResolvedTypes(ObjectArray* new_dex_cache_types) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ObjectArray* GetDexCacheInitializedStaticStorage() const; - void SetDexCacheInitializedStaticStorage(ObjectArray* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Find the method that this method overrides - AbstractMethod* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - const void* GetCode() const { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_), false); - } - - void SetCode(const void* code) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_), code, false); - } - - uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this); - uintptr_t code = reinterpret_cast(GetCode()); - if (code == 0) { - return 0; - } - // TODO: make this Thumb2 specific - code &= ~0x1; - return reinterpret_cast(code)[-1]; - } - - bool IsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uintptr_t code = reinterpret_cast(GetCode()); - if (code == 0) { - return pc == 0; - } - /* - * During a stack walk, a return PC may point to the end of the code + 1 - * (in the case that the last 
instruction is a call that isn't expected to - * return. Thus, we check <= code + GetCodeSize(). - */ - return (code <= pc && pc <= code + GetCodeSize()); - } - - void AssertPcIsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - uint32_t GetOatCodeOffset() const { - DCHECK(!Runtime::Current()->IsStarted()); - return reinterpret_cast(GetCode()); - } - - void SetOatCodeOffset(uint32_t code_offset) { - DCHECK(!Runtime::Current()->IsStarted()); - SetCode(reinterpret_cast(code_offset)); - } - - static MemberOffset GetCodeOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, code_); - } - - const uint32_t* GetMappingTable() const { - const uint32_t* map = GetMappingTableRaw(); - if (map == NULL) { - return map; - } - return map + 1; - } - - uint32_t GetPcToDexMappingTableLength() const { - const uint32_t* map = GetMappingTableRaw(); - if (map == NULL) { - return 0; - } - return map[2]; - } - - const uint32_t* GetPcToDexMappingTable() const { - const uint32_t* map = GetMappingTableRaw(); - if (map == NULL) { - return map; - } - return map + 3; - } - - - uint32_t GetDexToPcMappingTableLength() const { - const uint32_t* map = GetMappingTableRaw(); - if (map == NULL) { - return 0; - } - return map[1] - map[2]; - } - - const uint32_t* GetDexToPcMappingTable() const { - const uint32_t* map = GetMappingTableRaw(); - if (map == NULL) { - return map; - } - return map + 3 + map[2]; - } - - - const uint32_t* GetMappingTableRaw() const { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false); - } - - void SetMappingTable(const uint32_t* mapping_table) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), - mapping_table, false); - } - - uint32_t GetOatMappingTableOffset() const { - DCHECK(!Runtime::Current()->IsStarted()); - return reinterpret_cast(GetMappingTableRaw()); - } - - void SetOatMappingTableOffset(uint32_t mapping_table_offset) { - DCHECK(!Runtime::Current()->IsStarted()); - 
SetMappingTable(reinterpret_cast(mapping_table_offset)); - } - - // Callers should wrap the uint16_t* in a VmapTable instance for convenient access. - const uint16_t* GetVmapTableRaw() const { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false); - } - - void SetVmapTable(const uint16_t* vmap_table) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false); - } - - uint32_t GetOatVmapTableOffset() const { - DCHECK(!Runtime::Current()->IsStarted()); - return reinterpret_cast(GetVmapTableRaw()); - } - - void SetOatVmapTableOffset(uint32_t vmap_table_offset) { - DCHECK(!Runtime::Current()->IsStarted()); - SetVmapTable(reinterpret_cast(vmap_table_offset)); - } - - const uint8_t* GetNativeGcMap() const { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), false); - } - void SetNativeGcMap(const uint8_t* data) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_gc_map_), data, - false); - } - - // When building the oat need a convenient place to stuff the offset of the native GC map. 
- void SetOatNativeGcMapOffset(uint32_t gc_map_offset) { - DCHECK(!Runtime::Current()->IsStarted()); - SetNativeGcMap(reinterpret_cast(gc_map_offset)); - } - - uint32_t GetOatNativeGcMapOffset() const { - DCHECK(!Runtime::Current()->IsStarted()); - return reinterpret_cast(GetNativeGcMap()); - } - - size_t GetFrameSizeInBytes() const { - DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); - size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, frame_size_in_bytes_), false); - DCHECK_LE(static_cast(kStackAlignment), result); - return result; - } - - void SetFrameSizeInBytes(size_t new_frame_size_in_bytes) { - DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, frame_size_in_bytes_), - new_frame_size_in_bytes, false); - } - - size_t GetReturnPcOffsetInBytes() const { - return GetFrameSizeInBytes() - kPointerSize; - } - - bool IsRegistered() const; - - void RegisterNative(Thread* self, const void* native_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static MemberOffset NativeMethodOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, native_method_); - } - - const void* GetNativeMethod() const { - return reinterpret_cast(GetField32(NativeMethodOffset(), false)); - } - - // Native to managed invocation stub entry point - InvokeStub* GetInvokeStub() const { - InvokeStub* result = GetFieldPtr( - OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_), false); - // TODO: DCHECK(result != NULL); should be ahead of time compiled - return result; - } - - void SetInvokeStub(InvokeStub* invoke_stub) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_), - invoke_stub, false); - } - - uint32_t GetInvokeStubSize() const { - uintptr_t invoke_stub = reinterpret_cast(GetInvokeStub()); - if (invoke_stub == 0) { - return 0; - } - // TODO: make this Thumb2 specific - invoke_stub &= ~0x1; - return 
reinterpret_cast(invoke_stub)[-1]; - } - - uint32_t GetOatInvokeStubOffset() const { - DCHECK(!Runtime::Current()->IsStarted()); - return reinterpret_cast(GetInvokeStub()); - } - - void SetOatInvokeStubOffset(uint32_t invoke_stub_offset) { - DCHECK(!Runtime::Current()->IsStarted()); - SetInvokeStub(reinterpret_cast(invoke_stub_offset)); - } - - static MemberOffset GetInvokeStubOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, invoke_stub_); - } - - static MemberOffset GetMethodIndexOffset() { - return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_); - } - - uint32_t GetCoreSpillMask() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, core_spill_mask_), false); - } - - void SetCoreSpillMask(uint32_t core_spill_mask) { - // Computed during compilation - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, core_spill_mask_), core_spill_mask, false); - } - - uint32_t GetFpSpillMask() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, fp_spill_mask_), false); - } - - void SetFpSpillMask(uint32_t fp_spill_mask) { - // Computed during compilation - SetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, fp_spill_mask_), fp_spill_mask, false); - } - - // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal - // conventions for a method of managed code. Returns false for Proxy methods. - bool IsRuntimeMethod() const { - return GetDexMethodIndex() == DexFile::kDexNoIndex16; - } - - // Is this a hand crafted method used for something like describing callee saves? 
- bool IsCalleeSaveMethod() const { - if (!IsRuntimeMethod()) { - return false; - } - Runtime* runtime = Runtime::Current(); - bool result = false; - for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) { - if (this == runtime->GetCalleeSaveMethod(Runtime::CalleeSaveType(i))) { - result = true; - break; - } - } - return result; - } - - bool IsResolutionMethod() const { - bool result = this == Runtime::Current()->GetResolutionMethod(); - // Check that if we do think it is phony it looks like the resolution method - DCHECK(!result || GetDexMethodIndex() == DexFile::kDexNoIndex16); - return result; - } - - uintptr_t NativePcOffset(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Converts a native PC to a dex PC. - uint32_t ToDexPc(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Converts a dex PC to a native PC. - uintptr_t ToNativePc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Converts a dex PC to the first corresponding safepoint PC. - uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc) - const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Find the catch block for the given exception type and dex_pc - uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); - - static Class* GetConstructorClass() { - return java_lang_reflect_Constructor_; - } - - static Class* GetMethodClass() { - return java_lang_reflect_Method_; - } - - static void ResetClasses(); - - protected: - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". 
- // The class we are a part of - Class* declaring_class_; - - // short cuts to declaring_class_->dex_cache_ member for fast compiled code access - ObjectArray* dex_cache_initialized_static_storage_; - - // short cuts to declaring_class_->dex_cache_ member for fast compiled code access - ObjectArray* dex_cache_resolved_methods_; - - // short cuts to declaring_class_->dex_cache_ member for fast compiled code access - ObjectArray* dex_cache_resolved_types_; - - // short cuts to declaring_class_->dex_cache_ member for fast compiled code access - ObjectArray* dex_cache_strings_; - - // Access flags; low 16 bits are defined by spec. - uint32_t access_flags_; - - // Compiled code associated with this method for callers from managed code. - // May be compiled managed code or a bridge for invoking a native method. - const void* code_; - - // Offset to the CodeItem. - uint32_t code_item_offset_; - - // Architecture-dependent register spill mask - uint32_t core_spill_mask_; - - // Architecture-dependent register spill mask - uint32_t fp_spill_mask_; - - // Total size in bytes of the frame - size_t frame_size_in_bytes_; - - // Garbage collection map of native PC offsets to reference bitmaps. - const uint8_t* native_gc_map_; - - // Native invocation stub entry point for calling from native to managed code. - InvokeStub* invoke_stub_; - - // Mapping from native pc to dex pc - const uint32_t* mapping_table_; - - // Index into method_ids of the dex file associated with this method - uint32_t method_dex_index_; - - // For concrete virtual methods, this is the offset of the method in Class::vtable_. - // - // For abstract methods in an interface class, this is the offset of the method in - // "iftable_->Get(n)->GetMethodArray()". - // - // For static and direct methods this is the index in the direct methods table. 
- uint32_t method_index_; - - // The target native method registered with this method - const void* native_method_; - - // When a register is promoted into a register, the spill mask holds which registers hold dex - // registers. The first promoted register's corresponding dex register is vmap_table_[1], the Nth - // is vmap_table_[N]. vmap_table_[0] holds the length of the table. - const uint16_t* vmap_table_; - - static Class* java_lang_reflect_Constructor_; - static Class* java_lang_reflect_Method_; - - friend class ImageWriter; // for relocating code_ and invoke_stub_ - friend struct AbstractMethodOffsets; // for verifying offset information - friend struct ConstructorMethodOffsets; // for verifying offset information - friend struct MethodOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod); -}; - -class MANAGED Method : public AbstractMethod { - -}; - -class MANAGED Constructor : public AbstractMethod { - -}; - -class MANAGED Array : public Object { - public: - // A convenience for code that doesn't know the component size, - // and doesn't want to have to work it out itself. 
- static Array* Alloc(Thread* self, Class* array_class, int32_t component_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - size_t SizeOf() const; - - int32_t GetLength() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), false); - } - - void SetLength(int32_t length) { - CHECK_GE(length, 0); - SetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false); - } - - static MemberOffset LengthOffset() { - return OFFSET_OF_OBJECT_MEMBER(Array, length_); - } - - static MemberOffset DataOffset(size_t component_size) { - if (component_size != sizeof(int64_t)) { - return OFFSET_OF_OBJECT_MEMBER(Array, first_element_); - } else { - // Align longs and doubles. - return MemberOffset(OFFSETOF_MEMBER(Array, first_element_) + 4); - } - } - - void* GetRawData(size_t component_size) { - intptr_t data = reinterpret_cast(this) + DataOffset(component_size).Int32Value(); - return reinterpret_cast(data); - } - - const void* GetRawData(size_t component_size) const { - intptr_t data = reinterpret_cast(this) + DataOffset(component_size).Int32Value(); - return reinterpret_cast(data); - } - - protected: - bool IsValidIndex(int32_t index) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(index < 0 || index >= GetLength())) { - return ThrowArrayIndexOutOfBoundsException(index); - } - return true; - } - - protected: - bool ThrowArrayIndexOutOfBoundsException(int32_t index) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool ThrowArrayStoreException(Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - private: - // The number of array elements. 
- int32_t length_; - // Marker for the data (used by generated code) - uint32_t first_element_[0]; - - DISALLOW_IMPLICIT_CONSTRUCTORS(Array); -}; - -template -class MANAGED ObjectArray : public Array { - public: - static ObjectArray* Alloc(Thread* self, Class* object_array_class, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Set element without bound and element type checks, to be used in limited - // circumstances, such as during boot image writing - void SetWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Set element without bound and element type checks, to be used in limited circumstances, such - // as during boot image writing. Does not do write barrier. - void SetPtrWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static void Copy(const ObjectArray* src, int src_pos, - ObjectArray* dst, int dst_pos, - size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ObjectArray* CopyOf(Thread* self, int32_t new_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); -}; - -template -ObjectArray* ObjectArray::Alloc(Thread* self, Class* object_array_class, int32_t length) { - Array* array = Array::Alloc(self, object_array_class, length, sizeof(Object*)); - if (UNLIKELY(array == NULL)) { - return NULL; - } else { - return array->AsObjectArray(); - } -} - -template -inline T* ObjectArray::Get(int32_t i) const { - if (UNLIKELY(!IsValidIndex(i))) { - return NULL; - } - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - return GetFieldObject(data_offset, false); -} - -template -ObjectArray* ObjectArray::CopyOf(Thread* self, int32_t 
new_length) { - ObjectArray* new_array = Alloc(self, GetClass(), new_length); - Copy(this, 0, new_array, 0, std::min(GetLength(), new_length)); - return new_array; -} - -class MANAGED IfTable : public ObjectArray { - public: - Class* GetInterface(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* interface = Get((i * kMax) + kInterface)->AsClass(); - DCHECK(interface != NULL); - return interface; - } - - void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ObjectArray* GetMethodArray(int32_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray* method_array = - down_cast*>(Get((i * kMax) + kMethodArray)); - DCHECK(method_array != NULL); - return method_array; - } - - size_t GetMethodArrayCount(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray* method_array = - down_cast*>(Get((i * kMax) + kMethodArray)); - if (method_array == NULL) { - return 0; - } - return method_array->GetLength(); - } - - void SetMethodArray(int32_t i, ObjectArray* new_ma) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(new_ma != NULL); - DCHECK(Get((i * kMax) + kMethodArray) == NULL); - Set((i * kMax) + kMethodArray, new_ma); - } - - size_t Count() const { - return GetLength() / kMax; - } - - enum { - // Points to the interface class. - kInterface = 0, - // Method pointers into the vtable, allow fast map from interface method index to concrete - // instance method. - kMethodArray = 1, - kMax = 2, - }; - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(IfTable); -}; - -// Type for the InitializedStaticStorage table. Currently the Class -// provides the static storage. However, this might change to an Array -// to improve image sharing, so we use this type to avoid assumptions -// on the current storage. 
-class MANAGED StaticStorageBase : public Object { -}; - -// C++ mirror of java.lang.Class -class MANAGED Class : public StaticStorageBase { - public: - // Class Status - // - // kStatusNotReady: If a Class cannot be found in the class table by - // FindClass, it allocates an new one with AllocClass in the - // kStatusNotReady and calls LoadClass. Note if it does find a - // class, it may not be kStatusResolved and it will try to push it - // forward toward kStatusResolved. - // - // kStatusIdx: LoadClass populates with Class with information from - // the DexFile, moving the status to kStatusIdx, indicating that the - // Class value in super_class_ has not been populated. The new Class - // can then be inserted into the classes table. - // - // kStatusLoaded: After taking a lock on Class, the ClassLinker will - // attempt to move a kStatusIdx class forward to kStatusLoaded by - // using ResolveClass to initialize the super_class_ and ensuring the - // interfaces are resolved. - // - // kStatusResolved: Still holding the lock on Class, the ClassLinker - // shows linking is complete and fields of the Class populated by making - // it kStatusResolved. Java allows circularities of the form where a super - // class has a field that is of the type of the sub class. We need to be able - // to fully resolve super classes while resolving types for fields. - // - // kStatusRetryVerificationAtRuntime: The verifier sets a class to - // this state if it encounters a soft failure at compile time. This - // often happens when there are unresolved classes in other dex - // files, and this status marks a class as needing to be verified - // again at runtime. - // - // TODO: Explain the other states - enum Status { - kStatusError = -1, - kStatusNotReady = 0, - kStatusIdx = 1, // Loaded, DEX idx in super_class_type_idx_ and interfaces_type_idx_. - kStatusLoaded = 2, // DEX idx values resolved. - kStatusResolved = 3, // Part of linking. 
- kStatusVerifying = 4, // In the process of being verified. - kStatusRetryVerificationAtRuntime = 5, // Compile time verification failed, retry at runtime. - kStatusVerifyingAtRuntime = 6, // Retrying verification at runtime. - kStatusVerified = 7, // Logically part of linking; done pre-init. - kStatusInitializing = 8, // Class init in progress. - kStatusInitialized = 9, // Ready to go. - }; - - Status GetStatus() const { - DCHECK_EQ(sizeof(Status), sizeof(uint32_t)); - return static_cast(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), false)); - } - - void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Returns true if the class has failed to link. - bool IsErroneous() const { - return GetStatus() == kStatusError; - } - - // Returns true if the class has been loaded. - bool IsIdxLoaded() const { - return GetStatus() >= kStatusIdx; - } - - // Returns true if the class has been loaded. - bool IsLoaded() const { - return GetStatus() >= kStatusLoaded; - } - - // Returns true if the class has been linked. - bool IsResolved() const { - return GetStatus() >= kStatusResolved; - } - - // Returns true if the class was compile-time verified. - bool IsCompileTimeVerified() const { - return GetStatus() >= kStatusRetryVerificationAtRuntime; - } - - // Returns true if the class has been verified. - bool IsVerified() const { - return GetStatus() >= kStatusVerified; - } - - // Returns true if the class is initializing. - bool IsInitializing() const { - return GetStatus() >= kStatusInitializing; - } - - // Returns true if the class is initialized. - bool IsInitialized() const { - return GetStatus() == kStatusInitialized; - } - - uint32_t GetAccessFlags() const; - - void SetAccessFlags(uint32_t new_access_flags) { - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags, false); - } - - // Returns true if the class is an interface. 
- bool IsInterface() const { - return (GetAccessFlags() & kAccInterface) != 0; - } - - // Returns true if the class is declared public. - bool IsPublic() const { - return (GetAccessFlags() & kAccPublic) != 0; - } - - // Returns true if the class is declared final. - bool IsFinal() const { - return (GetAccessFlags() & kAccFinal) != 0; - } - - bool IsFinalizable() const { - return (GetAccessFlags() & kAccClassIsFinalizable) != 0; - } - - void SetFinalizable() { - uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); - SetAccessFlags(flags | kAccClassIsFinalizable); - } - - // Returns true if the class is abstract. - bool IsAbstract() const { - return (GetAccessFlags() & kAccAbstract) != 0; - } - - // Returns true if the class is an annotation. - bool IsAnnotation() const { - return (GetAccessFlags() & kAccAnnotation) != 0; - } - - // Returns true if the class is synthetic. - bool IsSynthetic() const { - return (GetAccessFlags() & kAccSynthetic) != 0; - } - - bool IsReferenceClass() const { - return (GetAccessFlags() & kAccClassIsReference) != 0; - } - - bool IsWeakReferenceClass() const { - return (GetAccessFlags() & kAccClassIsWeakReference) != 0; - } - - bool IsSoftReferenceClass() const { - return (GetAccessFlags() & kAccReferenceFlagsMask) == kAccClassIsReference; - } - - bool IsFinalizerReferenceClass() const { - return (GetAccessFlags() & kAccClassIsFinalizerReference) != 0; - } - - bool IsPhantomReferenceClass() const { - return (GetAccessFlags() & kAccClassIsPhantomReference) != 0; - } - - - String* GetName() const; // Returns the cached name. - void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name. - // Computes the name, then sets the cached value. 
- String* ComputeName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsProxyClass() const { - // Read access flags without using getter as whether something is a proxy can be check in - // any loaded state - // TODO: switch to a check if the super class is java.lang.reflect.Proxy? - uint32_t access_flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); - return (access_flags & kAccClassIsProxy) != 0; - } - - Primitive::Type GetPrimitiveType() const { - DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); - return static_cast( - GetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false)); - } - - void SetPrimitiveType(Primitive::Type new_type) { - DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), new_type, false); - } - - // Returns true if the class is a primitive type. - bool IsPrimitive() const { - return GetPrimitiveType() != Primitive::kPrimNot; - } - - bool IsPrimitiveBoolean() const { - return GetPrimitiveType() == Primitive::kPrimBoolean; - } - - bool IsPrimitiveByte() const { - return GetPrimitiveType() == Primitive::kPrimByte; - } - - bool IsPrimitiveChar() const { - return GetPrimitiveType() == Primitive::kPrimChar; - } - - bool IsPrimitiveShort() const { - return GetPrimitiveType() == Primitive::kPrimShort; - } - - bool IsPrimitiveInt() const { - return GetPrimitiveType() == Primitive::kPrimInt; - } - - bool IsPrimitiveLong() const { - return GetPrimitiveType() == Primitive::kPrimLong; - } - - bool IsPrimitiveFloat() const { - return GetPrimitiveType() == Primitive::kPrimFloat; - } - - bool IsPrimitiveDouble() const { - return GetPrimitiveType() == Primitive::kPrimDouble; - } - - bool IsPrimitiveVoid() const { - return GetPrimitiveType() == Primitive::kPrimVoid; - } - - bool IsPrimitiveArray() const { - return IsArrayClass() && GetComponentType()->IsPrimitive(); - } - - // Depth of class from java.lang.Object - size_t Depth() { - size_t depth = 0; - for 
(Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) { - depth++; - } - return depth; - } - - bool IsArrayClass() const { - return GetComponentType() != NULL; - } - - bool IsClassClass() const; - - bool IsStringClass() const; - - bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsFieldClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsMethodClass() const; - - Class* GetComponentType() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), false); - } - - void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(GetComponentType() == NULL); - DCHECK(new_component_type != NULL); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), new_component_type, false); - } - - size_t GetComponentSize() const { - return Primitive::ComponentSize(GetComponentType()->GetPrimitiveType()); - } - - bool IsObjectClass() const { - return !IsPrimitive() && GetSuperClass() == NULL; - } - bool IsInstantiable() const { - return !IsPrimitive() && !IsInterface() && !IsAbstract(); - } - - bool IsObjectArrayClass() const { - return GetComponentType() != NULL && !GetComponentType()->IsPrimitive(); - } - - // Creates a raw object instance but does not invoke the default constructor. 
- Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool IsVariableSize() const { - // Classes and arrays vary in size, and so the object_size_ field cannot - // be used to get their instance size - return IsClassClass() || IsArrayClass(); - } - - size_t SizeOf() const { - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false); - } - - size_t GetClassSize() const { - DCHECK_EQ(sizeof(size_t), sizeof(uint32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false); - } - - void SetClassSize(size_t new_class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false); - CHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(this); - return result; - } - - void SetObjectSize(size_t new_object_size) { - DCHECK(!IsVariableSize()); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size, false); - } - - // Returns true if this class is in the same packages as that class. - bool IsInSamePackage(const Class* that) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); - - // Returns true if this class can access that class. - bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return that->IsPublic() || this->IsInSamePackage(that); - } - - // Can this class access a member in the provided class with the provided member access flags? 
- // Note that access to the class isn't checked in case the declaring class is protected and the - // method has been exposed by a public sub-class - bool CanAccessMember(Class* access_to, uint32_t member_flags) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Classes can access all of their own members - if (this == access_to) { - return true; - } - // Public members are trivially accessible - if (member_flags & kAccPublic) { - return true; - } - // Private members are trivially not accessible - if (member_flags & kAccPrivate) { - return false; - } - // Check for protected access from a sub-class, which may or may not be in the same package. - if (member_flags & kAccProtected) { - if (this->IsSubClass(access_to)) { - return true; - } - } - // Allow protected access from other classes in the same package. - return this->IsInSamePackage(access_to); - } - - bool IsSubClass(const Class* klass) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Can src be assigned to this class? For example, String can be assigned to Object (by an - // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing - // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface - // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign - // to themselves. Classes for primitive types may not assign to each other. - bool IsAssignableFrom(const Class* src) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(src != NULL); - if (this == src) { - // Can always assign to things of the same type. - return true; - } else if (IsObjectClass()) { - // Can assign any reference to java.lang.Object. 
- return !src->IsPrimitive(); - } else if (IsInterface()) { - return src->Implements(this); - } else if (src->IsArrayClass()) { - return IsAssignableFromArray(src); - } else { - return !src->IsInterface() && src->IsSubClass(this); - } - } - - Class* GetSuperClass() const { - // Can only get super class for loaded classes (hack for when runtime is - // initializing) - DCHECK(IsLoaded() || !Runtime::Current()->IsStarted()) << IsLoaded(); - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false); - } - - void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // super class is assigned once, except during class linker initialization - Class* old_super_class = GetFieldObject( - OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false); - DCHECK(old_super_class == NULL || old_super_class == new_super_class); - DCHECK(new_super_class != NULL); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class, false); - } - - bool HasSuperClass() const { - return GetSuperClass() != NULL; - } - - static MemberOffset SuperClassOffset() { - return MemberOffset(OFFSETOF_MEMBER(Class, super_class_)); - } - - ClassLoader* GetClassLoader() const; - - void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static MemberOffset DexCacheOffset() { - return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_)); - } - - enum { - kDumpClassFullDetail = 1, - kDumpClassClassLoader = (1 << 1), - kDumpClassInitialized = (1 << 2), - }; - - void DumpClass(std::ostream& os, int flags) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - DexCache* GetDexCache() const; - - void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ObjectArray* GetDirectMethods() const { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); - } - - void SetDirectMethods(ObjectArray* new_direct_methods) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(NULL == GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false)); - DCHECK_NE(0, new_direct_methods->GetLength()); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), - new_direct_methods, false); - } - - AbstractMethod* GetDirectMethod(int32_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetDirectMethods()->Get(i); - } - - void SetDirectMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ - ObjectArray* direct_methods = - GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); - direct_methods->Set(i, f); - } - - // Returns the number of static, private, and constructor methods. - size_t NumDirectMethods() const { - return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0; - } - - ObjectArray* GetVirtualMethods() const { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); - } - - void SetVirtualMethods(ObjectArray* new_virtual_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: we reassign virtual methods to grow the table for miranda - // methods.. they should really just be assigned once - DCHECK_NE(0, new_virtual_methods->GetLength()); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), - new_virtual_methods, false); - } - - // Returns the number of non-inherited virtual methods. - size_t NumVirtualMethods() const { - return (GetVirtualMethods() != NULL) ? 
GetVirtualMethods()->GetLength() : 0; - } - - AbstractMethod* GetVirtualMethod(uint32_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(IsResolved() || IsErroneous()); - return GetVirtualMethods()->Get(i); - } - - AbstractMethod* GetVirtualMethodDuringLinking(uint32_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(IsLoaded() || IsErroneous()); - return GetVirtualMethods()->Get(i); - } - - void SetVirtualMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray* virtual_methods = - GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); - virtual_methods->Set(i, f); - } - - ObjectArray* GetVTable() const { - DCHECK(IsResolved() || IsErroneous()); - return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false); - } - - ObjectArray* GetVTableDuringLinking() const { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false); - } - - void SetVTable(ObjectArray* new_vtable) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false); - } - - static MemberOffset VTableOffset() { - return OFFSET_OF_OBJECT_MEMBER(Class, vtable_); - } - - // Given a method implemented by this class but potentially from a - // super class, return the specific implementation - // method for this class. - AbstractMethod* FindVirtualMethodForVirtual(AbstractMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(!method->GetDeclaringClass()->IsInterface()); - // The argument method may from a super class. - // Use the index to a potentially overridden one for this instance's class. - return GetVTable()->Get(method->GetMethodIndex()); - } - - // Given a method implemented by this class' super class, return the specific implementation - // method for this class. 
- AbstractMethod* FindVirtualMethodForSuper(AbstractMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(!method->GetDeclaringClass()->IsInterface()); - return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex()); - } - - // Given a method implemented by this class, but potentially from a - // super class or interface, return the specific implementation - // method for this class. - AbstractMethod* FindVirtualMethodForInterface(AbstractMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindVirtualMethodForVirtualOrInterface(AbstractMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (method->IsDirect()) { - return method; - } - if (method->GetDeclaringClass()->IsInterface()) { - return FindVirtualMethodForInterface(method); - } - return FindVirtualMethodForVirtual(method); - } - - AbstractMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t 
dex_method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - AbstractMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - int32_t GetIfTableCount() const { - IfTable* iftable = GetIfTable(); - if (iftable == NULL) { - return 0; - } - return iftable->Count(); - } - - IfTable* GetIfTable() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), false); - } - - void SetIfTable(IfTable* new_iftable) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable, false); - } - - // Get instance fields of the class (See also GetSFields). - ObjectArray* GetIFields() const { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); - } - - void SetIFields(ObjectArray* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(NULL == GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false)); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields, false); - } - - size_t NumInstanceFields() const { - return (GetIFields() != NULL) ? GetIFields()->GetLength() : 0; - } - - Field* GetInstanceField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ - DCHECK_NE(NumInstanceFields(), 0U); - return GetIFields()->Get(i); - } - - void SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ - ObjectArray* ifields= GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); - ifields->Set(i, f); - } - - // Returns the number of instance fields containing reference types. 
- size_t NumReferenceInstanceFields() const { - DCHECK(IsResolved() || IsErroneous()); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false); - } - - size_t NumReferenceInstanceFieldsDuringLinking() const { - DCHECK(IsLoaded() || IsErroneous()); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false); - } - - void SetNumReferenceInstanceFields(size_t new_num) { - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num, false); - } - - uint32_t GetReferenceInstanceOffsets() const { - DCHECK(IsResolved() || IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), false); - } - - void SetReferenceInstanceOffsets(uint32_t new_reference_offsets); - - // Beginning of static field data - static MemberOffset FieldsOffset() { - return OFFSET_OF_OBJECT_MEMBER(Class, fields_); - } - - // Returns the number of static fields containing reference types. - size_t NumReferenceStaticFields() const { - DCHECK(IsResolved() || IsErroneous()); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false); - } - - size_t NumReferenceStaticFieldsDuringLinking() const { - DCHECK(IsLoaded() || IsErroneous()); - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false); - } - - void SetNumReferenceStaticFields(size_t new_num) { - DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num, false); - } - - // Gets the static fields of the class. 
- ObjectArray* GetSFields() const { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); - } - - void SetSFields(ObjectArray* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(NULL == GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false)); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields, false); - } - - size_t NumStaticFields() const { - return (GetSFields() != NULL) ? GetSFields()->GetLength() : 0; - } - - Field* GetStaticField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetSFields()->Get(i); - } - - void SetStaticField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray* sfields= GetFieldObject*>( - OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); - sfields->Set(i, f); - } - - uint32_t GetReferenceStaticOffsets() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), false); - } - - void SetReferenceStaticOffsets(uint32_t new_reference_offsets); - - // Find a static or instance field using the JLS resolution order - Field* FindField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Finds the given instance field in this class or a superclass. - Field* FindInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Finds the given instance field in this class or a superclass, only searches classes that - // have the same dex cache. 
- Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Finds the given static field in this class or a superclass. - Field* FindStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Finds the given static field in this class or superclass, only searches classes that - // have the same dex cache. - Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - pid_t GetClinitThreadId() const { - DCHECK(IsIdxLoaded() || IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), false); - } - - void SetClinitThreadId(pid_t new_clinit_thread_id) { - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id, false); - } - - Class* GetVerifyErrorClass() const { - // DCHECK(IsErroneous()); - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), false); - } - - uint16_t GetDexTypeIndex() const { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), false); - } - - void SetDexTypeIndex(uint16_t type_idx) { - SetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx, false); - } - - static Class* GetJavaLangClass() { - DCHECK(java_lang_Class_ != NULL); - return java_lang_Class_; - } - - // Can't call this SetClass or else gets called instead of 
Object::SetClass in places. - static void SetClassClass(Class* java_lang_Class); - static void ResetClass(); - - private: - void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(klass != NULL) << PrettyClass(this); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false); - } - - bool Implements(const Class* klass) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // defining class loader, or NULL for the "bootstrap" system loader - ClassLoader* class_loader_; - - // For array classes, the component class object for instanceof/checkcast - // (for String[][][], this will be String[][]). NULL for non-array classes. - Class* component_type_; - - // DexCache of resolved constant pool entries (will be NULL for classes generated by the - // runtime such as arrays and primitive classes). - DexCache* dex_cache_; - - // static, private, and methods - ObjectArray* direct_methods_; - - // instance fields - // - // These describe the layout of the contents of an Object. - // Note that only the fields directly declared by this class are - // listed in ifields; fields declared by a superclass are listed in - // the superclass's Class.ifields. - // - // All instance fields that refer to objects are guaranteed to be at - // the beginning of the field list. num_reference_instance_fields_ - // specifies the number of reference fields. - ObjectArray* ifields_; - - // The interface table (iftable_) contains pairs of a interface class and an array of the - // interface methods. There is one pair per interface supported by this class. That means one - // pair for each interface we support directly, indirectly via superclass, or indirectly via a - // superinterface. 
This will be null if neither we nor our superclass implement any interfaces. - // - // Why we need this: given "class Foo implements Face", declare "Face faceObj = new Foo()". - // Invoke faceObj.blah(), where "blah" is part of the Face interface. We can't easily use a - // single vtable. - // - // For every interface a concrete class implements, we create an array of the concrete vtable_ - // methods for the methods in the interface. - IfTable* iftable_; - - // descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName - String* name_; - - // Static fields - ObjectArray* sfields_; - - // The superclass, or NULL if this is java.lang.Object, an interface or primitive type. - Class* super_class_; - - // If class verify fails, we must return same error on subsequent tries. - Class* verify_error_class_; - - // virtual methods defined in this class; invoked through vtable - ObjectArray* virtual_methods_; - - // Virtual method table (vtable), for use by "invoke-virtual". The vtable from the superclass is - // copied in, and virtual methods from our class either replace those from the super or are - // appended. For abstract classes, methods may be created in the vtable that aren't in - // virtual_ methods_ for miranda methods. - ObjectArray* vtable_; - - // access flags; low 16 bits are defined by VM spec - uint32_t access_flags_; - - // Total size of the Class instance; used when allocating storage on gc heap. - // See also object_size_. - size_t class_size_; - - // tid used to check for recursive invocation - pid_t clinit_thread_id_; - - // type index from dex file - // TODO: really 16bits - uint32_t dex_type_idx_; - - // number of instance fields that are object refs - size_t num_reference_instance_fields_; - - // number of static fields that are object refs - size_t num_reference_static_fields_; - - // Total object size; used when allocating storage on gc heap. - // (For interfaces and abstract classes this will be zero.) 
- // See also class_size_. - size_t object_size_; - - // primitive type value, or Primitive::kPrimNot (0); set for generated prim classes - Primitive::Type primitive_type_; - - // Bitmap of offsets of ifields. - uint32_t reference_instance_offsets_; - - // Bitmap of offsets of sfields. - uint32_t reference_static_offsets_; - - // state of class initialization - Status status_; - - // TODO: ? - // initiating class loader list - // NOTE: for classes with low serialNumber, these are unused, and the - // values are kept in a table in gDvm. - // InitiatingLoaderList initiating_loader_list_; - - // Location of first static field. - uint32_t fields_[0]; - - // java.lang.Class - static Class* java_lang_Class_; - - friend struct ClassOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(Class); -}; - -std::ostream& operator<<(std::ostream& os, const Class::Status& rhs); - -inline void Object::SetClass(Class* new_klass) { - // new_klass may be NULL prior to class linker initialization - // We don't mark the card since the class is guaranteed to be referenced from another location. - // Proxy classes are held live by the class loader, and other classes are roots of the class - // linker. 
- SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false); -} - -inline bool Object::InstanceOf(const Class* klass) const { - DCHECK(klass != NULL); - DCHECK(GetClass() != NULL); - return klass->IsAssignableFrom(GetClass()); -} - -inline bool Object::IsClass() const { - Class* java_lang_Class = GetClass()->GetClass(); - return GetClass() == java_lang_Class; -} - -inline bool Object::IsObjectArray() const { - return IsArrayInstance() && !GetClass()->GetComponentType()->IsPrimitive(); -} - -template -inline ObjectArray* Object::AsObjectArray() { - DCHECK(IsObjectArray()); - return down_cast*>(this); -} - -template -inline const ObjectArray* Object::AsObjectArray() const { - DCHECK(IsObjectArray()); - return down_cast*>(this); -} - -inline bool Object::IsArrayInstance() const { - return GetClass()->IsArrayClass(); -} - -inline bool Object::IsField() const { - return GetClass()->IsFieldClass(); -} - -inline bool Object::IsMethod() const { - return GetClass()->IsMethodClass(); -} - -inline bool Object::IsReferenceInstance() const { - return GetClass()->IsReferenceClass(); -} - -inline bool Object::IsWeakReferenceInstance() const { - return GetClass()->IsWeakReferenceClass(); -} - -inline bool Object::IsSoftReferenceInstance() const { - return GetClass()->IsSoftReferenceClass(); -} - -inline bool Object::IsFinalizerReferenceInstance() const { - return GetClass()->IsFinalizerReferenceClass(); -} - -inline bool Object::IsPhantomReferenceInstance() const { - return GetClass()->IsPhantomReferenceClass(); -} - -inline size_t Object::SizeOf() const { - size_t result; - if (IsArrayInstance()) { - result = AsArray()->SizeOf(); - } else if (IsClass()) { - result = AsClass()->SizeOf(); - } else { - result = GetClass()->GetObjectSize(); - } - DCHECK(!IsField() || result == sizeof(Field)); - DCHECK(!IsMethod() || result == sizeof(AbstractMethod)); - return result; -} - -inline Class* Field::GetDeclaringClass() const { - Class* result = 
GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), false); - DCHECK(result != NULL); - DCHECK(result->IsLoaded() || result->IsErroneous()); - return result; -} - -inline void Field::SetDeclaringClass(Class *new_declaring_class) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), new_declaring_class, false); -} - -inline Class* AbstractMethod::GetDeclaringClass() const { - Class* result = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, declaring_class_), false); - DCHECK(result != NULL) << this; - DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this; - return result; -} - -inline void AbstractMethod::SetDeclaringClass(Class *new_declaring_class) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, declaring_class_), new_declaring_class, false); -} - -inline size_t Array::SizeOf() const { - // This is safe from overflow because the array was already allocated, so we know it's sane. - size_t component_size = GetClass()->GetComponentSize(); - int32_t component_count = GetLength(); - size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 
8 : 4); - size_t data_size = component_count * component_size; - return header_size + data_size; -} - -template -void ObjectArray::Set(int32_t i, T* object) { - if (LIKELY(IsValidIndex(i))) { - if (object != NULL) { - Class* element_class = GetClass()->GetComponentType(); - if (UNLIKELY(!object->InstanceOf(element_class))) { - ThrowArrayStoreException(object); - return; - } - } - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - SetFieldObject(data_offset, object, false); - } -} - -template -void ObjectArray::SetWithoutChecks(int32_t i, T* object) { - DCHECK(IsValidIndex(i)); - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - SetFieldObject(data_offset, object, false); -} - -template -void ObjectArray::SetPtrWithoutChecks(int32_t i, T* object) { - DCHECK(IsValidIndex(i)); - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - SetFieldPtr(data_offset, object, false); -} - -template -T* ObjectArray::GetWithoutChecks(int32_t i) const { - DCHECK(IsValidIndex(i)); - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - return GetFieldObject(data_offset, false); -} - -template -void ObjectArray::Copy(const ObjectArray* src, int src_pos, - ObjectArray* dst, int dst_pos, - size_t length) { - if (src->IsValidIndex(src_pos) && - src->IsValidIndex(src_pos+length-1) && - dst->IsValidIndex(dst_pos) && - dst->IsValidIndex(dst_pos+length-1)) { - MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*)); - MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*)); - Class* array_class = dst->GetClass(); - Heap* heap = Runtime::Current()->GetHeap(); - if (array_class == src->GetClass()) { - // No need for array store checks if arrays are of the same type - for (size_t i = 0; i < length; i++) { - Object* object = src->GetFieldObject(src_offset, false); 
- heap->VerifyObject(object); - // directly set field, we do a bulk write barrier at the end - dst->SetField32(dst_offset, reinterpret_cast(object), false, true); - src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*)); - dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*)); - } - } else { - Class* element_class = array_class->GetComponentType(); - CHECK(!element_class->IsPrimitive()); - for (size_t i = 0; i < length; i++) { - Object* object = src->GetFieldObject(src_offset, false); - if (object != NULL && !object->InstanceOf(element_class)) { - dst->ThrowArrayStoreException(object); - return; - } - heap->VerifyObject(object); - // directly set field, we do a bulk write barrier at the end - dst->SetField32(dst_offset, reinterpret_cast(object), false, true); - src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*)); - dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*)); - } - } - heap->WriteBarrierArray(dst, dst_pos, length); - } -} - -inline void IfTable::SetInterface(int32_t i, Class* interface) { - DCHECK(interface != NULL); - DCHECK(interface->IsInterface()); - DCHECK(Get((i * kMax) + kInterface) == NULL); - Set((i * kMax) + kInterface, interface); -} - -class MANAGED ClassClass : public Class { - private: - int32_t padding_; - int64_t serialVersionUID_; - friend struct ClassClassOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(ClassClass); -}; - -class MANAGED StringClass : public Class { - private: - CharArray* ASCII_; - Object* CASE_INSENSITIVE_ORDER_; - uint32_t REPLACEMENT_CHAR_; - int64_t serialVersionUID_; - friend struct StringClassOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(StringClass); -}; - -class MANAGED FieldClass : public Class { - private: - Object* ORDER_BY_NAME_AND_DECLARING_CLASS_; - friend struct FieldClassOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(FieldClass); -}; - -class MANAGED 
MethodClass : public Class { - private: - Object* ORDER_BY_SIGNATURE_; - friend struct MethodClassOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(MethodClass); -}; - -template -class MANAGED PrimitiveArray : public Array { - public: - typedef T ElementType; - - static PrimitiveArray* Alloc(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - const T* GetData() const { - intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); - return reinterpret_cast(data); - } - - T* GetData() { - intptr_t data = reinterpret_cast(this) + DataOffset(sizeof(T)).Int32Value(); - return reinterpret_cast(data); - } - - T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (!IsValidIndex(i)) { - return T(0); - } - return GetData()[i]; - } - - void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsValidIndex(i)) { - GetData()[i] = value; - } - } - - static void SetArrayClass(Class* array_class) { - CHECK(array_class_ == NULL); - CHECK(array_class != NULL); - array_class_ = array_class; - } - - static void ResetArrayClass() { - CHECK(array_class_ != NULL); - array_class_ = NULL; - } - - private: - static Class* array_class_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray); -}; - -// C++ mirror of java.lang.String -class MANAGED String : public Object { - public: - static MemberOffset CountOffset() { - return OFFSET_OF_OBJECT_MEMBER(String, count_); - } - - static MemberOffset ValueOffset() { - return OFFSET_OF_OBJECT_MEMBER(String, array_); - } - - static MemberOffset OffsetOffset() { - return OFFSET_OF_OBJECT_MEMBER(String, offset_); - } - - const CharArray* GetCharArray() const { - return GetFieldObject(ValueOffset(), false); - } - - int32_t GetOffset() const { - int32_t result = GetField32(OffsetOffset(), false); - DCHECK_LE(0, result); - return result; - } - - int32_t GetLength() const; - - int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - 
- void ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength())); - } - - int32_t GetUtfLength() const { - return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength()); - } - - uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static String* AllocFromUtf16(Thread* self, - int32_t utf16_length, - const uint16_t* utf16_data_in, - int32_t hash_code = 0) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static String* AllocFromModifiedUtf8(Thread* self, const char* utf) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, - const char* utf8_data_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static String* Alloc(Thread* self, Class* java_lang_String, int32_t utf16_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static String* Alloc(Thread* self, Class* java_lang_String, CharArray* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool Equals(const char* modified_utf8) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // TODO: do we need this overload? give it a more intention-revealing name. - bool Equals(const StringPiece& modified_utf8) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Compare UTF-16 code point values not in a locale-sensitive manner - int Compare(int32_t utf16_length, const char* utf8_data_in); - - // TODO: do we need this overload? give it a more intention-revealing name. - bool Equals(const uint16_t* that_chars, int32_t that_offset, - int32_t that_length) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Create a modified UTF-8 encoded std::string from a java/lang/String object. 
- std::string ToModifiedUtf8() const; - - int32_t FastIndexOf(int32_t ch, int32_t start) { - int32_t count = GetLength(); - if (start < 0) { - start = 0; - } else if (start > count) { - start = count; - } - const uint16_t* chars = GetCharArray()->GetData() + GetOffset(); - const uint16_t* p = chars + start; - const uint16_t* end = chars + count; - while (p < end) { - if (*p++ == ch) { - return (p - 1) - chars; - } - } - return -1; - } - - int32_t CompareTo(String* other) const; - - static Class* GetJavaLangString() { - DCHECK(java_lang_String_ != NULL); - return java_lang_String_; - } - - static void SetClass(Class* java_lang_String); - static void ResetClass(); - - private: - void SetHashCode(int32_t new_hash_code) { - DCHECK_EQ(0u, - GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false)); - SetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), - new_hash_code, false); - } - - void SetCount(int32_t new_count) { - DCHECK_LE(0, new_count); - SetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count, false); - } - - void SetOffset(int32_t new_offset) { - DCHECK_LE(0, new_offset); - DCHECK_GE(GetLength(), new_offset); - SetField32(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset, false); - } - - void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(new_array != NULL); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array, false); - } - - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". - CharArray* array_; - - int32_t count_; - - uint32_t hash_code_; - - int32_t offset_; - - static Class* java_lang_String_; - - friend struct StringOffsets; // for verifying offset information - FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount - DISALLOW_IMPLICIT_CONSTRUCTORS(String); -}; - -// TODO: remove? only used in a unit test of itself. 
-struct StringHashCode { - int32_t operator()(String* string) const { - return string->GetHashCode(); - } -}; - -inline uint32_t Field::GetAccessFlags() const { - DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), false); -} - -inline MemberOffset Field::GetOffset() const { - DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous()); - return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), false)); -} - -inline MemberOffset Field::GetOffsetDuringLinking() const { - DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); - return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), false)); -} - -inline uint32_t Class::GetAccessFlags() const { - // Check class is loaded or this is java.lang.String that has a - // circularity issue during loading the names of its members - DCHECK(IsLoaded() || IsErroneous() || - this == String::GetJavaLangString() || - this == Field::GetJavaLangReflectField() || - this == AbstractMethod::GetConstructorClass() || - this == AbstractMethod::GetMethodClass()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false); -} - -inline uint32_t AbstractMethod::GetAccessFlags() const { - DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, access_flags_), false); -} - -inline uint16_t AbstractMethod::GetMethodIndex() const { - DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_index_), false); -} - -inline uint32_t AbstractMethod::GetDexMethodIndex() const { - DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, method_dex_index_), false); -} - -inline bool 
AbstractMethod::CheckIncompatibleClassChange(InvokeType type) { - switch (type) { - case kStatic: - return !IsStatic(); - case kDirect: - return !IsDirect() || IsStatic(); - case kVirtual: { - Class* methods_class = GetDeclaringClass(); - return IsDirect() || (methods_class->IsInterface() && !IsMiranda()); - } - case kSuper: - return false; // TODO: appropriate checks for call to super class. - case kInterface: { - Class* methods_class = GetDeclaringClass(); - return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass()); - } - default: - LOG(FATAL) << "Unreachable - invocation type: " << type; - return true; - } -} - -inline void AbstractMethod::AssertPcIsWithinCode(uintptr_t pc) const { - if (!kIsDebugBuild) { - return; - } - if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) { - return; - } - Runtime* runtime = Runtime::Current(); - if (GetCode() == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { - return; - } - DCHECK(IsWithinCode(pc)) - << PrettyMethod(this) - << " pc=" << std::hex << pc - << " code=" << GetCode() - << " size=" << GetCodeSize(); -} - -inline String* Class::GetName() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), false); -} -inline void Class::SetName(String* name) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false); -} - -// C++ mirror of java.lang.Throwable -class MANAGED Throwable : public Object { - public: - void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message, false); - } - String* GetDetailMessage() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false); - } - std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // This is a runtime version of initCause, you shouldn't use it if initCause may have been - // overridden. 
Also it asserts rather than throwing exceptions. Currently this is only used - // in cases like the verifier where the checks cannot fail and initCause isn't overridden. - void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsCheckedException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static Class* GetJavaLangThrowable() { - DCHECK(java_lang_Throwable_ != NULL); - return java_lang_Throwable_; - } - - static void SetClass(Class* java_lang_Throwable); - static void ResetClass(); - - private: - Object* GetStackState() const { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), true); - } - - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". - Throwable* cause_; - String* detail_message_; - Object* stack_state_; // Note this is Java volatile: - Object* stack_trace_; - Object* suppressed_exceptions_; - - static Class* java_lang_Throwable_; - - friend struct ThrowableOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(Throwable); -}; - -// C++ mirror of java.lang.StackTraceElement -class MANAGED StackTraceElement : public Object { - public: - const String* GetDeclaringClass() const { - return GetFieldObject( - OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_), false); - } - - const String* GetMethodName() const { - return GetFieldObject( - OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_), false); - } - - const String* GetFileName() const { - return GetFieldObject( - OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_), false); - } - - int32_t GetLineNumber() const { - return GetField32( - OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), false); - } - - static StackTraceElement* Alloc(Thread* self, - String* declaring_class, - String* method_name, - String* file_name, - int32_t line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static void SetClass(Class* java_lang_StackTraceElement); - - static void 
ResetClass(); - - private: - // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". - String* declaring_class_; - String* file_name_; - String* method_name_; - int32_t line_number_; - - static Class* GetStackTraceElement() { - DCHECK(java_lang_StackTraceElement_ != NULL); - return java_lang_StackTraceElement_; - } - - static Class* java_lang_StackTraceElement_; - - friend struct StackTraceElementOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(StackTraceElement); -}; - -class MANAGED SynthesizedProxyClass : public Class { - public: - ObjectArray* GetInterfaces() { - return interfaces_; - } - - ObjectArray >* GetThrows() { - return throws_; - } - - private: - ObjectArray* interfaces_; - ObjectArray >* throws_; - DISALLOW_IMPLICIT_CONSTRUCTORS(SynthesizedProxyClass); -}; - -class MANAGED Proxy : public Object { - private: - Object* h_; - - friend struct ProxyOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy); -}; - -} // namespace art - -#endif // ART_SRC_OBJECT_H_ diff --git a/src/object_test.cc b/src/object_test.cc deleted file mode 100644 index f3b6a19739..0000000000 --- a/src/object_test.cc +++ /dev/null @@ -1,629 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "object.h" - -#include -#include - -#include "asm_support.h" -#include "class_linker.h" -#include "common_test.h" -#include "dex_file.h" -#include "heap.h" -#include "runtime_support.h" -#include "sirt_ref.h" -#include "UniquePtr.h" - -namespace art { - -class ObjectTest : public CommonTest { - protected: - void AssertString(int32_t length, - const char* utf8_in, - const char* utf16_expected_le, - int32_t expected_hash) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - UniquePtr utf16_expected(new uint16_t[length]); - for (int32_t i = 0; i < length; i++) { - uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | - ((utf16_expected_le[i*2 + 1] & 0xff) << 0)); - utf16_expected[i] = ch; - } - - Thread* self = Thread::Current(); - SirtRef string(self, String::AllocFromModifiedUtf8(self, length, utf8_in)); - ASSERT_EQ(length, string->GetLength()); - ASSERT_TRUE(string->GetCharArray() != NULL); - ASSERT_TRUE(string->GetCharArray()->GetData() != NULL); - // strlen is necessary because the 1-character string "\0" is interpreted as "" - ASSERT_TRUE(string->Equals(utf8_in) || length != static_cast(strlen(utf8_in))); - for (int32_t i = 0; i < length; i++) { - EXPECT_EQ(utf16_expected[i], string->CharAt(i)); - } - EXPECT_EQ(expected_hash, string->GetHashCode()); - } -}; - -// Keep the assembly code in sync -TEST_F(ObjectTest, AsmConstants) { - ASSERT_EQ(STRING_VALUE_OFFSET, String::ValueOffset().Int32Value()); - ASSERT_EQ(STRING_COUNT_OFFSET, String::CountOffset().Int32Value()); - ASSERT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value()); - ASSERT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value()); -} - -TEST_F(ObjectTest, IsInSamePackage) { - // Matches - EXPECT_TRUE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/lang/Class;")); - EXPECT_TRUE(Class::IsInSamePackage("LFoo;", "LBar;")); - - // Mismatches - EXPECT_FALSE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/io/File;")); - 
EXPECT_FALSE(Class::IsInSamePackage("Ljava/lang/Object;", "Ljava/lang/reflect/Method;")); -} - -TEST_F(ObjectTest, Clone) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef > a1(soa.Self(), - class_linker_->AllocObjectArray(soa.Self(), 256)); - size_t s1 = a1->SizeOf(); - Object* clone = a1->Clone(soa.Self()); - EXPECT_EQ(s1, clone->SizeOf()); - EXPECT_TRUE(clone->GetClass() == a1->GetClass()); -} - -TEST_F(ObjectTest, AllocObjectArray) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef > oa(soa.Self(), - class_linker_->AllocObjectArray(soa.Self(), 2)); - EXPECT_EQ(2, oa->GetLength()); - EXPECT_TRUE(oa->Get(0) == NULL); - EXPECT_TRUE(oa->Get(1) == NULL); - oa->Set(0, oa.get()); - EXPECT_TRUE(oa->Get(0) == oa.get()); - EXPECT_TRUE(oa->Get(1) == NULL); - oa->Set(1, oa.get()); - EXPECT_TRUE(oa->Get(0) == oa.get()); - EXPECT_TRUE(oa->Get(1) == oa.get()); - - Class* aioobe = class_linker_->FindSystemClass("Ljava/lang/ArrayIndexOutOfBoundsException;"); - - EXPECT_TRUE(oa->Get(-1) == NULL); - EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); - soa.Self()->ClearException(); - - EXPECT_TRUE(oa->Get(2) == NULL); - EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); - soa.Self()->ClearException(); - - ASSERT_TRUE(oa->GetClass() != NULL); - ClassHelper oa_ch(oa->GetClass()); - ASSERT_EQ(2U, oa_ch.NumDirectInterfaces()); - EXPECT_EQ(class_linker_->FindSystemClass("Ljava/lang/Cloneable;"), oa_ch.GetDirectInterface(0)); - EXPECT_EQ(class_linker_->FindSystemClass("Ljava/io/Serializable;"), oa_ch.GetDirectInterface(1)); -} - -TEST_F(ObjectTest, AllocArray) { - ScopedObjectAccess soa(Thread::Current()); - Class* c = class_linker_->FindSystemClass("[I"); - SirtRef a(soa.Self(), Array::Alloc(soa.Self(), c, 1)); - ASSERT_TRUE(c == a->GetClass()); - - c = class_linker_->FindSystemClass("[Ljava/lang/Object;"); - a.reset(Array::Alloc(soa.Self(), c, 1)); - 
ASSERT_TRUE(c == a->GetClass()); - - c = class_linker_->FindSystemClass("[[Ljava/lang/Object;"); - a.reset(Array::Alloc(soa.Self(), c, 1)); - ASSERT_TRUE(c == a->GetClass()); -} - -template -void TestPrimitiveArray(ClassLinker* cl) { - ScopedObjectAccess soa(Thread::Current()); - typedef typename ArrayT::ElementType T; - - ArrayT* a = ArrayT::Alloc(soa.Self(), 2); - EXPECT_EQ(2, a->GetLength()); - EXPECT_EQ(0, a->Get(0)); - EXPECT_EQ(0, a->Get(1)); - a->Set(0, T(123)); - EXPECT_EQ(T(123), a->Get(0)); - EXPECT_EQ(0, a->Get(1)); - a->Set(1, T(321)); - EXPECT_EQ(T(123), a->Get(0)); - EXPECT_EQ(T(321), a->Get(1)); - - Class* aioobe = cl->FindSystemClass("Ljava/lang/ArrayIndexOutOfBoundsException;"); - - EXPECT_EQ(0, a->Get(-1)); - EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); - soa.Self()->ClearException(); - - EXPECT_EQ(0, a->Get(2)); - EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); - soa.Self()->ClearException(); -} - -TEST_F(ObjectTest, PrimitiveArray_Boolean_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Byte_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Char_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Int_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Long_Alloc) { - TestPrimitiveArray(class_linker_); -} -TEST_F(ObjectTest, PrimitiveArray_Short_Alloc) { - TestPrimitiveArray(class_linker_); -} - -TEST_F(ObjectTest, CheckAndAllocArrayFromCode) { - // pretend we are trying to call 'new char[3]' from String.toCharArray - ScopedObjectAccess soa(Thread::Current()); - Class* java_util_Arrays = 
class_linker_->FindSystemClass("Ljava/util/Arrays;"); - AbstractMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V"); - const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I"); - ASSERT_TRUE(string_id != NULL); - const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId( - java_lang_dex_file_->GetIndexForStringId(*string_id)); - ASSERT_TRUE(type_id != NULL); - uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id); - Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false); - EXPECT_TRUE(array->IsArrayInstance()); - EXPECT_EQ(3, array->AsArray()->GetLength()); - EXPECT_TRUE(array->GetClass()->IsArrayClass()); - EXPECT_TRUE(array->GetClass()->GetComponentType()->IsPrimitive()); -} - -TEST_F(ObjectTest, CreateMultiArray) { - ScopedObjectAccess soa(Thread::Current()); - - SirtRef c(soa.Self(), class_linker_->FindSystemClass("I")); - SirtRef dims(soa.Self(), IntArray::Alloc(soa.Self(), 1)); - dims->Set(0, 1); - Array* multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); - EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass("[I")); - EXPECT_EQ(1, multi->GetLength()); - - dims->Set(0, -1); - multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); - EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException()->GetClass()), - "java.lang.NegativeArraySizeException"); - soa.Self()->ClearException(); - - dims.reset(IntArray::Alloc(soa.Self(), 2)); - for (int i = 1; i < 20; ++i) { - for (int j = 0; j < 20; ++j) { - dims->Set(0, i); - dims->Set(1, j); - multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); - EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass("[[I")); - EXPECT_EQ(i, multi->GetLength()); - for (int k = 0; k < i; ++k) { - Array* outer = multi->AsObjectArray()->Get(k); - EXPECT_TRUE(outer->GetClass() == class_linker_->FindSystemClass("[I")); - EXPECT_EQ(j, outer->GetLength()); - 
} - } - } -} - -TEST_F(ObjectTest, StaticFieldFromCode) { - // pretend we are trying to access 'Static.s0' from StaticsFromCode. - ScopedObjectAccess soa(Thread::Current()); - jobject class_loader = LoadDex("StaticsFromCode"); - const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0]; - CHECK(dex_file != NULL); - - Class* klass = - class_linker_->FindClass("LStaticsFromCode;", soa.Decode(class_loader)); - AbstractMethod* clinit = klass->FindDirectMethod("", "()V"); - const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;"); - ASSERT_TRUE(klass_string_id != NULL); - const DexFile::TypeId* klass_type_id = dex_file->FindTypeId( - dex_file->GetIndexForStringId(*klass_string_id)); - ASSERT_TRUE(klass_type_id != NULL); - - const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;"); - ASSERT_TRUE(type_string_id != NULL); - const DexFile::TypeId* type_type_id = dex_file->FindTypeId( - dex_file->GetIndexForStringId(*type_string_id)); - ASSERT_TRUE(type_type_id != NULL); - - const DexFile::StringId* name_str_id = dex_file->FindStringId("s0"); - ASSERT_TRUE(name_str_id != NULL); - - const DexFile::FieldId* field_id = dex_file->FindFieldId( - *klass_type_id, *name_str_id, *type_type_id); - ASSERT_TRUE(field_id != NULL); - uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id); - - Field* field = FindFieldFromCode(field_idx, clinit, Thread::Current(), StaticObjectRead, - sizeof(Object*)); - Object* s0 = field->GetObj(klass); - EXPECT_TRUE(s0 != NULL); - - SirtRef char_array(soa.Self(), CharArray::Alloc(soa.Self(), 0)); - field->SetObj(field->GetDeclaringClass(), char_array.get()); - EXPECT_EQ(char_array.get(), field->GetObj(klass)); - - field->SetObj(field->GetDeclaringClass(), NULL); - EXPECT_EQ(NULL, field->GetObj(klass)); - - // TODO: more exhaustive tests of all 6 cases of Field::*FromCode -} - -TEST_F(ObjectTest, String) { - ScopedObjectAccess soa(Thread::Current()); - // Test 
the empty string. - AssertString(0, "", "", 0); - - // Test one-byte characters. - AssertString(1, " ", "\x00\x20", 0x20); - AssertString(1, "", "\x00\x00", 0); - AssertString(1, "\x7f", "\x00\x7f", 0x7f); - AssertString(2, "hi", "\x00\x68\x00\x69", (31 * 0x68) + 0x69); - - // Test two-byte characters. - AssertString(1, "\xc2\x80", "\x00\x80", 0x80); - AssertString(1, "\xd9\xa6", "\x06\x66", 0x0666); - AssertString(1, "\xdf\xbf", "\x07\xff", 0x07ff); - AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69); - - // Test three-byte characters. - AssertString(1, "\xe0\xa0\x80", "\x08\x00", 0x0800); - AssertString(1, "\xe1\x88\xb4", "\x12\x34", 0x1234); - AssertString(1, "\xef\xbf\xbf", "\xff\xff", 0xffff); - AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69); -} - -TEST_F(ObjectTest, StringEqualsUtf8) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - EXPECT_TRUE(string->Equals("android")); - EXPECT_FALSE(string->Equals("Android")); - EXPECT_FALSE(string->Equals("ANDROID")); - EXPECT_FALSE(string->Equals("")); - EXPECT_FALSE(string->Equals("and")); - EXPECT_FALSE(string->Equals("androids")); - - SirtRef empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); - EXPECT_TRUE(empty->Equals("")); - EXPECT_FALSE(empty->Equals("a")); -} - -TEST_F(ObjectTest, StringEquals) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - SirtRef string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - EXPECT_TRUE(string->Equals(string_2.get())); - EXPECT_FALSE(string->Equals("Android")); - EXPECT_FALSE(string->Equals("ANDROID")); - EXPECT_FALSE(string->Equals("")); - EXPECT_FALSE(string->Equals("and")); - EXPECT_FALSE(string->Equals("androids")); - - SirtRef empty(soa.Self(), 
String::AllocFromModifiedUtf8(soa.Self(), "")); - EXPECT_TRUE(empty->Equals("")); - EXPECT_FALSE(empty->Equals("a")); -} - -TEST_F(ObjectTest, StringCompareTo) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - SirtRef string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - SirtRef string_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "Android")); - SirtRef string_4(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "and")); - SirtRef string_5(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); - EXPECT_EQ(0, string->CompareTo(string_2.get())); - EXPECT_LT(0, string->CompareTo(string_3.get())); - EXPECT_GT(0, string_3->CompareTo(string.get())); - EXPECT_LT(0, string->CompareTo(string_4.get())); - EXPECT_GT(0, string_4->CompareTo(string.get())); - EXPECT_LT(0, string->CompareTo(string_5.get())); - EXPECT_GT(0, string_5->CompareTo(string.get())); -} - -TEST_F(ObjectTest, StringLength) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android")); - EXPECT_EQ(string->GetLength(), 7); - EXPECT_EQ(string->GetUtfLength(), 7); - - string->SetOffset(2); - string->SetCount(5); - EXPECT_TRUE(string->Equals("droid")); - EXPECT_EQ(string->GetLength(), 5); - EXPECT_EQ(string->GetUtfLength(), 5); -} - -TEST_F(ObjectTest, DescriptorCompare) { - ScopedObjectAccess soa(Thread::Current()); - ClassLinker* linker = class_linker_; - - jobject jclass_loader_1 = LoadDex("ProtoCompare"); - jobject jclass_loader_2 = LoadDex("ProtoCompare2"); - SirtRef class_loader_1(soa.Self(), soa.Decode(jclass_loader_1)); - SirtRef class_loader_2(soa.Self(), soa.Decode(jclass_loader_2)); - - Class* klass1 = linker->FindClass("LProtoCompare;", class_loader_1.get()); - ASSERT_TRUE(klass1 != NULL); - Class* klass2 = linker->FindClass("LProtoCompare2;", class_loader_2.get()); - ASSERT_TRUE(klass2 != NULL); - - 
AbstractMethod* m1_1 = klass1->GetVirtualMethod(0); - MethodHelper mh(m1_1); - EXPECT_STREQ(mh.GetName(), "m1"); - AbstractMethod* m2_1 = klass1->GetVirtualMethod(1); - mh.ChangeMethod(m2_1); - EXPECT_STREQ(mh.GetName(), "m2"); - AbstractMethod* m3_1 = klass1->GetVirtualMethod(2); - mh.ChangeMethod(m3_1); - EXPECT_STREQ(mh.GetName(), "m3"); - AbstractMethod* m4_1 = klass1->GetVirtualMethod(3); - mh.ChangeMethod(m4_1); - EXPECT_STREQ(mh.GetName(), "m4"); - - AbstractMethod* m1_2 = klass2->GetVirtualMethod(0); - mh.ChangeMethod(m1_2); - EXPECT_STREQ(mh.GetName(), "m1"); - AbstractMethod* m2_2 = klass2->GetVirtualMethod(1); - mh.ChangeMethod(m2_2); - EXPECT_STREQ(mh.GetName(), "m2"); - AbstractMethod* m3_2 = klass2->GetVirtualMethod(2); - mh.ChangeMethod(m3_2); - EXPECT_STREQ(mh.GetName(), "m3"); - AbstractMethod* m4_2 = klass2->GetVirtualMethod(3); - mh.ChangeMethod(m4_2); - EXPECT_STREQ(mh.GetName(), "m4"); - - mh.ChangeMethod(m1_1); - MethodHelper mh2(m1_2); - EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); - EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); - - mh.ChangeMethod(m2_1); - mh2.ChangeMethod(m2_2); - EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); - EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); - - mh.ChangeMethod(m3_1); - mh2.ChangeMethod(m3_2); - EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); - EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); - - mh.ChangeMethod(m4_1); - mh2.ChangeMethod(m4_2); - EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2)); - EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh)); -} - - -TEST_F(ObjectTest, StringHashCode) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); - SirtRef A(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "A")); - SirtRef ABC(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); - - EXPECT_EQ(0, empty->GetHashCode()); - EXPECT_EQ(65, A->GetHashCode()); - EXPECT_EQ(64578, ABC->GetHashCode()); -} - -TEST_F(ObjectTest, InstanceOf) { - 
ScopedObjectAccess soa(Thread::Current()); - jobject jclass_loader = LoadDex("XandY"); - SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); - - Class* X = class_linker_->FindClass("LX;", class_loader.get()); - Class* Y = class_linker_->FindClass("LY;", class_loader.get()); - ASSERT_TRUE(X != NULL); - ASSERT_TRUE(Y != NULL); - - SirtRef x(soa.Self(), X->AllocObject(soa.Self())); - SirtRef y(soa.Self(), Y->AllocObject(soa.Self())); - ASSERT_TRUE(x.get() != NULL); - ASSERT_TRUE(y.get() != NULL); - - EXPECT_TRUE(x->InstanceOf(X)); - EXPECT_FALSE(x->InstanceOf(Y)); - EXPECT_TRUE(y->InstanceOf(X)); - EXPECT_TRUE(y->InstanceOf(Y)); - - Class* java_lang_Class = class_linker_->FindSystemClass("Ljava/lang/Class;"); - Class* Object_array_class = class_linker_->FindSystemClass("[Ljava/lang/Object;"); - - EXPECT_FALSE(java_lang_Class->InstanceOf(Object_array_class)); - EXPECT_TRUE(Object_array_class->InstanceOf(java_lang_Class)); - - // All array classes implement Cloneable and Serializable. - Object* array = ObjectArray::Alloc(soa.Self(), Object_array_class, 1); - Class* java_lang_Cloneable = class_linker_->FindSystemClass("Ljava/lang/Cloneable;"); - Class* java_io_Serializable = class_linker_->FindSystemClass("Ljava/io/Serializable;"); - EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable)); - EXPECT_TRUE(array->InstanceOf(java_io_Serializable)); -} - -TEST_F(ObjectTest, IsAssignableFrom) { - ScopedObjectAccess soa(Thread::Current()); - jobject jclass_loader = LoadDex("XandY"); - SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); - Class* X = class_linker_->FindClass("LX;", class_loader.get()); - Class* Y = class_linker_->FindClass("LY;", class_loader.get()); - - EXPECT_TRUE(X->IsAssignableFrom(X)); - EXPECT_TRUE(X->IsAssignableFrom(Y)); - EXPECT_FALSE(Y->IsAssignableFrom(X)); - EXPECT_TRUE(Y->IsAssignableFrom(Y)); - - // class final String implements CharSequence, .. 
- Class* string = class_linker_->FindSystemClass("Ljava/lang/String;"); - Class* charseq = class_linker_->FindSystemClass("Ljava/lang/CharSequence;"); - // Can String be assigned to CharSequence without a cast? - EXPECT_TRUE(charseq->IsAssignableFrom(string)); - // Can CharSequence be assigned to String without a cast? - EXPECT_FALSE(string->IsAssignableFrom(charseq)); - - // Primitive types are only assignable to themselves - const char* prims = "ZBCSIJFD"; - Class* prim_types[strlen(prims)]; - for (size_t i = 0; i < strlen(prims); i++) { - prim_types[i] = class_linker_->FindPrimitiveClass(prims[i]); - } - for (size_t i = 0; i < strlen(prims); i++) { - for (size_t j = 0; i < strlen(prims); i++) { - if (i == j) { - EXPECT_TRUE(prim_types[i]->IsAssignableFrom(prim_types[j])); - } else { - EXPECT_FALSE(prim_types[i]->IsAssignableFrom(prim_types[j])); - } - } - } -} - -TEST_F(ObjectTest, IsAssignableFromArray) { - ScopedObjectAccess soa(Thread::Current()); - jobject jclass_loader = LoadDex("XandY"); - SirtRef class_loader(soa.Self(), soa.Decode(jclass_loader)); - Class* X = class_linker_->FindClass("LX;", class_loader.get()); - Class* Y = class_linker_->FindClass("LY;", class_loader.get()); - ASSERT_TRUE(X != NULL); - ASSERT_TRUE(Y != NULL); - - Class* YA = class_linker_->FindClass("[LY;", class_loader.get()); - Class* YAA = class_linker_->FindClass("[[LY;", class_loader.get()); - ASSERT_TRUE(YA != NULL); - ASSERT_TRUE(YAA != NULL); - - Class* XAA = class_linker_->FindClass("[[LX;", class_loader.get()); - ASSERT_TRUE(XAA != NULL); - - Class* O = class_linker_->FindSystemClass("Ljava/lang/Object;"); - Class* OA = class_linker_->FindSystemClass("[Ljava/lang/Object;"); - Class* OAA = class_linker_->FindSystemClass("[[Ljava/lang/Object;"); - Class* OAAA = class_linker_->FindSystemClass("[[[Ljava/lang/Object;"); - ASSERT_TRUE(O != NULL); - ASSERT_TRUE(OA != NULL); - ASSERT_TRUE(OAA != NULL); - ASSERT_TRUE(OAAA != NULL); - - Class* S = 
class_linker_->FindSystemClass("Ljava/io/Serializable;"); - Class* SA = class_linker_->FindSystemClass("[Ljava/io/Serializable;"); - Class* SAA = class_linker_->FindSystemClass("[[Ljava/io/Serializable;"); - ASSERT_TRUE(S != NULL); - ASSERT_TRUE(SA != NULL); - ASSERT_TRUE(SAA != NULL); - - Class* IA = class_linker_->FindSystemClass("[I"); - ASSERT_TRUE(IA != NULL); - - EXPECT_TRUE(YAA->IsAssignableFrom(YAA)); // identity - EXPECT_TRUE(XAA->IsAssignableFrom(YAA)); // element superclass - EXPECT_FALSE(YAA->IsAssignableFrom(XAA)); - EXPECT_FALSE(Y->IsAssignableFrom(YAA)); - EXPECT_FALSE(YA->IsAssignableFrom(YAA)); - EXPECT_TRUE(O->IsAssignableFrom(YAA)); // everything is an Object - EXPECT_TRUE(OA->IsAssignableFrom(YAA)); - EXPECT_TRUE(OAA->IsAssignableFrom(YAA)); - EXPECT_TRUE(S->IsAssignableFrom(YAA)); // all arrays are Serializable - EXPECT_TRUE(SA->IsAssignableFrom(YAA)); - EXPECT_FALSE(SAA->IsAssignableFrom(YAA)); // unless Y was Serializable - - EXPECT_FALSE(IA->IsAssignableFrom(OA)); - EXPECT_FALSE(OA->IsAssignableFrom(IA)); - EXPECT_TRUE(O->IsAssignableFrom(IA)); -} - -TEST_F(ObjectTest, FindInstanceField) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); - ASSERT_TRUE(s.get() != NULL); - Class* c = s->GetClass(); - ASSERT_TRUE(c != NULL); - - // Wrong type. - EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL); - EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL); - - // Wrong name. - EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL); - EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL); - - // Right name and type. - Field* f1 = c->FindDeclaredInstanceField("count", "I"); - Field* f2 = c->FindInstanceField("count", "I"); - EXPECT_TRUE(f1 != NULL); - EXPECT_TRUE(f2 != NULL); - EXPECT_EQ(f1, f2); - - // TODO: check that s.count == 3. - - // Ensure that we handle superclass fields correctly... 
- c = class_linker_->FindSystemClass("Ljava/lang/StringBuilder;"); - ASSERT_TRUE(c != NULL); - // No StringBuilder.count... - EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL); - // ...but there is an AbstractStringBuilder.count. - EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL); -} - -TEST_F(ObjectTest, FindStaticField) { - ScopedObjectAccess soa(Thread::Current()); - SirtRef s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC")); - ASSERT_TRUE(s.get() != NULL); - Class* c = s->GetClass(); - ASSERT_TRUE(c != NULL); - - // Wrong type. - EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL); - EXPECT_TRUE(c->FindStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL); - - // Wrong name. - EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL); - EXPECT_TRUE(c->FindStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL); - - // Right name and type. - Field* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;"); - Field* f2 = c->FindStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;"); - EXPECT_TRUE(f1 != NULL); - EXPECT_TRUE(f2 != NULL); - EXPECT_EQ(f1, f2); - - // TODO: test static fields via superclasses. - // TODO: test static fields via interfaces. - // TODO: test that interfaces trump superclasses. 
-} - -} // namespace art diff --git a/src/object_utils.h b/src/object_utils.h index 068dd66fdd..ea4de903b1 100644 --- a/src/object_utils.h +++ b/src/object_utils.h @@ -18,14 +18,17 @@ #define ART_SRC_OBJECT_UTILS_H_ #include "class_linker.h" -#include "dex_cache.h" #include "dex_file.h" -#include "intern_table.h" #include "monitor.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "mirror/class.h" +#include "mirror/dex_cache.h" +#include "mirror/field.h" +#include "mirror/iftable.h" +#include "mirror/string.h" + #include "runtime.h" #include "sirt_ref.h" -#include "UniquePtr.h" #include @@ -33,7 +36,7 @@ namespace art { class ObjectLock { public: - explicit ObjectLock(Thread* self, Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit ObjectLock(Thread* self, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), obj_(object) { CHECK(object != NULL); obj_->MonitorEnter(self_); @@ -57,13 +60,13 @@ class ObjectLock { private: Thread* const self_; - Object* obj_; + mirror::Object* obj_; DISALLOW_COPY_AND_ASSIGN(ObjectLock); }; class ClassHelper { public: - ClassHelper(const Class* c = NULL, ClassLinker* l = NULL) + ClassHelper(const mirror::Class* c = NULL, ClassLinker* l = NULL) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_def_(NULL), class_linker_(l), @@ -76,12 +79,12 @@ class ClassHelper { } } - void ChangeClass(const Class* new_c) + void ChangeClass(const mirror::Class* new_c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(new_c != NULL) << "klass_=" << klass_; // Log what we were changing from if any CHECK(new_c->IsClass()) << "new_c=" << new_c; if (dex_cache_ != NULL) { - DexCache* new_c_dex_cache = new_c->GetDexCache(); + mirror::DexCache* new_c_dex_cache = new_c->GetDexCache(); if (new_c_dex_cache != dex_cache_) { dex_cache_ = new_c_dex_cache; dex_file_ = NULL; @@ -112,7 +115,7 @@ class ClassHelper { const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
std::string result("["); - const Class* saved_klass = klass_; + const mirror::Class* saved_klass = klass_; CHECK(saved_klass != NULL); ChangeClass(klass_->GetComponentType()); result += GetDescriptor(); @@ -157,7 +160,7 @@ class ClassHelper { return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_; } - Class* GetDirectInterface(uint32_t idx) + mirror::Class* GetDirectInterface(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); @@ -172,7 +175,7 @@ class ClassHelper { return klass_->GetIfTable()->GetInterface(idx); } else { uint16_t type_idx = GetDirectInterfaceTypeIdx(idx); - Class* interface = GetDexCache()->GetResolvedType(type_idx); + mirror::Class* interface = GetDexCache()->GetResolvedType(type_idx); if (interface == NULL) { interface = GetClassLinker()->ResolveType(GetDexFile(), type_idx, klass_); CHECK(interface != NULL || Thread::Current()->IsExceptionPending()); @@ -190,7 +193,7 @@ class ClassHelper { } std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* dex_cache = GetDexCache(); + mirror::DexCache* dex_cache = GetDexCache(); if (dex_cache != NULL && !klass_->IsProxyClass()) { return dex_cache->GetLocation()->ToModifiedUtf8(); } else { @@ -206,8 +209,8 @@ class ClassHelper { return *dex_file_; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* result = dex_cache_; + mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::DexCache* result = dex_cache_; if (result == NULL) { DCHECK(klass_ != NULL); result = klass_->GetDexCache(); @@ -241,10 +244,10 @@ class ClassHelper { const DexFile::ClassDef* class_def_; ClassLinker* class_linker_; - DexCache* dex_cache_; + mirror::DexCache* dex_cache_; const DexFile* dex_file_; const DexFile::TypeList* interface_type_list_; - const Class* klass_; + const mirror::Class* klass_; std::string descriptor_; DISALLOW_COPY_AND_ASSIGN(ClassHelper); @@ -253,14 +256,14 
@@ class ClassHelper { class FieldHelper { public: FieldHelper() : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), field_(NULL) {} - explicit FieldHelper(const Field* f) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), field_(f) {} - FieldHelper(const Field* f, ClassLinker* l) + explicit FieldHelper(const mirror::Field* f) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), field_(f) {} + FieldHelper(const mirror::Field* f, ClassLinker* l) : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), field_(f) {} - void ChangeField(const Field* new_f) { + void ChangeField(const mirror::Field* new_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_f != NULL); if (dex_cache_ != NULL) { - DexCache* new_f_dex_cache = new_f->GetDeclaringClass()->GetDexCache(); + mirror::DexCache* new_f_dex_cache = new_f->GetDeclaringClass()->GetDexCache(); if (new_f_dex_cache != dex_cache_) { dex_cache_ = new_f_dex_cache; dex_file_ = NULL; @@ -279,22 +282,12 @@ class FieldHelper { return field_index == 0 ? 
"interfaces" : "throws"; } } - String* GetNameAsString() { - uint32_t field_index = field_->GetDexFieldIndex(); - if (!field_->GetDeclaringClass()->IsProxyClass()) { - const DexFile& dex_file = GetDexFile(); - const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index); - return GetClassLinker()->ResolveString(dex_file, field_id.name_idx_, GetDexCache()); - } else { - return Runtime::Current()->GetInternTable()->InternStrong(GetName()); - } - } - Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index); - Class* type = GetDexCache()->GetResolvedType(field_id.type_idx_); + mirror::Class* type = GetDexCache()->GetResolvedType(field_id.type_idx_); if (type == NULL) { type = GetClassLinker()->ResolveType(field_id.type_idx_, field_); CHECK(type != NULL || Thread::Current()->IsExceptionPending()); @@ -347,8 +340,8 @@ class FieldHelper { } private: - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* result = dex_cache_; + mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::DexCache* result = dex_cache_; if (result == NULL) { result = field_->GetDeclaringClass()->GetDexCache(); dex_cache_ = result; @@ -371,9 +364,9 @@ class FieldHelper { } ClassLinker* class_linker_; - DexCache* dex_cache_; + mirror::DexCache* dex_cache_; const DexFile* dex_file_; - const Field* field_; + const mirror::Field* field_; std::string declaring_class_descriptor_; DISALLOW_COPY_AND_ASSIGN(FieldHelper); @@ -385,29 +378,29 @@ class MethodHelper { : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) {} - explicit MethodHelper(const AbstractMethod* m) + explicit MethodHelper(const 
mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } - MethodHelper(const AbstractMethod* m, ClassLinker* l) + MethodHelper(const mirror::AbstractMethod* m, ClassLinker* l) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } - void ChangeMethod(AbstractMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ChangeMethod(mirror::AbstractMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_m != NULL); if (dex_cache_ != NULL) { - Class* klass = new_m->GetDeclaringClass(); + mirror::Class* klass = new_m->GetDeclaringClass(); if (klass->IsProxyClass()) { dex_cache_ = NULL; dex_file_ = NULL; } else { - DexCache* new_m_dex_cache = klass->GetDexCache(); + mirror::DexCache* new_m_dex_cache = klass->GetDexCache(); if (new_m_dex_cache != dex_cache_) { dex_cache_ = new_m_dex_cache; dex_file_ = NULL; @@ -439,7 +432,7 @@ class MethodHelper { } } - String* GetNameAsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::String* GetNameAsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); @@ -487,17 +480,18 @@ class MethodHelper { return GetDexFile().GetProtoParameters(proto); } - ObjectArray* GetParameterTypes(Thread* self) + mirror::ObjectArray* GetParameterTypes(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* params = GetParameterTypeList(); uint32_t num_params = params == NULL ? 
0 : params->Size(); - SirtRef > result(self, GetClassLinker()->AllocClassArray(self, num_params)); + SirtRef > + result(self, GetClassLinker()->AllocClassArray(self, num_params)); if (UNLIKELY(result.get() == NULL)) { CHECK(self->IsExceptionPending()); return NULL; } for (uint32_t i = 0; i < num_params; i++) { - Class* param_type = GetClassFromTypeIdx(params->GetTypeItem(i).type_idx_); + mirror::Class* param_type = GetClassFromTypeIdx(params->GetTypeItem(i).type_idx_); if (param_type == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); return NULL; @@ -507,7 +501,7 @@ class MethodHelper { return result.get(); } - Class* GetReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -536,7 +530,7 @@ class MethodHelper { const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* klass = method_->GetDeclaringClass(); + mirror::Class* klass = method_->GetDeclaringClass(); DCHECK(!klass->IsProxyClass()); uint16_t type_idx = klass->GetDexTypeIndex(); const DexFile& dex_file = GetDexFile(); @@ -561,7 +555,7 @@ class MethodHelper { return index; } - ClassLoader* GetClassLoader() + mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDeclaringClass()->GetClassLoader(); } @@ -626,9 +620,9 @@ class MethodHelper { return method_->GetDexCacheResolvedTypes()->Get(type_idx) != NULL; } - Class* GetClassFromTypeIdx(uint16_t type_idx) + mirror::Class* GetClassFromTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* type = method_->GetDexCacheResolvedTypes()->Get(type_idx); + mirror::Class* type = method_->GetDexCacheResolvedTypes()->Get(type_idx); if (type == NULL) { type = 
GetClassLinker()->ResolveType(type_idx, method_); CHECK(type != NULL || Thread::Current()->IsExceptionPending()); @@ -642,7 +636,7 @@ class MethodHelper { return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } - Class* GetDexCacheResolvedType(uint16_t type_idx) + mirror::Class* GetDexCacheResolvedType(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDexCacheResolvedTypes()->Get(type_idx); } @@ -650,24 +644,24 @@ class MethodHelper { const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { - const DexCache* dex_cache = GetDexCache(); + const mirror::DexCache* dex_cache = GetDexCache(); result = dex_file_ = dex_cache->GetDexFile(); } return *result; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DexCache* result = dex_cache_; + mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::DexCache* result = dex_cache_; if (result == NULL) { - Class* klass = method_->GetDeclaringClass(); + mirror::Class* klass = method_->GetDeclaringClass(); result = klass->GetDexCache(); dex_cache_ = result; } return result; } - String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - String* s = method_->GetDexCacheStrings()->Get(string_idx); + mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::String* s = method_->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(s == NULL)) { s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, GetDexCache()); } @@ -677,11 +671,11 @@ class MethodHelper { private: // Set the method_ field, for proxy methods looking up the interface method via the resolved // methods table. 
- void SetMethod(const AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethod(const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method != NULL) { - Class* klass = method->GetDeclaringClass(); + mirror::Class* klass = method->GetDeclaringClass(); if (klass->IsProxyClass()) { - AbstractMethod* interface_method = + mirror::AbstractMethod* interface_method = method->GetDexCacheResolvedMethods()->Get(method->GetDexMethodIndex()); CHECK(interface_method != NULL); CHECK(interface_method == GetClassLinker()->FindMethodForProxy(klass, method)); @@ -701,9 +695,9 @@ class MethodHelper { } ClassLinker* class_linker_; - DexCache* dex_cache_; + mirror::DexCache* dex_cache_; const DexFile* dex_file_; - const AbstractMethod* method_; + const mirror::AbstractMethod* method_; const char* shorty_; uint32_t shorty_len_; diff --git a/src/primitive.h b/src/primitive.h index cb78ccc293..eaa04cd054 100644 --- a/src/primitive.h +++ b/src/primitive.h @@ -23,8 +23,9 @@ #include "base/macros.h" namespace art { - +namespace mirror { class Object; +} // namespace mirror class Primitive { public: @@ -77,7 +78,7 @@ class Primitive { case kPrimFloat: return 4; case kPrimLong: case kPrimDouble: return 8; - case kPrimNot: return sizeof(Object*); + case kPrimNot: return sizeof(mirror::Object*); default: LOG(FATAL) << "Invalid type " << static_cast(type); return 0; diff --git a/src/reference_table.cc b/src/reference_table.cc index cdb3004440..192535ab02 100644 --- a/src/reference_table.cc +++ b/src/reference_table.cc @@ -18,8 +18,14 @@ #include "base/mutex.h" #include "indirect_reference_table.h" - -#include "object.h" +#include "mirror/array.h" +#include "mirror/array-inl.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/string.h" +#include "thread.h" +#include "utils.h" namespace art { @@ -32,7 +38,7 @@ ReferenceTable::ReferenceTable(const char* name, size_t 
initial_size, size_t max ReferenceTable::~ReferenceTable() { } -void ReferenceTable::Add(const Object* obj) { +void ReferenceTable::Add(const mirror::Object* obj) { DCHECK(obj != NULL); if (entries_.size() == max_size_) { LOG(FATAL) << "ReferenceTable '" << name_ << "' " @@ -41,7 +47,7 @@ void ReferenceTable::Add(const Object* obj) { entries_.push_back(obj); } -void ReferenceTable::Remove(const Object* obj) { +void ReferenceTable::Remove(const mirror::Object* obj) { // We iterate backwards on the assumption that references are LIFO. for (int i = entries_.size() - 1; i >= 0; --i) { if (entries_[i] == obj) { @@ -53,7 +59,7 @@ void ReferenceTable::Remove(const Object* obj) { // If "obj" is an array, return the number of elements in the array. // Otherwise, return zero. -static size_t GetElementCount(const Object* obj) { +static size_t GetElementCount(const mirror::Object* obj) { if (obj == NULL || obj == kClearedJniWeakGlobal || !obj->IsArrayInstance()) { return 0; } @@ -61,7 +67,7 @@ static size_t GetElementCount(const Object* obj) { } struct ObjectComparator { - bool operator()(const Object* obj1, const Object* obj2) + bool operator()(const mirror::Object* obj1, const mirror::Object* obj2) // TODO: enable analysis when analysis can work with the STL. NO_THREAD_SAFETY_ANALYSIS { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); @@ -99,7 +105,7 @@ struct ObjectComparator { // Pass in the number of elements in the array (or 0 if this is not an // array object), and the number of additional objects that are identical // or equivalent to the original. 
-static void DumpSummaryLine(std::ostream& os, const Object* obj, size_t element_count, +static void DumpSummaryLine(std::ostream& os, const mirror::Object* obj, size_t element_count, int identical, int equiv) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (obj == NULL) { @@ -153,7 +159,7 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) { } os << " Last " << (count - first) << " entries (of " << count << "):\n"; for (int idx = count - 1; idx >= first; --idx) { - const Object* ref = entries[idx]; + const mirror::Object* ref = entries[idx]; if (ref == NULL) { continue; } @@ -175,7 +181,7 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) { if (element_count != 0) { StringAppendF(&extras, " (%zd elements)", element_count); } else if (ref->GetClass()->IsStringClass()) { - String* s = const_cast(ref)->AsString(); + mirror::String* s = const_cast(ref)->AsString(); std::string utf8(s->ToModifiedUtf8()); if (s->GetLength() <= 16) { StringAppendF(&extras, " \"%s\"", utf8.c_str()); @@ -206,8 +212,8 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) { size_t equiv = 0; size_t identical = 0; for (size_t idx = 1; idx < count; idx++) { - const Object* prev = sorted_entries[idx-1]; - const Object* current = sorted_entries[idx]; + const mirror::Object* prev = sorted_entries[idx-1]; + const mirror::Object* current = sorted_entries[idx]; size_t element_count = GetElementCount(prev); if (current == prev) { // Same reference, added more than once. 
@@ -225,7 +231,7 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) { DumpSummaryLine(os, sorted_entries.back(), GetElementCount(sorted_entries.back()), identical, equiv); } -void ReferenceTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) { typedef Table::const_iterator It; // TODO: C++0x auto for (It it = entries_.begin(), end = entries_.end(); it != end; ++it) { visitor(*it, arg); diff --git a/src/reference_table.h b/src/reference_table.h index f398eb2a44..5abb5c7b46 100644 --- a/src/reference_table.h +++ b/src/reference_table.h @@ -22,11 +22,13 @@ #include #include -#include "heap.h" +#include "locks.h" +#include "root_visitor.h" namespace art { - +namespace mirror { class Object; +} // namespace mirror // Maintain a table of references. Used for JNI monitor references and // JNI pinned array references. @@ -37,18 +39,18 @@ class ReferenceTable { ReferenceTable(const char* name, size_t initial_size, size_t max_size); ~ReferenceTable(); - void Add(const Object* obj); + void Add(const mirror::Object* obj); - void Remove(const Object* obj); + void Remove(const mirror::Object* obj); size_t Size() const; void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VisitRoots(Heap::RootVisitor* visitor, void* arg); + void VisitRoots(RootVisitor* visitor, void* arg); private: - typedef std::vector Table; + typedef std::vector Table; static void Dump(std::ostream& os, const Table& entries) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); friend class IndirectReferenceTable; // For Dump. 
diff --git a/src/reference_table_test.cc b/src/reference_table_test.cc index c400f83c3c..16fbd94b21 100644 --- a/src/reference_table_test.cc +++ b/src/reference_table_test.cc @@ -25,7 +25,7 @@ class ReferenceTableTest : public CommonTest { TEST_F(ReferenceTableTest, Basics) { ScopedObjectAccess soa(Thread::Current()); - Object* o1 = String::AllocFromModifiedUtf8(soa.Self(), "hello"); + mirror::Object* o1 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello"); ReferenceTable rt("test", 0, 11); @@ -56,7 +56,7 @@ TEST_F(ReferenceTableTest, Basics) { } // Add a second object 10 times and check dumping is sane. - Object* o2 = ShortArray::Alloc(soa.Self(), 0); + mirror::Object* o2 = mirror::ShortArray::Alloc(soa.Self(), 0); for (size_t i = 0; i < 10; ++i) { rt.Add(o2); EXPECT_EQ(i + 2, rt.Size()); diff --git a/src/reflection.cc b/src/reflection.cc index 1ffad3fbf9..16a5502061 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -18,7 +18,13 @@ #include "class_linker.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "well_known_classes.h" @@ -28,17 +34,17 @@ namespace art { jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject javaReceiver, jobject javaArgs) { jmethodID mid = soa.Env()->FromReflectedMethod(javaMethod); - AbstractMethod* m = soa.DecodeMethod(mid); + mirror::AbstractMethod* m = soa.DecodeMethod(mid); - Class* declaring_class = m->GetDeclaringClass(); + mirror::Class* declaring_class = m->GetDeclaringClass(); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaring_class, true, true)) { return NULL; } - Object* receiver = NULL; + mirror::Object* receiver = NULL; if (!m->IsStatic()) { // Check that 
the receiver is non-null and an instance of the field's declaring class. - receiver = soa.Decode(javaReceiver); + receiver = soa.Decode(javaReceiver); if (!VerifyObjectInClass(receiver, declaring_class)) { return NULL; } @@ -49,7 +55,8 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject } // Get our arrays of arguments and their types, and check they're the same size. - ObjectArray* objects = soa.Decode*>(javaArgs); + mirror::ObjectArray* objects = + soa.Decode*>(javaArgs); MethodHelper mh(m); const DexFile::TypeList* classes = mh.GetParameterTypeList(); uint32_t classes_size = classes == NULL ? 0 : classes->Size(); @@ -65,8 +72,8 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject UniquePtr args(new jvalue[arg_count]); JValue* decoded_args = reinterpret_cast(args.get()); for (uint32_t i = 0; i < arg_count; ++i) { - Object* arg = objects->Get(i); - Class* dst_class = mh.GetClassFromTypeIdx(classes->GetTypeItem(i).type_idx_); + mirror::Object* arg = objects->Get(i); + mirror::Class* dst_class = mh.GetClassFromTypeIdx(classes->GetTypeItem(i).type_idx_); if (!UnboxPrimitiveForArgument(arg, dst_class, decoded_args[i], m, i)) { return NULL; } @@ -93,7 +100,7 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject return soa.AddLocalReference(BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(), value)); } -bool VerifyObjectInClass(Object* o, Class* c) { +bool VerifyObjectInClass(mirror::Object* o, mirror::Class* c) { const char* exception = NULL; if (o == NULL) { exception = "Ljava/lang/NullPointerException;"; @@ -194,7 +201,7 @@ bool ConvertPrimitiveValue(Primitive::Type srcType, Primitive::Type dstType, return false; } -Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) { +mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) { if (src_class == Primitive::kPrimNot) { return value.GetL(); } @@ -242,7 +249,7 @@ Object* 
BoxPrimitive(Primitive::Type src_class, const JValue& value) { return result.GetL(); } -static std::string UnboxingFailureKind(AbstractMethod* m, int index, Field* f) +static std::string UnboxingFailureKind(mirror::AbstractMethod* m, int index, mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m != NULL && index != -1) { ++index; // Humans count from 1. @@ -254,8 +261,8 @@ static std::string UnboxingFailureKind(AbstractMethod* m, int index, Field* f) return "result"; } -static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, AbstractMethod* m, - int index, Field* f) +static bool UnboxPrimitive(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, + mirror::AbstractMethod* m, int index, mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!dst_class->IsPrimitive()) { if (o != NULL && !o->InstanceOf(dst_class)) { @@ -285,9 +292,9 @@ static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, A JValue boxed_value; std::string src_descriptor(ClassHelper(o->GetClass()).GetDescriptor()); - Class* src_class = NULL; + mirror::Class* src_class = NULL; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Field* primitive_field = o->GetClass()->GetIFields()->Get(0); + mirror::Field* primitive_field = o->GetClass()->GetIFields()->Get(0); if (src_descriptor == "Ljava/lang/Boolean;") { src_class = class_linker->FindPrimitiveClass('Z'); boxed_value.SetZ(primitive_field->GetBoolean(o)); @@ -325,17 +332,19 @@ static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, A boxed_value, unboxed_value); } -bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, AbstractMethod* m, size_t index) { +bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, + mirror::AbstractMethod* m, size_t index) { CHECK(m != NULL); return UnboxPrimitive(o, dst_class, unboxed_value, m, index, NULL); } -bool 
UnboxPrimitiveForField(Object* o, Class* dst_class, JValue& unboxed_value, Field* f) { +bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, + mirror::Field* f) { CHECK(f != NULL); return UnboxPrimitive(o, dst_class, unboxed_value, NULL, -1, f); } -bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) { +bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value) { return UnboxPrimitive(o, dst_class, unboxed_value, NULL, -1, NULL); } diff --git a/src/reflection.h b/src/reflection.h index 601543f346..8f3224380c 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -21,22 +21,24 @@ #include "primitive.h" namespace art { - +namespace mirror { +class AbstractMethod; class Class; class Field; -union JValue; -class AbstractMethod; class Object; +} // namespace mirror +union JValue; class ScopedObjectAccess; -Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) +mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, - AbstractMethod* m, size_t index) +bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, + mirror::AbstractMethod* m, size_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool UnboxPrimitiveForField(Object* o, Class* dst_class, JValue& unboxed_value, Field* f) +bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, + mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) +bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, 
@@ -46,7 +48,7 @@ bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool VerifyObjectInClass(Object* o, Class* c) +bool VerifyObjectInClass(mirror::Object* o, mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/root_visitor.h b/src/root_visitor.h new file mode 100644 index 0000000000..d53acd3621 --- /dev/null +++ b/src/root_visitor.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_ROOT_VISITOR_H_ +#define ART_SRC_ROOT_VISITOR_H_ + +namespace art { +namespace mirror { +class Object; +} // namespace mirror +class StackVisitor; + +typedef void (RootVisitor)(const mirror::Object* root, void* arg); +typedef void (VerifyRootVisitor)(const mirror::Object* root, void* arg, size_t vreg, + const StackVisitor* visitor); +typedef bool (IsMarkedTester)(const mirror::Object* object, void* arg); + +} // namespace art + +#endif // ART_SRC_ROOT_VISITOR_H_ diff --git a/src/runtime.cc b/src/runtime.cc index 5c73fef7d0..085a9bfdbf 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -24,17 +24,24 @@ #include #include +#include "atomic.h" #include "class_linker.h" -#include "class_loader.h" #include "constants_arm.h" #include "constants_mips.h" #include "constants_x86.h" #include "debugger.h" +#include "gc/card_table-inl.h" #include "heap.h" #include "image.h" #include "instrumentation.h" #include "intern_table.h" #include "jni_internal.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/array.h" +#include "mirror/class_loader.h" +#include "mirror/field.h" +#include "mirror/object-inl.h" +#include "mirror/throwable.h" #include "monitor.h" #include "oat_file.h" #include "ScopedLocalRef.h" @@ -627,23 +634,25 @@ static void CreateSystemClassLoader() { ScopedObjectAccess soa(Thread::Current()); - Class* class_loader_class = soa.Decode(WellKnownClasses::java_lang_ClassLoader); + mirror::Class* class_loader_class = + soa.Decode(WellKnownClasses::java_lang_ClassLoader); CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(class_loader_class, true, true)); - AbstractMethod* getSystemClassLoader = class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;"); + mirror::AbstractMethod* getSystemClassLoader = + class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;"); CHECK(getSystemClassLoader != NULL); - ClassLoader* class_loader = - down_cast(InvokeWithJValues(soa, 
NULL, getSystemClassLoader, NULL).GetL()); + mirror::ClassLoader* class_loader = + down_cast(InvokeWithJValues(soa, NULL, getSystemClassLoader, NULL).GetL()); CHECK(class_loader != NULL); soa.Self()->SetClassLoaderOverride(class_loader); - Class* thread_class = soa.Decode(WellKnownClasses::java_lang_Thread); + mirror::Class* thread_class = soa.Decode(WellKnownClasses::java_lang_Thread); CHECK(Runtime::Current()->GetClassLinker()->EnsureInitialized(thread_class, true, true)); - Field* contextClassLoader = thread_class->FindDeclaredInstanceField("contextClassLoader", - "Ljava/lang/ClassLoader;"); + mirror::Field* contextClassLoader = thread_class->FindDeclaredInstanceField("contextClassLoader", + "Ljava/lang/ClassLoader;"); CHECK(contextClassLoader != NULL); contextClassLoader->SetObject(soa.Self()->GetPeer(), class_loader); @@ -1015,7 +1024,7 @@ void Runtime::DetachCurrentThread() { thread_list_->Unregister(self); } -void Runtime::VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg) { +void Runtime::VisitConcurrentRoots(RootVisitor* visitor, void* arg) { if (intern_table_->IsDirty()) { intern_table_->VisitRoots(visitor, arg); } @@ -1024,7 +1033,7 @@ void Runtime::VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg) { } } -void Runtime::VisitNonThreadRoots(Heap::RootVisitor* visitor, void* arg) { +void Runtime::VisitNonThreadRoots(RootVisitor* visitor, void* arg) { Dbg::VisitRoots(visitor, arg); java_vm_->VisitRoots(visitor, arg); if (pre_allocated_OutOfMemoryError_ != NULL) { @@ -1041,7 +1050,7 @@ void Runtime::VisitNonThreadRoots(Heap::RootVisitor* visitor, void* arg) { } } -void Runtime::VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg) { +void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor, void* arg) { thread_list_->VisitRoots(visitor, arg); VisitNonThreadRoots(visitor, arg); } @@ -1053,48 +1062,50 @@ void Runtime::DirtyRoots() { class_linker_->Dirty(); } -void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void 
Runtime::VisitRoots(RootVisitor* visitor, void* arg) { VisitConcurrentRoots(visitor, arg); VisitNonConcurrentRoots(visitor, arg); } -void Runtime::SetJniDlsymLookupStub(ByteArray* jni_stub_array) { +void Runtime::SetJniDlsymLookupStub(mirror::ByteArray* jni_stub_array) { CHECK(jni_stub_array != NULL) << " jni_stub_array=" << jni_stub_array; CHECK(jni_stub_array_ == NULL || jni_stub_array_ == jni_stub_array) << "jni_stub_array_=" << jni_stub_array_ << " jni_stub_array=" << jni_stub_array; jni_stub_array_ = jni_stub_array; } -void Runtime::SetAbstractMethodErrorStubArray(ByteArray* abstract_method_error_stub_array) { +void Runtime::SetAbstractMethodErrorStubArray(mirror::ByteArray* abstract_method_error_stub_array) { CHECK(abstract_method_error_stub_array != NULL); CHECK(abstract_method_error_stub_array_ == NULL || abstract_method_error_stub_array_ == abstract_method_error_stub_array); abstract_method_error_stub_array_ = abstract_method_error_stub_array; } -void Runtime::SetResolutionStubArray(ByteArray* resolution_stub_array, TrampolineType type) { +void Runtime::SetResolutionStubArray(mirror::ByteArray* resolution_stub_array, TrampolineType type) { CHECK(resolution_stub_array != NULL); CHECK(!HasResolutionStubArray(type) || resolution_stub_array_[type] == resolution_stub_array); resolution_stub_array_[type] = resolution_stub_array; } -AbstractMethod* Runtime::CreateResolutionMethod() { - Class* method_class = AbstractMethod::GetMethodClass(); +mirror::AbstractMethod* Runtime::CreateResolutionMethod() { + mirror::Class* method_class = mirror::AbstractMethod::GetMethodClass(); Thread* self = Thread::Current(); - SirtRef method(self, down_cast(method_class->AllocObject(self))); + SirtRef + method(self, down_cast(method_class->AllocObject(self))); method->SetDeclaringClass(method_class); // TODO: use a special method for resolution method saves method->SetDexMethodIndex(DexFile::kDexNoIndex16); - ByteArray* unknown_resolution_stub = 
GetResolutionStubArray(kUnknownMethod); + mirror::ByteArray* unknown_resolution_stub = GetResolutionStubArray(kUnknownMethod); CHECK(unknown_resolution_stub != NULL); method->SetCode(unknown_resolution_stub->GetData()); return method.get(); } -AbstractMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set, - CalleeSaveType type) { - Class* method_class = AbstractMethod::GetMethodClass(); +mirror::AbstractMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set, + CalleeSaveType type) { + mirror::Class* method_class = mirror::AbstractMethod::GetMethodClass(); Thread* self = Thread::Current(); - SirtRef method(self, down_cast(method_class->AllocObject(self))); + SirtRef + method(self, down_cast(method_class->AllocObject(self))); method->SetDeclaringClass(method_class); // TODO: use a special method for callee saves method->SetDexMethodIndex(DexFile::kDexNoIndex16); @@ -1154,7 +1165,7 @@ AbstractMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set, return method.get(); } -void Runtime::SetCalleeSaveMethod(AbstractMethod* method, CalleeSaveType type) { +void Runtime::SetCalleeSaveMethod(mirror::AbstractMethod* method, CalleeSaveType type) { DCHECK_LT(static_cast(type), static_cast(kLastCalleeSaveType)); callee_save_methods_[type] = method; } diff --git a/src/runtime.h b/src/runtime.h index 84cc8262ca..1d71c1326a 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -32,6 +32,7 @@ #include "instruction_set.h" #include "jobject_comparator.h" #include "locks.h" +#include "root_visitor.h" #include "runtime_stats.h" #include "safe_map.h" @@ -41,21 +42,23 @@ namespace art { +namespace mirror { +class AbstractMethod; +class ClassLoader; template class PrimitiveArray; typedef PrimitiveArray ByteArray; +class String; +class Throwable; +} // namespace mirror class ClassLinker; -class ClassLoader; class DexFile; class Heap; class Instrumentation; class InternTable; struct JavaVMExt; -class AbstractMethod; class MonitorList; class 
SignalCatcher; -class String; class ThreadList; -class Throwable; class Trace; class Runtime { @@ -215,7 +218,7 @@ class Runtime { return monitor_list_; } - Throwable* GetPreAllocatedOutOfMemoryError() { + mirror::Throwable* GetPreAllocatedOutOfMemoryError() { return pre_allocated_OutOfMemoryError_; } @@ -235,40 +238,40 @@ class Runtime { void DirtyRoots(); // Visit all the roots. - void VisitRoots(Heap::RootVisitor* visitor, void* arg) + void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Visit all of the roots we can do safely do concurrently. - void VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg); + void VisitConcurrentRoots(RootVisitor* visitor, void* arg); // Visit all of the non thread roots, we can do this with mutators unpaused. - void VisitNonThreadRoots(Heap::RootVisitor* visitor, void* arg); + void VisitNonThreadRoots(RootVisitor* visitor, void* arg); // Visit all other roots which must be done with mutators suspended. - void VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg) + void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasJniDlsymLookupStub() const { return jni_stub_array_ != NULL; } - ByteArray* GetJniDlsymLookupStub() const { + mirror::ByteArray* GetJniDlsymLookupStub() const { CHECK(HasJniDlsymLookupStub()); return jni_stub_array_; } - void SetJniDlsymLookupStub(ByteArray* jni_stub_array); + void SetJniDlsymLookupStub(mirror::ByteArray* jni_stub_array); bool HasAbstractMethodErrorStubArray() const { return abstract_method_error_stub_array_ != NULL; } - ByteArray* GetAbstractMethodErrorStubArray() const { + mirror::ByteArray* GetAbstractMethodErrorStubArray() const { CHECK(abstract_method_error_stub_array_ != NULL); return abstract_method_error_stub_array_; } - void SetAbstractMethodErrorStubArray(ByteArray* abstract_method_error_stub_array); + void SetAbstractMethodErrorStubArray(mirror::ByteArray* 
abstract_method_error_stub_array); enum TrampolineType { kStaticMethod, @@ -280,16 +283,16 @@ class Runtime { return resolution_stub_array_[type] != NULL; } - ByteArray* GetResolutionStubArray(TrampolineType type) const { + mirror::ByteArray* GetResolutionStubArray(TrampolineType type) const { CHECK(HasResolutionStubArray(type)); DCHECK_LT(static_cast(type), static_cast(kLastTrampolineMethodType)); return resolution_stub_array_[type]; } - void SetResolutionStubArray(ByteArray* resolution_stub_array, TrampolineType type); + void SetResolutionStubArray(mirror::ByteArray* resolution_stub_array, TrampolineType type); // Returns a special method that calls into a trampoline for runtime method resolution - AbstractMethod* GetResolutionMethod() const { + mirror::AbstractMethod* GetResolutionMethod() const { CHECK(HasResolutionMethod()); return resolution_method_; } @@ -298,11 +301,11 @@ class Runtime { return resolution_method_ != NULL; } - void SetResolutionMethod(AbstractMethod* method) { + void SetResolutionMethod(mirror::AbstractMethod* method) { resolution_method_ = method; } - AbstractMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::AbstractMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -316,20 +319,21 @@ class Runtime { return callee_save_methods_[type] != NULL; } - AbstractMethod* GetCalleeSaveMethod(CalleeSaveType type) const { + mirror::AbstractMethod* GetCalleeSaveMethod(CalleeSaveType type) const { DCHECK(HasCalleeSaveMethod(type)); return callee_save_methods_[type]; } - void SetCalleeSaveMethod(AbstractMethod* method, CalleeSaveType type); + void SetCalleeSaveMethod(mirror::AbstractMethod* method, CalleeSaveType type); - AbstractMethod* CreateCalleeSaveMethod(InstructionSet instruction_set, CalleeSaveType type) + mirror::AbstractMethod* CreateCalleeSaveMethod(InstructionSet instruction_set, + CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set) + mirror::AbstractMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set) + mirror::AbstractMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -419,20 +423,20 @@ class Runtime { JavaVMExt* java_vm_; - Throwable* pre_allocated_OutOfMemoryError_; + mirror::Throwable* pre_allocated_OutOfMemoryError_; - ByteArray* jni_stub_array_; + mirror::ByteArray* jni_stub_array_; - ByteArray* abstract_method_error_stub_array_; + mirror::ByteArray* abstract_method_error_stub_array_; - ByteArray* resolution_stub_array_[kLastTrampolineMethodType]; + mirror::ByteArray* resolution_stub_array_[kLastTrampolineMethodType]; - AbstractMethod* callee_save_methods_[kLastCalleeSaveType]; + mirror::AbstractMethod* callee_save_methods_[kLastCalleeSaveType]; - AbstractMethod* resolution_method_; + mirror::AbstractMethod* resolution_method_; // As returned by ClassLoader.getSystemClassLoader() - ClassLoader* system_class_loader_; + mirror::ClassLoader* system_class_loader_; // A 
non-zero value indicates that a thread has been created but not yet initialized. Guarded by // the shutdown lock so that threads aren't born while we're shutting down. diff --git a/src/runtime_support.cc b/src/runtime_support.cc index b276917be9..84a19cf491 100644 --- a/src/runtime_support.cc +++ b/src/runtime_support.cc @@ -16,6 +16,14 @@ #include "runtime_support.h" +#include "class_linker-inl.h" +#include "gc/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" #include "reflection.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" @@ -92,13 +100,14 @@ int32_t art_f2i(float f) { namespace art { // Helper function to allocate array for FILLED_NEW_ARRAY. -Array* CheckAndAllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, int32_t component_count, - Thread* self, bool access_check) { +mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, Thread* self, + bool access_check) { if (UNLIKELY(component_count < 0)) { self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); return NULL; // Failure } - Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); if (klass == NULL) { // Error @@ -119,19 +128,19 @@ Array* CheckAndAllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, int return NULL; // Failure } else { if (access_check) { - Class* referrer = method->GetDeclaringClass(); + mirror::Class* referrer = method->GetDeclaringClass(); if (UNLIKELY(!referrer->CanAccess(klass))) { ThrowIllegalAccessErrorClass(referrer, klass); return NULL; // Failure 
} } DCHECK(klass->IsArrayClass()) << PrettyClass(klass); - return Array::Alloc(self, klass, component_count); + return mirror::Array::Alloc(self, klass, component_count); } } -Field* FindFieldFromCode(uint32_t field_idx, const AbstractMethod* referrer, Thread* self, - FindFieldType type, size_t expected_size) { +mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size) { bool is_primitive; bool is_set; bool is_static; @@ -147,7 +156,7 @@ Field* FindFieldFromCode(uint32_t field_idx, const AbstractMethod* referrer, Thr default: is_primitive = true; is_set = true; is_static = true; break; } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); + mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); if (UNLIKELY(resolved_field == NULL)) { DCHECK(self->IsExceptionPending()); // Throw exception and unwind. return NULL; // Failure. 
@@ -156,8 +165,8 @@ Field* FindFieldFromCode(uint32_t field_idx, const AbstractMethod* referrer, Thr ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); return NULL; } - Class* fields_class = resolved_field->GetDeclaringClass(); - Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (UNLIKELY(!referring_class->CanAccess(fields_class) || !referring_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()))) { @@ -210,11 +219,12 @@ Field* FindFieldFromCode(uint32_t field_idx, const AbstractMethod* referrer, Thr } // Slow path method resolution -AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) { +mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); bool is_direct = type == kStatic || type == kDirect; - AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); + mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); if (UNLIKELY(resolved_method == NULL)) { DCHECK(self->IsExceptionPending()); // Throw exception and unwind. return NULL; // Failure. 
@@ -228,7 +238,7 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs if (is_direct) { return resolved_method; } else if (type == kInterface) { - AbstractMethod* interface_method = + mirror::AbstractMethod* interface_method = this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); if (UNLIKELY(interface_method == NULL)) { ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, @@ -238,7 +248,7 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs return interface_method; } } else { - ObjectArray* vtable; + mirror::ObjectArray* vtable; uint16_t vtable_index = resolved_method->GetMethodIndex(); if (type == kSuper) { vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); @@ -255,8 +265,8 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs referrer); return NULL; // Failure. } - Class* methods_class = resolved_method->GetDeclaringClass(); - Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (UNLIKELY(!referring_class->CanAccess(methods_class) || !referring_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags()))) { @@ -280,7 +290,7 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs if (is_direct) { return resolved_method; } else if (type == kInterface) { - AbstractMethod* interface_method = + mirror::AbstractMethod* interface_method = this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); if (UNLIKELY(interface_method == NULL)) { ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, @@ -290,10 +300,10 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs return interface_method; } } else { - ObjectArray* vtable; + mirror::ObjectArray* 
vtable; uint16_t vtable_index = resolved_method->GetMethodIndex(); if (type == kSuper) { - Class* super_class = referring_class->GetSuperClass(); + mirror::Class* super_class = referring_class->GetSuperClass(); if (LIKELY(super_class != NULL)) { vtable = referring_class->GetSuperClass()->GetVTable(); } else { @@ -317,16 +327,16 @@ AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, Abs } } -Class* ResolveVerifyAndClinit(uint32_t type_idx, const AbstractMethod* referrer, Thread* self, - bool can_run_clinit, bool verify_access) { +mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, const mirror::AbstractMethod* referrer, + Thread* self, bool can_run_clinit, bool verify_access) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* klass = class_linker->ResolveType(type_idx, referrer); + mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); if (UNLIKELY(klass == NULL)) { CHECK(self->IsExceptionPending()); return NULL; // Failure - Indicate to caller to deliver exception } // Perform access check if necessary. 
- Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) { ThrowIllegalAccessErrorClass(referring_class, klass); return NULL; // Failure - Indicate to caller to deliver exception @@ -396,12 +406,12 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char } else { JValue jv; jv.SetJ(args.at(i).j); - Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); + mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); if (val == NULL) { CHECK(soa.Self()->IsExceptionPending()); return zero; } - soa.Decode* >(args_jobj)->Set(i, val); + soa.Decode* >(args_jobj)->Set(i, val); } } } @@ -425,9 +435,9 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char return zero; } else { JValue result_unboxed; - MethodHelper mh(soa.Decode(interface_method_jobj)); - Class* result_type = mh.GetReturnType(); - Object* result_ref = soa.Decode(result); + MethodHelper mh(soa.Decode(interface_method_jobj)); + mirror::Class* result_type = mh.GetReturnType(); + mirror::Object* result_ref = soa.Decode(result); bool unboxed_okay = UnboxPrimitiveForResult(result_ref, result_type, result_unboxed); if (!unboxed_okay) { soa.Self()->ThrowNewWrappedException("Ljava/lang/ClassCastException;", @@ -441,12 +451,14 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char } else { // In the case of checked exceptions that aren't declared, the exception must be wrapped by // a UndeclaredThrowableException. 
- Throwable* exception = soa.Self()->GetException(); + mirror::Throwable* exception = soa.Self()->GetException(); if (exception->IsCheckedException()) { - Object* rcvr = soa.Decode(rcvr_jobj); - SynthesizedProxyClass* proxy_class = down_cast(rcvr->GetClass()); - AbstractMethod* interface_method = soa.Decode(interface_method_jobj); - AbstractMethod* proxy_method = + mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::SynthesizedProxyClass* proxy_class = + down_cast(rcvr->GetClass()); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::AbstractMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); int throws_index = -1; size_t num_virt_methods = proxy_class->NumVirtualMethods(); @@ -457,11 +469,11 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char } } CHECK_NE(throws_index, -1); - ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - Class* exception_class = exception->GetClass(); + mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); + mirror::Class* exception_class = exception->GetClass(); bool declares_exception = false; for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { - Class* declared_exception = declared_exceptions->Get(i); + mirror::Class* declared_exception = declared_exceptions->Get(i); declares_exception = declared_exception->IsAssignableFrom(exception_class); } if (!declares_exception) { diff --git a/src/runtime_support.h b/src/runtime_support.h index 1c8d1740b1..a504237044 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -23,10 +23,11 @@ #include "indirect_reference_table.h" #include "invoke_type.h" #include "jni_internal.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "mirror/array.h" +#include "mirror/throwable.h" #include "object_utils.h" #include "thread.h" -#include "verifier/method_verifier.h" extern "C" 
void art_interpreter_invoke_handler(); extern "C" void art_proxy_invoke_handler(); @@ -40,21 +41,21 @@ extern "C" int64_t art_f2l(float f); extern "C" int32_t art_f2i(float f); namespace art { - -class Array; +namespace mirror { class Class; class Field; -class AbstractMethod; class Object; +} // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. // When verification/compiler hasn't been able to verify access, optionally perform an access // check. -static inline Object* AllocObjectFromCode(uint32_t type_idx, AbstractMethod* method, Thread* self, - bool access_check) +static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, + bool access_check) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); Runtime* runtime = Runtime::Current(); if (UNLIKELY(klass == NULL)) { klass = runtime->GetClassLinker()->ResolveType(type_idx, method); @@ -69,7 +70,7 @@ static inline Object* AllocObjectFromCode(uint32_t type_idx, AbstractMethod* met PrettyDescriptor(klass).c_str()); return NULL; // Failure } - Class* referrer = method->GetDeclaringClass(); + mirror::Class* referrer = method->GetDeclaringClass(); if (UNLIKELY(!referrer->CanAccess(klass))) { ThrowIllegalAccessErrorClass(referrer, klass); return NULL; // Failure @@ -87,14 +88,15 @@ static inline Object* AllocObjectFromCode(uint32_t type_idx, AbstractMethod* met // it cannot be resolved, throw an error. If it can, use it to create an array. // When verification/compiler hasn't been able to verify access, optionally perform an access // check. 
-static inline Array* AllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, int32_t component_count, - Thread* self, bool access_check) +static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); return NULL; // Failure } - Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); if (klass == NULL) { // Error @@ -104,17 +106,18 @@ static inline Array* AllocArrayFromCode(uint32_t type_idx, AbstractMethod* metho CHECK(klass->IsArrayClass()) << PrettyClass(klass); } if (access_check) { - Class* referrer = method->GetDeclaringClass(); + mirror::Class* referrer = method->GetDeclaringClass(); if (UNLIKELY(!referrer->CanAccess(klass))) { ThrowIllegalAccessErrorClass(referrer, klass); return NULL; // Failure } } - return Array::Alloc(self, klass, component_count); + return mirror::Array::Alloc(self, klass, component_count); } -extern Array* CheckAndAllocArrayFromCode(uint32_t type_idx, AbstractMethod* method, int32_t component_count, - Thread* self, bool access_check) +extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Type of find field operation for fast and slow case. @@ -130,19 +133,21 @@ enum FindFieldType { }; // Slow field find that can initialize classes and may throw exceptions. 
-extern Field* FindFieldFromCode(uint32_t field_idx, const AbstractMethod* referrer, Thread* self, - FindFieldType type, size_t expected_size) +extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. -static inline Field* FindFieldFast(uint32_t field_idx, const AbstractMethod* referrer, - FindFieldType type, size_t expected_size) +static inline mirror::Field* FindFieldFast(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + FindFieldType type, size_t expected_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); + mirror::Field* resolved_field = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { return NULL; } - Class* fields_class = resolved_field->GetDeclaringClass(); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); // Check class is initiliazed or initializing. if (UNLIKELY(!fields_class->IsInitializing())) { return NULL; @@ -167,7 +172,7 @@ static inline Field* FindFieldFast(uint32_t field_idx, const AbstractMethod* ref // Incompatible class change. return NULL; } - Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (UNLIKELY(!referring_class->CanAccess(fields_class) || !referring_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()) || @@ -184,14 +189,16 @@ static inline Field* FindFieldFast(uint32_t field_idx, const AbstractMethod* ref } // Fast path method resolution that can't throw exceptions. 
-static inline AbstractMethod* FindMethodFast(uint32_t method_idx, Object* this_object, - const AbstractMethod* referrer, bool access_check, InvokeType type) +static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, + mirror::Object* this_object, + const mirror::AbstractMethod* referrer, + bool access_check, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { bool is_direct = type == kStatic || type == kDirect; if (UNLIKELY(this_object == NULL && !is_direct)) { return NULL; } - AbstractMethod* resolved_method = + mirror::AbstractMethod* resolved_method = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); if (UNLIKELY(resolved_method == NULL)) { return NULL; @@ -202,8 +209,8 @@ static inline AbstractMethod* FindMethodFast(uint32_t method_idx, Object* this_o if (UNLIKELY(icce)) { return NULL; } - Class* methods_class = resolved_method->GetDeclaringClass(); - Class* referring_class = referrer->GetDeclaringClass(); + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); if (UNLIKELY(!referring_class->CanAccess(methods_class) || !referring_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags()))) { @@ -224,17 +231,20 @@ static inline AbstractMethod* FindMethodFast(uint32_t method_idx, Object* this_o } } -extern AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) +extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -extern Class* ResolveVerifyAndClinit(uint32_t type_idx, const AbstractMethod* referrer, Thread* self, - bool can_run_clinit, bool verify_access) +extern mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, + const 
mirror::AbstractMethod* referrer, Thread* self, + bool can_run_clinit, bool verify_access) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -static inline String* ResolveStringFromCode(const AbstractMethod* referrer, uint32_t string_idx) +static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, + uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); @@ -244,7 +254,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_) { // Save any pending exception over monitor exit call. - Throwable* saved_exception = NULL; + mirror::Throwable* saved_exception = NULL; if (UNLIKELY(self->IsExceptionPending())) { saved_exception = self->GetException(); self->ClearException(); @@ -263,7 +273,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) } } -static inline void CheckReferenceResult(Object* o, Thread* self) +static inline void CheckReferenceResult(mirror::Object* o, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (o == NULL) { return; @@ -273,9 +283,9 @@ static inline void CheckReferenceResult(Object* o, Thread* self) PrettyMethod(self->GetCurrentMethod()).c_str()); } // Make sure that the result is an instance of the type this method was expected to return. 
- AbstractMethod* m = self->GetCurrentMethod(); + mirror::AbstractMethod* m = self->GetCurrentMethod(); MethodHelper mh(m); - Class* return_type = mh.GetReturnType(); + mirror::Class* return_type = mh.GetReturnType(); if (!o->InstanceOf(return_type)) { JniAbortF(NULL, "attempt to return an instance of %s from %s", diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h index 5eabbdf09f..80d47c5528 100644 --- a/src/scoped_thread_state_change.h +++ b/src/scoped_thread_state_change.h @@ -165,7 +165,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { * passed in), or NULL on failure. */ template - T AddLocalReference(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. if (obj == NULL) { return NULL; @@ -208,7 +208,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { return down_cast(Self()->DecodeJObject(obj)); } - Field* DecodeField(jfieldID fid) const + mirror::Field* DecodeField(jfieldID fid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -218,10 +218,10 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { // TODO: we should make these unique weak globals if Field instances can ever move. 
UNIMPLEMENTED(WARNING); #endif - return reinterpret_cast(fid); + return reinterpret_cast(fid); } - jfieldID EncodeField(Field* field) const + jfieldID EncodeField(mirror::Field* field) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -233,7 +233,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { return reinterpret_cast(field); } - AbstractMethod* DecodeMethod(jmethodID mid) const + mirror::AbstractMethod* DecodeMethod(jmethodID mid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -243,10 +243,10 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { // TODO: we should make these unique weak globals if Method instances can ever move. UNIMPLEMENTED(WARNING); #endif - return reinterpret_cast(mid); + return reinterpret_cast(mid); } - jmethodID EncodeMethod(AbstractMethod* method) const + jmethodID EncodeMethod(mirror::AbstractMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. 
diff --git a/src/stack.cc b/src/stack.cc index 2e1f4ae311..c998f2a4dd 100644 --- a/src/stack.cc +++ b/src/stack.cc @@ -18,7 +18,10 @@ #include "compiler.h" #include "oat/runtime/context.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "thread_list.h" @@ -39,7 +42,7 @@ size_t ManagedStack::NumJniShadowFrameReferences() const { return count; } -bool ManagedStack::ShadowFramesContain(Object** shadow_frame_entry) const { +bool ManagedStack::ShadowFramesContain(mirror::Object** shadow_frame_entry) const { for (const ManagedStack* current_fragment = this; current_fragment != NULL; current_fragment = current_fragment->GetLink()) { for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL; @@ -74,7 +77,7 @@ size_t StackVisitor::GetNativePcOffset() const { return GetMethod()->NativePcOffset(cur_quick_frame_pc_); } -uint32_t StackVisitor::GetVReg(AbstractMethod* m, uint16_t vreg, VRegKind kind) const { +uint32_t StackVisitor::GetVReg(mirror::AbstractMethod* m, uint16_t vreg, VRegKind kind) const { if (cur_quick_frame_ != NULL) { DCHECK(context_ != NULL); // You can't reliably read registers without a context. DCHECK(m == GetMethod()); @@ -98,7 +101,8 @@ uint32_t StackVisitor::GetVReg(AbstractMethod* m, uint16_t vreg, VRegKind kind) } } -void StackVisitor::SetVReg(AbstractMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) { +void StackVisitor::SetVReg(mirror::AbstractMethod* m, uint16_t vreg, uint32_t new_value, + VRegKind kind) { if (cur_quick_frame_ != NULL) { DCHECK(context_ != NULL); // You can't reliably write registers without a context. 
DCHECK(m == GetMethod()); @@ -136,14 +140,14 @@ void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) { } uintptr_t StackVisitor::GetReturnPc() const { - AbstractMethod** sp = GetCurrentQuickFrame(); + mirror::AbstractMethod** sp = GetCurrentQuickFrame(); DCHECK(sp != NULL); byte* pc_addr = reinterpret_cast(sp) + GetMethod()->GetReturnPcOffsetInBytes(); return *reinterpret_cast(pc_addr); } void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) { - AbstractMethod** sp = GetCurrentQuickFrame(); + mirror::AbstractMethod** sp = GetCurrentQuickFrame(); CHECK(sp != NULL); byte* pc_addr = reinterpret_cast(sp) + GetMethod()->GetReturnPcOffsetInBytes(); *reinterpret_cast(pc_addr) = new_ret_pc; @@ -182,7 +186,7 @@ void StackVisitor::DescribeStack(Thread* thread) { std::string StackVisitor::DescribeLocation() const { std::string result("Visiting method '"); - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m == NULL) { return "upcall"; } @@ -200,9 +204,9 @@ InstrumentationStackFrame StackVisitor::GetInstrumentationStackFrame(uint32_t de void StackVisitor::SanityCheckFrame() const { #ifndef NDEBUG - AbstractMethod* method = GetMethod(); - CHECK(method->GetClass() == AbstractMethod::GetMethodClass() || - method->GetClass() == AbstractMethod::GetConstructorClass()); + mirror::AbstractMethod* method = GetMethod(); + CHECK(method->GetClass() == mirror::AbstractMethod::GetMethodClass() || + method->GetClass() == mirror::AbstractMethod::GetConstructorClass()); if (cur_quick_frame_ != NULL) { method->AssertPcIsWithinCode(cur_quick_frame_pc_); // Frame sanity. @@ -229,7 +233,7 @@ void StackVisitor::WalkStack(bool include_transitions) { if (cur_quick_frame_ != NULL) { // Handle quick stack frames. // Can't be both a shadow and a quick fragment. 
DCHECK(current_fragment->GetTopShadowFrame() == NULL); - AbstractMethod* method = *cur_quick_frame_; + mirror::AbstractMethod* method = *cur_quick_frame_; while (method != NULL) { SanityCheckFrame(); bool should_continue = VisitFrame(); @@ -259,7 +263,7 @@ void StackVisitor::WalkStack(bool include_transitions) { } cur_quick_frame_pc_ = return_pc; byte* next_frame = reinterpret_cast(cur_quick_frame_) + frame_size; - cur_quick_frame_ = reinterpret_cast(next_frame); + cur_quick_frame_ = reinterpret_cast(next_frame); cur_depth_++; method = *cur_quick_frame_; } diff --git a/src/stack.h b/src/stack.h index 8d0efe9e33..c3d20f5ef6 100644 --- a/src/stack.h +++ b/src/stack.h @@ -17,11 +17,9 @@ #ifndef ART_SRC_STACK_H_ #define ART_SRC_STACK_H_ -#include "base/macros.h" #include "dex_file.h" -#include "heap.h" #include "instrumentation.h" -#include "jni.h" +#include "base/macros.h" #include "oat/runtime/context.h" #include @@ -29,9 +27,12 @@ namespace art { +namespace mirror { class AbstractMethod; -class Context; class Object; +} // namespace mirror + +class Context; class ShadowFrame; class StackIndirectReferenceTable; class ScopedObjectAccess; @@ -59,10 +60,10 @@ class ShadowFrame { public: // Create ShadowFrame for interpreter. 
static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link, - AbstractMethod* method, uint32_t dex_pc) { + mirror::AbstractMethod* method, uint32_t dex_pc) { size_t sz = sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) + - (sizeof(Object*) * num_vregs); + (sizeof(mirror::Object*) * num_vregs); uint8_t* memory = new uint8_t[sz]; ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true); return sf; @@ -121,13 +122,13 @@ class ShadowFrame { return *reinterpret_cast(vreg); } - Object* GetVRegReference(size_t i) const { + mirror::Object* GetVRegReference(size_t i) const { DCHECK_LT(i, NumberOfVRegs()); if (HasReferenceArray()) { return References()[i]; } else { const uint32_t* vreg = &vregs_[i]; - return *reinterpret_cast(vreg); + return *reinterpret_cast(vreg); } } @@ -153,26 +154,26 @@ class ShadowFrame { *reinterpret_cast(vreg) = val; } - void SetVRegReference(size_t i, Object* val) { + void SetVRegReference(size_t i, mirror::Object* val) { DCHECK_LT(i, NumberOfVRegs()); uint32_t* vreg = &vregs_[i]; - *reinterpret_cast(vreg) = val; + *reinterpret_cast(vreg) = val; if (HasReferenceArray()) { References()[i] = val; } } - AbstractMethod* GetMethod() const { + mirror::AbstractMethod* GetMethod() const { DCHECK_NE(method_, static_cast(NULL)); return method_; } - void SetMethod(AbstractMethod* method) { + void SetMethod(mirror::AbstractMethod* method) { DCHECK_NE(method, static_cast(NULL)); method_ = method; } - bool Contains(Object** shadow_frame_entry_obj) const { + bool Contains(mirror::Object** shadow_frame_entry_obj) const { if (HasReferenceArray()) { return ((&References()[0] <= shadow_frame_entry_obj) && (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1]))); @@ -204,8 +205,8 @@ class ShadowFrame { } private: - ShadowFrame(uint32_t num_vregs, ShadowFrame* link, AbstractMethod* method, uint32_t dex_pc, - bool has_reference_array) + ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::AbstractMethod* method, + 
uint32_t dex_pc, bool has_reference_array) : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) { CHECK_LT(num_vregs, static_cast(kHasReferenceArray)); if (has_reference_array) { @@ -220,14 +221,14 @@ class ShadowFrame { } } - Object* const* References() const { + mirror::Object* const* References() const { DCHECK(HasReferenceArray()); const uint32_t* vreg_end = &vregs_[NumberOfVRegs()]; - return reinterpret_cast(vreg_end); + return reinterpret_cast(vreg_end); } - Object** References() { - return const_cast(const_cast(this)->References()); + mirror::Object** References() { + return const_cast(const_cast(this)->References()); } enum ShadowFrameFlag { @@ -237,7 +238,7 @@ class ShadowFrame { uint32_t number_of_vregs_; // Link to previous shadow frame or NULL. ShadowFrame* link_; - AbstractMethod* method_; + mirror::AbstractMethod* method_; uint32_t dex_pc_; uint32_t vregs_[0]; @@ -272,11 +273,11 @@ class PACKED(4) ManagedStack { return link_; } - AbstractMethod** GetTopQuickFrame() const { + mirror::AbstractMethod** GetTopQuickFrame() const { return top_quick_frame_; } - void SetTopQuickFrame(AbstractMethod** top) { + void SetTopQuickFrame(mirror::AbstractMethod** top) { top_quick_frame_ = top; } @@ -320,12 +321,12 @@ class PACKED(4) ManagedStack { size_t NumJniShadowFrameReferences() const; - bool ShadowFramesContain(Object** shadow_frame_entry) const; + bool ShadowFramesContain(mirror::Object** shadow_frame_entry) const; private: ManagedStack* link_; ShadowFrame* top_shadow_frame_; - AbstractMethod** top_quick_frame_; + mirror::AbstractMethod** top_quick_frame_; uintptr_t top_quick_frame_pc_; }; @@ -342,7 +343,7 @@ class StackVisitor { void WalkStack(bool include_transitions = false) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - AbstractMethod* GetMethod() const { + mirror::AbstractMethod* GetMethod() const { if (cur_shadow_frame_ != NULL) { return cur_shadow_frame_->GetMethod(); } else if (cur_quick_frame_ != NULL) { @@ -388,16 +389,16 @@ 
class StackVisitor { return num_frames_; } - uint32_t GetVReg(AbstractMethod* m, uint16_t vreg, VRegKind kind) const + uint32_t GetVReg(mirror::AbstractMethod* m, uint16_t vreg, VRegKind kind) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetVReg(AbstractMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) + void SetVReg(mirror::AbstractMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uintptr_t GetGPR(uint32_t reg) const; void SetGPR(uint32_t reg, uintptr_t value); - uint32_t GetVReg(AbstractMethod** cur_quick_frame, const DexFile::CodeItem* code_item, + uint32_t GetVReg(mirror::AbstractMethod** cur_quick_frame, const DexFile::CodeItem* code_item, uint32_t core_spills, uint32_t fp_spills, size_t frame_size, uint16_t vreg) const { int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg); @@ -471,7 +472,7 @@ class StackVisitor { return cur_quick_frame_pc_; } - AbstractMethod** GetCurrentQuickFrame() const { + mirror::AbstractMethod** GetCurrentQuickFrame() const { return cur_quick_frame_; } @@ -480,7 +481,7 @@ class StackVisitor { } StackIndirectReferenceTable* GetCurrentSirt() const { - AbstractMethod** sp = GetCurrentQuickFrame(); + mirror::AbstractMethod** sp = GetCurrentQuickFrame(); ++sp; // Skip Method*; SIRT comes next; return reinterpret_cast(sp); } @@ -499,7 +500,7 @@ class StackVisitor { Thread* const thread_; ShadowFrame* cur_shadow_frame_; - AbstractMethod** cur_quick_frame_; + mirror::AbstractMethod** cur_quick_frame_; uintptr_t cur_quick_frame_pc_; // Lazily computed, number of frames in the stack. 
size_t num_frames_; diff --git a/src/stack_indirect_reference_table.h b/src/stack_indirect_reference_table.h index 92fb00367f..dd106344de 100644 --- a/src/stack_indirect_reference_table.h +++ b/src/stack_indirect_reference_table.h @@ -21,8 +21,9 @@ #include "base/macros.h" namespace art { - +namespace mirror { class Object; +} class Thread; // Stack allocated indirect reference table. It can allocated within @@ -30,7 +31,8 @@ class Thread; // storage or manually allocated by SirtRef to hold one reference. class StackIndirectReferenceTable { public: - explicit StackIndirectReferenceTable(Object* object) : number_of_references_(1), link_(NULL) { + explicit StackIndirectReferenceTable(mirror::Object* object) : + number_of_references_(1), link_(NULL) { references_[0] = object; } @@ -51,17 +53,17 @@ class StackIndirectReferenceTable { link_ = sirt; } - Object* GetReference(size_t i) const { + mirror::Object* GetReference(size_t i) const { DCHECK_LT(i, number_of_references_); return references_[i]; } - void SetReference(size_t i, Object* object) { + void SetReference(size_t i, mirror::Object* object) { DCHECK_LT(i, number_of_references_); references_[i] = object; } - bool Contains(Object** sirt_entry) const { + bool Contains(mirror::Object** sirt_entry) const { // A SIRT should always contain something. One created by the // jni_compiler should have a jobject/jclass as a native method is // passed in a this pointer or a class @@ -87,7 +89,7 @@ class StackIndirectReferenceTable { StackIndirectReferenceTable* link_; // number_of_references_ are available if this is allocated and filled in by jni_compiler. 
- Object* references_[1]; + mirror::Object* references_[1]; DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable); }; diff --git a/src/thread.cc b/src/thread.cc index 46cba06dc3..01d6072f12 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -30,16 +30,22 @@ #include "base/mutex.h" #include "class_linker.h" -#include "class_loader.h" +#include "class_linker-inl.h" #include "cutils/atomic.h" #include "cutils/atomic-inline.h" #include "debugger.h" #include "gc_map.h" +#include "gc/card_table-inl.h" #include "heap.h" #include "jni_internal.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/stack_trace_element.h" #include "monitor.h" #include "oat/runtime/context.h" -#include "object.h" #include "object_utils.h" #include "reflection.h" #include "runtime.h" @@ -139,20 +145,20 @@ void* Thread::CreateCallback(void* arg) { // Copy peer into self, deleting global reference when done. CHECK(self->jpeer_ != NULL); - self->opeer_ = soa.Decode(self->jpeer_); + self->opeer_ = soa.Decode(self->jpeer_); self->GetJniEnv()->DeleteGlobalRef(self->jpeer_); self->jpeer_ = NULL; { - SirtRef thread_name(self, self->GetThreadName(soa)); + SirtRef thread_name(self, self->GetThreadName(soa)); self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); } Dbg::PostThreadStart(self); // Invoke the 'run' method of our java.lang.Thread. 
- Object* receiver = self->opeer_; + mirror::Object* receiver = self->opeer_; jmethodID mid = WellKnownClasses::java_lang_Thread_run; - AbstractMethod* m = + mirror::AbstractMethod* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid)); m->Invoke(self, receiver, NULL, NULL); } @@ -162,8 +168,9 @@ void* Thread::CreateCallback(void* arg) { return NULL; } -Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object* thread_peer) { - Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); +Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, + mirror::Object* thread_peer) { + mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); Thread* result = reinterpret_cast(static_cast(f->GetInt(thread_peer))); // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ // to stop it from going away. @@ -177,7 +184,7 @@ Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object } Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) { - return FromManagedThread(soa, soa.Decode(java_thread)); + return FromManagedThread(soa, soa.Decode(java_thread)); } static size_t FixStackSize(size_t stack_size) { @@ -391,7 +398,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) } { ScopedObjectAccess soa(this); - opeer_ = soa.Decode(peer.get()); + opeer_ = soa.Decode(peer.get()); } env->CallNonvirtualVoidMethod(peer.get(), WellKnownClasses::java_lang_Thread, @@ -405,7 +412,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) reinterpret_cast(self)); ScopedObjectAccess soa(self); - SirtRef peer_thread_name(soa.Self(), GetThreadName(soa)); + SirtRef peer_thread_name(soa.Self(), GetThreadName(soa)); if (peer_thread_name.get() == NULL) { // The Thread constructor should have set the Thread.name to a // non-null value. 
However, because we can run without code @@ -414,9 +421,9 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)-> SetBoolean(opeer_, thread_is_daemon); soa.DecodeField(WellKnownClasses::java_lang_Thread_group)-> - SetObject(opeer_, soa.Decode(thread_group)); + SetObject(opeer_, soa.Decode(thread_group)); soa.DecodeField(WellKnownClasses::java_lang_Thread_name)-> - SetObject(opeer_, soa.Decode(thread_name.get())); + SetObject(opeer_, soa.Decode(thread_name.get())); soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)-> SetInt(opeer_, thread_priority); peer_thread_name.reset(GetThreadName(soa)); @@ -506,9 +513,9 @@ void Thread::Dump(std::ostream& os) const { DumpStack(os); } -String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const { - Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); - return (opeer_ != NULL) ? reinterpret_cast(f->GetObject(opeer_)) : NULL; +mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const { + mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); + return (opeer_ != NULL) ? 
reinterpret_cast(f->GetObject(opeer_)) : NULL; } void Thread::GetThreadName(std::string& name) const { @@ -748,12 +755,14 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_); is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_); - Object* thread_group = + mirror::Object* thread_group = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_); if (thread_group != NULL) { - Field* group_name_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); - String* group_name_string = reinterpret_cast(group_name_field->GetObject(thread_group)); + mirror::Field* group_name_field = + soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); + mirror::String* group_name_string = + reinterpret_cast(group_name_field->GetObject(thread_group)); group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : ""; } } else { @@ -848,13 +857,13 @@ struct StackDumpVisitor : public StackVisitor { } bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; } const int kMaxRepetition = 3; - Class* c = m->GetDeclaringClass(); - const DexCache* dex_cache = c->GetDexCache(); + mirror::Class* c = m->GetDeclaringClass(); + const mirror::DexCache* dex_cache = c->GetDexCache(); int line_number = -1; if (dex_cache != NULL) { // be tolerant of bad input const DexFile& dex_file = *dex_cache->GetDexFile(); @@ -893,7 +902,7 @@ struct StackDumpVisitor : public StackVisitor { return true; } - static void DumpLockedObject(Object* o, void* context) + static void DumpLockedObject(mirror::Object* o, void* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::ostream& os = *reinterpret_cast(context); os << " - locked <" << o << "> (a " << 
PrettyTypeOf(o) << ")\n"; @@ -903,7 +912,7 @@ struct StackDumpVisitor : public StackVisitor { const Thread* thread; bool can_allocate; MethodHelper mh; - AbstractMethod* last_method; + mirror::AbstractMethod* last_method; int last_line_number; int repetition_count; int frame_count; @@ -1016,14 +1025,14 @@ bool Thread::IsStillStarting() const { void Thread::AssertNoPendingException() const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); - Throwable* exception = GetException(); + mirror::Throwable* exception = GetException(); LOG(FATAL) << "No pending exception expected: " << exception->Dump(); } } -static void MonitorExitVisitor(const Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS { +static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS { Thread* self = reinterpret_cast(arg); - Object* entered_monitor = const_cast(object); + mirror::Object* entered_monitor = const_cast(object); if (self->HoldsLock(entered_monitor)) { LOG(WARNING) << "Calling MonitorExit on object " << object << " (" << PrettyTypeOf(object) << ")" @@ -1049,7 +1058,8 @@ void Thread::Destroy() { // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone // who is waiting. - Object* lock = soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_); + mirror::Object* lock = + soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_); // (This conditional is only needed for tests, where Thread.lock won't have been set.) if (lock != NULL) { lock->MonitorEnter(self); @@ -1125,7 +1135,7 @@ void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) { void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) { // this.group.removeThread(this); // group can be null if we're in the compiler or a test. 
- Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_); + mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_); if (ogroup != NULL) { ScopedLocalRef group(soa.Env(), soa.AddLocalReference(ogroup)); ScopedLocalRef peer(soa.Env(), soa.AddLocalReference(opeer_)); @@ -1144,7 +1154,7 @@ size_t Thread::NumSirtReferences() { } bool Thread::SirtContains(jobject obj) const { - Object** sirt_entry = reinterpret_cast(obj); + mirror::Object** sirt_entry = reinterpret_cast(obj); for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { if (cur->Contains(sirt_entry)) { return true; @@ -1154,11 +1164,11 @@ bool Thread::SirtContains(jobject obj) const { return managed_stack_.ShadowFramesContain(sirt_entry); } -void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) { +void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) { for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { size_t num_refs = cur->NumberOfReferences(); for (size_t j = 0; j < num_refs; j++) { - Object* object = cur->GetReference(j); + mirror::Object* object = cur->GetReference(j); if (object != NULL) { visitor(object, arg); } @@ -1166,19 +1176,19 @@ void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) { } } -Object* Thread::DecodeJObject(jobject obj) const { +mirror::Object* Thread::DecodeJObject(jobject obj) const { Locks::mutator_lock_->AssertSharedHeld(this); if (obj == NULL) { return NULL; } IndirectRef ref = reinterpret_cast(obj); IndirectRefKind kind = GetIndirectRefKind(ref); - Object* result; + mirror::Object* result; switch (kind) { case kLocal: { IndirectReferenceTable& locals = jni_env_->locals; - result = const_cast(locals.Get(ref)); + result = const_cast(locals.Get(ref)); break; } case kGlobal: @@ -1186,7 +1196,7 @@ Object* Thread::DecodeJObject(jobject obj) const { JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 
IndirectReferenceTable& globals = vm->globals; MutexLock mu(const_cast(this), vm->globals_lock); - result = const_cast(globals.Get(ref)); + result = const_cast(globals.Get(ref)); break; } case kWeakGlobal: @@ -1194,7 +1204,7 @@ Object* Thread::DecodeJObject(jobject obj) const { JavaVMExt* vm = Runtime::Current()->GetJavaVM(); IndirectReferenceTable& weak_globals = vm->weak_globals; MutexLock mu(const_cast(this), vm->weak_globals_lock); - result = const_cast(weak_globals.Get(ref)); + result = const_cast(weak_globals.Get(ref)); if (result == kClearedJniWeakGlobal) { // This is a special case where it's okay to return NULL. return NULL; @@ -1206,10 +1216,10 @@ Object* Thread::DecodeJObject(jobject obj) const { // TODO: make stack indirect reference table lookup more efficient // Check if this is a local reference in the SIRT if (SirtContains(obj)) { - result = *reinterpret_cast(obj); // Read from SIRT + result = *reinterpret_cast(obj); // Read from SIRT } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) { // Assume an invalid local reference is actually a direct pointer. - result = reinterpret_cast(obj); + result = reinterpret_cast(obj); } else { result = kInvalidIndirectRefObject; } @@ -1272,9 +1282,9 @@ class CountStackDepthVisitor : public StackVisitor { // We want to skip frames up to and including the exception's constructor. 
// Note we also skip the frame if it doesn't have a method (namely the callee // save frame) - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (skipping_ && !m->IsRuntimeMethod() && - !Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { + !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { skipping_ = false; } if (!skipping_) { @@ -1310,14 +1320,14 @@ class BuildInternalStackTraceVisitor : public StackVisitor { bool Init(int depth) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Allocate method trace with an extra slot that will hold the PC trace - SirtRef > + SirtRef > method_trace(self_, - Runtime::Current()->GetClassLinker()->AllocObjectArray(self_, - depth + 1)); + Runtime::Current()->GetClassLinker()->AllocObjectArray(self_, + depth + 1)); if (method_trace.get() == NULL) { return false; } - IntArray* dex_pc_trace = IntArray::Alloc(self_, depth); + mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth); if (dex_pc_trace == NULL) { return false; } @@ -1347,7 +1357,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { skip_depth_--; return true; } - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; // Ignore runtime frames (in particular callee save). } @@ -1357,7 +1367,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { return true; } - ObjectArray* GetInternalStackTrace() const { + mirror::ObjectArray* GetInternalStackTrace() const { return method_trace_; } @@ -1368,9 +1378,9 @@ class BuildInternalStackTraceVisitor : public StackVisitor { // Current position down stack trace. uint32_t count_; // Array of dex PC values. - IntArray* dex_pc_trace_; + mirror::IntArray* dex_pc_trace_; // An array of the methods on the stack, the last entry is a reference to the PC trace. 
- ObjectArray* method_trace_; + mirror::ObjectArray* method_trace_; }; jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const { @@ -1387,7 +1397,7 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) return NULL; // Allocation failed. } build_trace_visitor.WalkStack(); - ObjectArray* trace = build_trace_visitor.GetInternalStackTrace(); + mirror::ObjectArray* trace = build_trace_visitor.GetInternalStackTrace(); if (kIsDebugBuild) { for (int32_t i = 0; i < trace->GetLength(); ++i) { CHECK(trace->Get(i) != NULL); @@ -1401,18 +1411,19 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job // Transition into runnable state to work on Object*/Array* ScopedObjectAccess soa(env); // Decode the internal stack trace into the depth, method trace and PC trace - ObjectArray* method_trace = soa.Decode*>(internal); + mirror::ObjectArray* method_trace = + soa.Decode*>(internal); int32_t depth = method_trace->GetLength() - 1; - IntArray* pc_trace = down_cast(method_trace->Get(depth)); + mirror::IntArray* pc_trace = down_cast(method_trace->Get(depth)); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); jobjectArray result; - ObjectArray* java_traces; + mirror::ObjectArray* java_traces; if (output_array != NULL) { // Reuse the array we were given. result = output_array; - java_traces = soa.Decode*>(output_array); + java_traces = soa.Decode*>(output_array); // ...adjusting the number of frames we'll write to not exceed the array length. 
depth = std::min(depth, java_traces->GetLength()); } else { @@ -1431,7 +1442,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job MethodHelper mh; for (int32_t i = 0; i < depth; ++i) { // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) - AbstractMethod* method = down_cast(method_trace->Get(i)); + mirror::AbstractMethod* method = down_cast(method_trace->Get(i)); mh.ChangeMethod(method); uint32_t dex_pc = pc_trace->Get(i); int32_t line_number = mh.GetLineNumFromDexPC(dex_pc); @@ -1440,27 +1451,28 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job const char* descriptor = mh.GetDeclaringClassDescriptor(); CHECK(descriptor != NULL); std::string class_name(PrettyDescriptor(descriptor)); - SirtRef class_name_object(soa.Self(), - String::AllocFromModifiedUtf8(soa.Self(), - class_name.c_str())); + SirtRef class_name_object(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), + class_name.c_str())); if (class_name_object.get() == NULL) { return NULL; } const char* method_name = mh.GetName(); CHECK(method_name != NULL); - SirtRef method_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), - method_name)); + SirtRef method_name_object(soa.Self(), + mirror::String::AllocFromModifiedUtf8(soa.Self(), + method_name)); if (method_name_object.get() == NULL) { return NULL; } const char* source_file = mh.GetDeclaringClassSourceFile(); - SirtRef source_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), - source_file)); - StackTraceElement* obj = StackTraceElement::Alloc(soa.Self(), - class_name_object.get(), - method_name_object.get(), - source_name_object.get(), - line_number); + SirtRef source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), + source_file)); + mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(), + class_name_object.get(), + method_name_object.get(), + 
source_name_object.get(), + line_number); if (obj == NULL) { return NULL; } @@ -1520,10 +1532,11 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, co env, reinterpret_cast(env->AllocObject(exception_class.get()))); if (exception.get() != NULL) { ScopedObjectAccessUnchecked soa(env); - Throwable* t = reinterpret_cast(soa.Self()->DecodeJObject(exception.get())); - t->SetDetailMessage(String::AllocFromModifiedUtf8(soa.Self(), msg)); + mirror::Throwable* t = + reinterpret_cast(soa.Self()->DecodeJObject(exception.get())); + t->SetDetailMessage(mirror::String::AllocFromModifiedUtf8(soa.Self(), msg)); if (cause != NULL) { - t->SetCause(soa.Decode(cause)); + t->SetCause(soa.Decode(cause)); } soa.Self()->SetException(t); } else { @@ -1691,7 +1704,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ static const bool kDebugExceptionDelivery = false; class CatchBlockStackVisitor : public StackVisitor { public: - CatchBlockStackVisitor(Thread* self, Throwable* exception) + CatchBlockStackVisitor(Thread* self, mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(self, self->GetLongJumpContext()), self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL), @@ -1708,7 +1721,7 @@ class CatchBlockStackVisitor : public StackVisitor { bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = GetMethod(); + mirror::AbstractMethod* method = GetMethod(); if (method == NULL) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. 
handler_quick_frame_pc_ = GetCurrentQuickFramePc(); @@ -1751,7 +1764,7 @@ class CatchBlockStackVisitor : public StackVisitor { } void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* catch_method = *handler_quick_frame_; + mirror::AbstractMethod* catch_method = *handler_quick_frame_; if (kDebugExceptionDelivery) { if (catch_method == NULL) { LOG(INFO) << "Handler is upcall"; @@ -1777,14 +1790,14 @@ class CatchBlockStackVisitor : public StackVisitor { private: Thread* self_; - Throwable* exception_; + mirror::Throwable* exception_; // The type of the exception catch block to find. - Class* to_find_; - AbstractMethod* throw_method_; + mirror::Class* to_find_; + mirror::AbstractMethod* throw_method_; JDWP::FrameId throw_frame_id_; uint32_t throw_dex_pc_; // Quick frame with found handler or last frame if no handler found. - AbstractMethod** handler_quick_frame_; + mirror::AbstractMethod** handler_quick_frame_; // PC to branch to for the handler. uintptr_t handler_quick_frame_pc_; // Associated dex PC. @@ -1798,13 +1811,13 @@ class CatchBlockStackVisitor : public StackVisitor { }; void Thread::QuickDeliverException() { - Throwable* exception = GetException(); // Get exception from thread + mirror::Throwable* exception = GetException(); // Get exception from thread CHECK(exception != NULL); // Don't leave exception visible while we try to find the handler, which may cause class // resolution. ClearException(); if (kDebugExceptionDelivery) { - String* msg = exception->GetDetailMessage(); + mirror::String* msg = exception->GetDetailMessage(); std::string str_msg(msg != NULL ? 
msg->ToModifiedUtf8() : ""); DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception) << ": " << str_msg << "\n"); @@ -1826,14 +1839,14 @@ Context* Thread::GetLongJumpContext() { return result; } -AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { +mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { struct CurrentMethodVisitor : public StackVisitor { CurrentMethodVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(thread, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {} virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. return true; @@ -1843,7 +1856,7 @@ AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) con frame_id_ = GetFrameId(); return false; } - AbstractMethod* method_; + mirror::AbstractMethod* method_; uint32_t dex_pc_; size_t frame_id_; }; @@ -1859,7 +1872,7 @@ AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) con return visitor.method_; } -bool Thread::HoldsLock(Object* object) { +bool Thread::HoldsLock(mirror::Object* object) { if (object == NULL) { return false; } @@ -1881,12 +1894,12 @@ class ReferenceMapVisitor : public StackVisitor { } ShadowFrame* shadow_frame = GetCurrentShadowFrame(); if (shadow_frame != NULL) { - AbstractMethod* m = shadow_frame->GetMethod(); + mirror::AbstractMethod* m = shadow_frame->GetMethod(); size_t num_regs = shadow_frame->NumberOfVRegs(); if (m->IsNative() || shadow_frame->HasReferenceArray()) { // SIRT for JNI or References for interpreter. 
for (size_t reg = 0; reg < num_regs; ++reg) { - Object* ref = shadow_frame->GetVRegReference(reg); + mirror::Object* ref = shadow_frame->GetVRegReference(reg); if (ref != NULL) { visitor_(ref, reg, this); } @@ -1907,7 +1920,7 @@ class ReferenceMapVisitor : public StackVisitor { num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs); for (size_t reg = 0; reg < num_regs; ++reg) { if (TestBitmap(reg, reg_bitmap)) { - Object* ref = shadow_frame->GetVRegReference(reg); + mirror::Object* ref = shadow_frame->GetVRegReference(reg); if (ref != NULL) { visitor_(ref, reg, this); } @@ -1915,7 +1928,7 @@ class ReferenceMapVisitor : public StackVisitor { } } } else { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); // Process register map (which native and runtime methods don't have) if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) { const uint8_t* native_gc_map = m->GetNativeGcMap(); @@ -1934,20 +1947,21 @@ class ReferenceMapVisitor : public StackVisitor { uint32_t fp_spills = m->GetFpSpillMask(); size_t frame_size = m->GetFrameSizeInBytes(); // For all dex registers in the bitmap - AbstractMethod** cur_quick_frame = GetCurrentQuickFrame(); + mirror::AbstractMethod** cur_quick_frame = GetCurrentQuickFrame(); DCHECK(cur_quick_frame != NULL); for (size_t reg = 0; reg < num_regs; ++reg) { // Does this register hold a reference? 
if (TestBitmap(reg, reg_bitmap)) { uint32_t vmap_offset; - Object* ref; + mirror::Object* ref; if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) { uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg)); - ref = reinterpret_cast(val); + ref = reinterpret_cast(val); } else { - ref = reinterpret_cast(GetVReg(cur_quick_frame, code_item, core_spills, - fp_spills, frame_size, reg)); + ref = reinterpret_cast(GetVReg(cur_quick_frame, code_item, + core_spills, fp_spills, frame_size, + reg)); } if (ref != NULL) { @@ -1975,46 +1989,46 @@ class ReferenceMapVisitor : public StackVisitor { class RootCallbackVisitor { public: - RootCallbackVisitor(Heap::RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) { + RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) { } - void operator()(const Object* obj, size_t, const StackVisitor*) const { + void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const { visitor_(obj, arg_); } private: - Heap::RootVisitor* visitor_; + RootVisitor* visitor_; void* arg_; }; class VerifyCallbackVisitor { public: - VerifyCallbackVisitor(Heap::VerifyRootVisitor* visitor, void* arg) + VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) { } - void operator()(const Object* obj, size_t vreg, const StackVisitor* visitor) const { + void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const { visitor_(obj, arg_, vreg, visitor); } private: - Heap::VerifyRootVisitor* const visitor_; + VerifyRootVisitor* const visitor_; void* const arg_; }; struct VerifyRootWrapperArg { - Heap::VerifyRootVisitor* visitor; + VerifyRootVisitor* visitor; void* arg; }; -static void VerifyRootWrapperCallback(const Object* root, void* arg) { +static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) { VerifyRootWrapperArg* wrapperArg = reinterpret_cast(arg); wrapperArg->visitor(root, 
wrapperArg->arg, 0, NULL); } -void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) { +void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) { // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we // don't have. VerifyRootWrapperArg wrapperArg; @@ -2043,7 +2057,7 @@ void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) { ReleaseLongJumpContext(context); } -void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) { +void Thread::VisitRoots(RootVisitor* visitor, void* arg) { if (opeer_ != NULL) { visitor(opeer_, arg); } diff --git a/src/thread.h b/src/thread.h index 13e1cab058..c63fddf3d1 100644 --- a/src/thread.h +++ b/src/thread.h @@ -31,19 +31,30 @@ #include "oat/runtime/oat_support_entrypoints.h" #include "locks.h" #include "offsets.h" +#include "root_visitor.h" #include "runtime_stats.h" #include "stack.h" #include "stack_indirect_reference_table.h" +#include "thread_state.h" #include "UniquePtr.h" namespace art { +namespace mirror { class AbstractMethod; class Array; -class BaseMutex; class Class; -class ClassLinker; class ClassLoader; +class Object; +template class ObjectArray; +template class PrimitiveArray; +typedef PrimitiveArray IntArray; +class StackTraceElement; +class StaticStorageBase; +class Throwable; +} // namespace mirror +class BaseMutex; +class ClassLinker; class Closure; class Context; struct DebugInvokeReq; @@ -51,20 +62,12 @@ class DexFile; struct JavaVMExt; struct JNIEnvExt; class Monitor; -class Object; class Runtime; class ScopedObjectAccess; class ScopedObjectAccessUnchecked; class ShadowFrame; -class StackTraceElement; -class StaticStorageBase; class Thread; class ThreadList; -class Throwable; - -template class ObjectArray; -template class PrimitiveArray; -typedef PrimitiveArray IntArray; // Thread priorities. These must match the Thread.MIN_PRIORITY, // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants. 
@@ -74,28 +77,6 @@ enum ThreadPriority { kMaxThreadPriority = 10, }; -enum ThreadState { - // Thread.State JDWP state - kTerminated, // TERMINATED TS_ZOMBIE Thread.run has returned, but Thread* still around - kRunnable, // RUNNABLE TS_RUNNING runnable - kTimedWaiting, // TIMED_WAITING TS_WAIT in Object.wait() with a timeout - kSleeping, // TIMED_WAITING TS_SLEEPING in Thread.sleep() - kBlocked, // BLOCKED TS_MONITOR blocked on a monitor - kWaiting, // WAITING TS_WAIT in Object.wait() - kWaitingForGcToComplete, // WAITING TS_WAIT blocked waiting for GC - kWaitingPerformingGc, // WAITING TS_WAIT performing GC - kWaitingForDebuggerSend, // WAITING TS_WAIT blocked waiting for events to be sent - kWaitingForDebuggerToAttach, // WAITING TS_WAIT blocked waiting for debugger to attach - kWaitingInMainDebuggerLoop, // WAITING TS_WAIT blocking/reading/processing debugger events - kWaitingForDebuggerSuspension, // WAITING TS_WAIT waiting for debugger suspend all - kWaitingForJniOnLoad, // WAITING TS_WAIT waiting for execution of dlopen and JNI on load code - kWaitingForSignalCatcherOutput, // WAITING TS_WAIT waiting for signal catcher IO to complete - kWaitingInMainSignalCatcherLoop, // WAITING TS_WAIT blocking/reading/processing signals - kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code - kNative, // RUNNABLE TS_RUNNING running in a JNI native method - kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger -}; - enum ThreadFlag { kSuspendRequest = 1, // If set implies that suspend_count_ > 0 and the Thread should enter the // safepoint handler. 
@@ -127,7 +108,8 @@ class PACKED(4) Thread { return reinterpret_cast(thread); } - static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer) + static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, + mirror::Object* thread_peer) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -245,7 +227,7 @@ class PACKED(4) Thread { return daemon_; } - bool HoldsLock(Object*); + bool HoldsLock(mirror::Object*); /* * Changes the priority of this thread to match that of the java.lang.Thread object. @@ -272,7 +254,7 @@ class PACKED(4) Thread { } // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer. - String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const + mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, @@ -282,7 +264,7 @@ class PACKED(4) Thread { // Sets the thread's name. 
void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(jpeer_ == NULL); return opeer_; } @@ -301,13 +283,13 @@ class PACKED(4) Thread { return exception_ != NULL; } - Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return exception_; } void AssertNoPendingException() const; - void SetException(Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetException(mirror::Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(new_exception != NULL); // TODO: DCHECK(!IsExceptionPending()); exception_ = new_exception; @@ -317,7 +299,7 @@ class PACKED(4) Thread { exception_ = NULL; } - void DeliverException(Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void DeliverException(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (exception == NULL) { ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception"); } else { @@ -334,11 +316,11 @@ class PACKED(4) Thread { long_jump_context_ = context; } - AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const + mirror::AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetTopOfStack(void* stack, uintptr_t pc) { - AbstractMethod** top_method = reinterpret_cast(stack); + mirror::AbstractMethod** top_method = reinterpret_cast(stack); managed_stack_.SetTopQuickFrame(top_method); managed_stack_.SetTopQuickFramePc(pc); } @@ -369,7 +351,7 @@ class PACKED(4) Thread { //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc); - void* FindExceptionHandlerInMethod(const AbstractMethod* 
method, + void* FindExceptionHandlerInMethod(const mirror::AbstractMethod* method, void* throw_pc, const DexFile& dex_file, ClassLinker* class_linker); @@ -384,7 +366,7 @@ class PACKED(4) Thread { } // Convert a jobject into a Object* - Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implements java.lang.Thread.interrupted. bool Interrupted(); @@ -393,11 +375,11 @@ class PACKED(4) Thread { void Interrupt(); void Notify(); - ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return class_loader_override_; } - void SetClassLoaderOverride(ClassLoader* class_loader_override) { + void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) { class_loader_override_ = class_loader_override; } @@ -413,10 +395,10 @@ class PACKED(4) Thread { static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, jobjectArray output_array = NULL, int* stack_depth = NULL); - void VisitRoots(Heap::RootVisitor* visitor, void* arg) + void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) + void VerifyRoots(VerifyRootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); #if VERIFY_OBJECT_ENABLED @@ -535,7 +517,7 @@ class PACKED(4) Thread { // Is the given obj in this thread's stack indirect reference table? bool SirtContains(jobject obj) const; - void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg); + void SirtVisitRoots(RootVisitor* visitor, void* arg); void PushSirt(StackIndirectReferenceTable* sirt) { sirt->SetLink(top_sirt_); @@ -692,7 +674,7 @@ class PACKED(4) Thread { byte* card_table_; // The pending exception or NULL. 
- Throwable* exception_; + mirror::Throwable* exception_; // The end of this thread's stack. This is the lowest safely-addressable address on the stack. // We leave extra space so there's room for the code that throws StackOverflowError. @@ -711,7 +693,7 @@ class PACKED(4) Thread { // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread // start up, until the thread is registered and the local opeer_ is used. - Object* opeer_; + mirror::Object* opeer_; jobject jpeer_; // The "lowest addressable byte" of the stack @@ -740,7 +722,7 @@ class PACKED(4) Thread { // The next thread in the wait set this thread is part of. Thread* wait_next_; // If we're blocked in MonitorEnter, this is the object we're trying to lock. - Object* monitor_enter_object_; + mirror::Object* monitor_enter_object_; friend class Monitor; friend class MonitorInfo; @@ -754,7 +736,7 @@ class PACKED(4) Thread { // Needed to get the right ClassLoader in JNI_OnLoad, but also // useful for testing. - ClassLoader* class_loader_override_; + mirror::ClassLoader* class_loader_override_; // Thread local, lazily allocated, long jump context. Used to deliver exceptions. 
Context* long_jump_context_; diff --git a/src/thread_list.cc b/src/thread_list.cc index 13c965cc18..ea8baacd30 100644 --- a/src/thread_list.cc +++ b/src/thread_list.cc @@ -22,6 +22,7 @@ #include "base/mutex.h" #include "debugger.h" +#include "thread.h" #include "timing_logger.h" #include "utils.h" @@ -579,14 +580,14 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) { } } -void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { +void ThreadList::VisitRoots(RootVisitor* visitor, void* arg) const { MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->VisitRoots(visitor, arg); } } -void ThreadList::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const { +void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const { MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); for (It it = list_.begin(), end = list_.end(); it != end; ++it) { (*it)->VerifyRoots(visitor, arg); diff --git a/src/thread_list.h b/src/thread_list.h index 7ded5e31a1..0470cfc3b9 100644 --- a/src/thread_list.h +++ b/src/thread_list.h @@ -18,10 +18,14 @@ #define ART_SRC_THREAD_LIST_H_ #include "base/mutex.h" -#include "thread.h" +#include "root_visitor.h" -namespace art { +#include +#include +namespace art { +class Closure; +class Thread; class TimingLogger; class ThreadList { @@ -84,10 +88,10 @@ class ThreadList { LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); - void VisitRoots(Heap::RootVisitor* visitor, void* arg) const + void VisitRoots(RootVisitor* visitor, void* arg) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const + void VerifyRoots(VerifyRootVisitor* visitor, void* arg) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Return a copy of the thread list. 
diff --git a/src/thread_state.h b/src/thread_state.h new file mode 100644 index 0000000000..7c4a16f914 --- /dev/null +++ b/src/thread_state.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_THREAD_STATE_H_ +#define ART_SRC_THREAD_STATE_H_ + +namespace art { + +enum ThreadState { + // Thread.State JDWP state + kTerminated, // TERMINATED TS_ZOMBIE Thread.run has returned, but Thread* still around + kRunnable, // RUNNABLE TS_RUNNING runnable + kTimedWaiting, // TIMED_WAITING TS_WAIT in Object.wait() with a timeout + kSleeping, // TIMED_WAITING TS_SLEEPING in Thread.sleep() + kBlocked, // BLOCKED TS_MONITOR blocked on a monitor + kWaiting, // WAITING TS_WAIT in Object.wait() + kWaitingForGcToComplete, // WAITING TS_WAIT blocked waiting for GC + kWaitingPerformingGc, // WAITING TS_WAIT performing GC + kWaitingForDebuggerSend, // WAITING TS_WAIT blocked waiting for events to be sent + kWaitingForDebuggerToAttach, // WAITING TS_WAIT blocked waiting for debugger to attach + kWaitingInMainDebuggerLoop, // WAITING TS_WAIT blocking/reading/processing debugger events + kWaitingForDebuggerSuspension, // WAITING TS_WAIT waiting for debugger suspend all + kWaitingForJniOnLoad, // WAITING TS_WAIT waiting for execution of dlopen and JNI on load code + kWaitingForSignalCatcherOutput, // WAITING TS_WAIT waiting for signal catcher IO to complete + 
kWaitingInMainSignalCatcherLoop, // WAITING TS_WAIT blocking/reading/processing signals + kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code + kNative, // RUNNABLE TS_RUNNING running in a JNI native method + kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger +}; + +} // namespace art + +#endif // ART_SRC_THREAD_STATE_H_ diff --git a/src/timing_logger.cc b/src/timing_logger.cc new file mode 100644 index 0000000000..fee7a30579 --- /dev/null +++ b/src/timing_logger.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "timing_logger.h" + +#include "base/logging.h" +#include "utils.h" + +#include +#include + +namespace art { + +void TimingLogger::Dump() const { + Dump(LOG(INFO)); +} + +void TimingLogger::Dump(std::ostream& os) const { + uint64_t largest_time = 0; + os << name_ << ": begin\n"; + for (size_t i = 1; i < times_.size(); ++i) { + uint64_t delta_time = times_[i] - times_[i - 1]; + largest_time = std::max(largest_time, delta_time); + } + // Compute which type of unit we will use for printing the timings. + TimeUnit tu = GetAppropriateTimeUnit(largest_time); + uint64_t divisor = GetNsToTimeUnitDivisor(tu); + for (size_t i = 1; i < times_.size(); ++i) { + uint64_t delta_time = times_[i] - times_[i - 1]; + if (!precise_ && divisor >= 1000) { + // Make the fraction 0. 
+ delta_time -= delta_time % (divisor / 1000); + } + os << name_ << ": " << std::setw(8) << FormatDuration(delta_time, tu) << " " << labels_[i] + << "\n"; + } + os << name_ << ": end, " << NsToMs(GetTotalNs()) << " ms\n"; +} + +void CumulativeLogger::Dump() const { + Dump(LOG(INFO)); +} + +void CumulativeLogger::Dump(std::ostream& os) const { + os << name_ << ": iterations " << iterations_ << " begin\n"; + //Find which unit we will use for the timing logger. + uint64_t largest_mean = 0; + for (size_t i = 0; i < times_.size(); ++i) { + // Convert back to nanoseconds from microseconds. + uint64_t mean = times_[i] / iterations_; + largest_mean = std::max(largest_mean, mean); + } + // Convert largest mean back to ns + TimeUnit tu = GetAppropriateTimeUnit(largest_mean * kAdjust); + uint64_t divisor = GetNsToTimeUnitDivisor(tu); + for (size_t i = 0; i < times_.size(); ++i) { + uint64_t mean_x2 = times_squared_[i] / iterations_; + uint64_t mean = times_[i] / iterations_; + uint64_t variance = mean_x2 - (mean * mean); + uint64_t std_dev = static_cast(std::sqrt(static_cast(variance))); + if (!precise_ && divisor >= 1000) { + // Make the fraction 0. 
+ mean -= mean % (divisor / 1000); + std_dev -= std_dev % (divisor / 1000); + } + os << StringPrintf("%s: %10s (std_dev %8s) %s\n", + name_.c_str(), + FormatDuration(mean * kAdjust, tu).c_str(), + FormatDuration(std_dev * kAdjust, tu).c_str(), + labels_[i].c_str()); + } + uint64_t total_mean_x2 = total_time_squared_; + uint64_t mean_total_ns = GetTotalTime(); + if (iterations_ != 0) { + total_mean_x2 /= iterations_; + mean_total_ns /= iterations_; + } + uint64_t total_variance = total_mean_x2 - (mean_total_ns * mean_total_ns); + uint64_t total_std_dev = static_cast( + std::sqrt(static_cast(total_variance))); + os << name_ << ": end, mean " << PrettyDuration(mean_total_ns * kAdjust) + << " std_dev " << PrettyDuration(total_std_dev * kAdjust) << "\n"; +} + +} // namespace art diff --git a/src/timing_logger.h b/src/timing_logger.h index 3b3dcfc1aa..fc47028afe 100644 --- a/src/timing_logger.h +++ b/src/timing_logger.h @@ -17,10 +17,8 @@ #ifndef ART_SRC_TIMING_LOGGER_H_ #define ART_SRC_TIMING_LOGGER_H_ -#include "base/logging.h" -#include "utils.h" +#include "utils.h" // For NanoTime. -#include #include #include #include @@ -47,31 +45,9 @@ class TimingLogger { labels_.push_back(label); } - void Dump() const { - Dump(LOG(INFO)); - } + void Dump() const; - void Dump(std::ostream& os) const { - uint64_t largest_time = 0; - os << name_ << ": begin\n"; - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - largest_time = std::max(largest_time, delta_time); - } - // Compute which type of unit we will use for printing the timings. - TimeUnit tu = GetAppropriateTimeUnit(largest_time); - uint64_t divisor = GetNsToTimeUnitDivisor(tu); - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - if (!precise_ && divisor >= 1000) { - // Make the fraction 0. 
- delta_time -= delta_time % (divisor / 1000); - } - os << name_ << ": " << std::setw(8) << FormatDuration(delta_time, tu) << " " << labels_[i] - << "\n"; - } - os << name_ << ": end, " << NsToMs(GetTotalNs()) << " ms\n"; - } + void Dump(std::ostream& os) const; uint64_t GetTotalNs() const { return times_.back() - times_.front(); @@ -149,50 +125,9 @@ class CumulativeLogger { total_time_squared_ += total_time * total_time; } - void Dump() const { - Dump(LOG(INFO)); - } + void Dump() const; - void Dump(std::ostream& os) const { - os << name_ << ": iterations " << iterations_ << " begin\n"; - //Find which unit we will use for the timing logger. - uint64_t largest_mean = 0; - for (size_t i = 0; i < times_.size(); ++i) { - // Convert back to nanoseconds from microseconds. - uint64_t mean = times_[i] / iterations_; - largest_mean = std::max(largest_mean, mean); - } - // Convert largest mean back to ns - TimeUnit tu = GetAppropriateTimeUnit(largest_mean * kAdjust); - uint64_t divisor = GetNsToTimeUnitDivisor(tu); - for (size_t i = 0; i < times_.size(); ++i) { - uint64_t mean_x2 = times_squared_[i] / iterations_; - uint64_t mean = times_[i] / iterations_; - uint64_t variance = mean_x2 - (mean * mean); - uint64_t std_dev = static_cast(std::sqrt(static_cast(variance))); - if (!precise_ && divisor >= 1000) { - // Make the fraction 0. 
- mean -= mean % (divisor / 1000); - std_dev -= std_dev % (divisor / 1000); - } - os << StringPrintf("%s: %10s (std_dev %8s) %s\n", - name_.c_str(), - FormatDuration(mean * kAdjust, tu).c_str(), - FormatDuration(std_dev * kAdjust, tu).c_str(), - labels_[i].c_str()); - } - uint64_t total_mean_x2 = total_time_squared_; - uint64_t mean_total_ns = GetTotalTime(); - if (iterations_ != 0) { - total_mean_x2 /= iterations_; - mean_total_ns /= iterations_; - } - uint64_t total_variance = total_mean_x2 - (mean_total_ns * mean_total_ns); - uint64_t total_std_dev = static_cast( - std::sqrt(static_cast(total_variance))); - os << name_ << ": end, mean " << PrettyDuration(mean_total_ns * kAdjust) - << " std_dev " << PrettyDuration(total_std_dev * kAdjust) << "\n"; - } + void Dump(std::ostream& os) const; uint64_t GetTotalNs() const { return GetTotalTime() * kAdjust; diff --git a/src/trace.cc b/src/trace.cc index 7b3cea8e3d..a23d202663 100644 --- a/src/trace.cc +++ b/src/trace.cc @@ -21,8 +21,10 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "debugger.h" -#include "dex_cache.h" #include "instrumentation.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/object_array-inl.h" #if !defined(ART_USE_LLVM_COMPILER) #include "oat/runtime/oat_support_entrypoints.h" #endif @@ -322,7 +324,8 @@ void Trace::FinishTracing() { } } -void Trace::LogMethodTraceEvent(Thread* self, const AbstractMethod* method, Trace::TraceEvent event) { +void Trace::LogMethodTraceEvent(Thread* self, const mirror::AbstractMethod* method, + Trace::TraceEvent event) { if (thread_clock_base_map_.find(self) == thread_clock_base_map_.end()) { uint64_t time = ThreadCpuMicroTime(); thread_clock_base_map_.Put(self, time); @@ -367,16 +370,17 @@ void Trace::GetVisitedMethods(size_t end_offset) { while (ptr < end) { uint32_t method_value = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); - AbstractMethod* method = 
reinterpret_cast(TraceMethodId(method_value)); + mirror::AbstractMethod* method = + reinterpret_cast(TraceMethodId(method_value)); visited_methods_.insert(method); ptr += record_size_; } } void Trace::DumpMethodList(std::ostream& os) { - typedef std::set::const_iterator It; // TODO: C++0x auto + typedef std::set::const_iterator It; // TODO: C++0x auto for (It it = visited_methods_.begin(); it != visited_methods_.end(); ++it) { - const AbstractMethod* method = *it; + const mirror::AbstractMethod* method = *it; MethodHelper mh(method); os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(), diff --git a/src/trace.h b/src/trace.h index e3f254ee7a..1be1cc4142 100644 --- a/src/trace.h +++ b/src/trace.h @@ -29,7 +29,9 @@ namespace art { +namespace mirror { class AbstractMethod; +} // namespace mirror class Thread; enum ProfilerClockSource { @@ -59,7 +61,7 @@ class Trace { bool UseWallClock(); bool UseThreadCpuClock(); - void LogMethodTraceEvent(Thread* self, const AbstractMethod* method, TraceEvent event); + void LogMethodTraceEvent(Thread* self, const mirror::AbstractMethod* method, TraceEvent event); private: explicit Trace(File* trace_file, int buffer_size, int flags); @@ -73,7 +75,7 @@ class Trace { void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); // Set of methods visited by the profiler. - std::set visited_methods_; + std::set visited_methods_; // Maps a thread to its clock base. 
SafeMap thread_clock_base_map_; diff --git a/src/utf.cc b/src/utf.cc index 174fe22f4f..cc7e2629db 100644 --- a/src/utf.cc +++ b/src/utf.cc @@ -17,7 +17,7 @@ #include "utf.h" #include "base/logging.h" -#include "object.h" +#include "mirror/array.h" namespace art { @@ -66,7 +66,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t } } -int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, +int32_t ComputeUtf16Hash(const mirror::CharArray* chars, int32_t offset, size_t char_count) { int32_t hash = 0; for (size_t i = 0; i < char_count; i++) { diff --git a/src/utf.h b/src/utf.h index e95289e471..44899bfdd2 100644 --- a/src/utf.h +++ b/src/utf.h @@ -29,9 +29,10 @@ * See http://en.wikipedia.org/wiki/UTF-8#Modified_UTF-8 for the details. */ namespace art { - +namespace mirror { template class PrimitiveArray; typedef PrimitiveArray CharArray; +} // namespace mirror /* * Returns the number of UTF-16 characters in the given modified UTF-8 string. @@ -65,7 +66,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t /* * The java.lang.String hashCode() algorithm. 
*/ -int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, size_t char_count) +int32_t ComputeUtf16Hash(const mirror::CharArray* chars, int32_t offset, size_t char_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count); diff --git a/src/utils.cc b/src/utils.cc index e2231c2154..6b93da835d 100644 --- a/src/utils.cc +++ b/src/utils.cc @@ -25,10 +25,17 @@ #include "UniquePtr.h" #include "base/unix_file/fd_file.h" -#include "class_loader.h" -#include "object.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class.h" +#include "mirror/class_loader.h" +#include "mirror/field.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/string.h" #include "object_utils.h" #include "os.h" +#include "utf.h" #if !defined(HAVE_POSIX_CLOCKS) #include @@ -220,14 +227,14 @@ void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts } } -std::string PrettyDescriptor(const String* java_descriptor) { +std::string PrettyDescriptor(const mirror::String* java_descriptor) { if (java_descriptor == NULL) { return "null"; } return PrettyDescriptor(java_descriptor->ToModifiedUtf8()); } -std::string PrettyDescriptor(const Class* klass) { +std::string PrettyDescriptor(const mirror::Class* klass) { if (klass == NULL) { return "null"; } @@ -288,7 +295,7 @@ std::string PrettyDescriptor(Primitive::Type type) { return PrettyDescriptor(descriptor_string); } -std::string PrettyField(const Field* f, bool with_type) { +std::string PrettyField(const mirror::Field* f, bool with_type) { if (f == NULL) { return "null"; } @@ -357,7 +364,7 @@ std::string PrettyReturnType(const char* signature) { return PrettyDescriptor(return_type); } -std::string PrettyMethod(const AbstractMethod* m, bool with_signature) { +std::string PrettyMethod(const mirror::AbstractMethod* m, bool with_signature) { if (m == NULL) { return "null"; } @@ -390,7 +397,7 @@ 
std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with return result; } -std::string PrettyTypeOf(const Object* obj) { +std::string PrettyTypeOf(const mirror::Object* obj) { if (obj == NULL) { return "null"; } @@ -406,7 +413,7 @@ std::string PrettyTypeOf(const Object* obj) { return result; } -std::string PrettyClass(const Class* c) { +std::string PrettyClass(const mirror::Class* c) { if (c == NULL) { return "null"; } @@ -417,7 +424,7 @@ std::string PrettyClass(const Class* c) { return result; } -std::string PrettyClassAndClassLoader(const Class* c) { +std::string PrettyClassAndClassLoader(const mirror::Class* c) { if (c == NULL) { return "null"; } @@ -613,7 +620,7 @@ std::string DescriptorToName(const char* descriptor) { return descriptor; } -std::string JniShortName(const AbstractMethod* m) { +std::string JniShortName(const mirror::AbstractMethod* m) { MethodHelper mh(m); std::string class_name(mh.GetDeclaringClassDescriptor()); // Remove the leading 'L' and trailing ';'... @@ -632,7 +639,7 @@ std::string JniShortName(const AbstractMethod* m) { return short_name; } -std::string JniLongName(const AbstractMethod* m) { +std::string JniLongName(const mirror::AbstractMethod* m) { std::string long_name; long_name += JniShortName(m); long_name += "__"; diff --git a/src/utils.h b/src/utils.h index 640743ccf8..f3c9b7a10a 100644 --- a/src/utils.h +++ b/src/utils.h @@ -30,12 +30,15 @@ namespace art { -class Class; class DexFile; + +namespace mirror { +class Class; class Field; class AbstractMethod; class Object; class String; +} // namespace mirror enum TimeUnit { kTimeUnitNanosecond, @@ -172,21 +175,21 @@ bool EndsWith(const std::string& s, const char* suffix); // Returns a human-readable equivalent of 'descriptor'. So "I" would be "int", // "[[I" would be "int[][]", "[Ljava/lang/String;" would be // "java.lang.String[]", and so forth. 
-std::string PrettyDescriptor(const String* descriptor); +std::string PrettyDescriptor(const mirror::String* descriptor); std::string PrettyDescriptor(const std::string& descriptor); std::string PrettyDescriptor(Primitive::Type type); -std::string PrettyDescriptor(const Class* klass) +std::string PrettyDescriptor(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or // "int a.b.C.f" (depending on the value of 'with_type'). -std::string PrettyField(const Field* f, bool with_type = true) +std::string PrettyField(const mirror::Field* f, bool with_type = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true); // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). -std::string PrettyMethod(const AbstractMethod* m, bool with_signature = true) +std::string PrettyMethod(const mirror::AbstractMethod* m, bool with_signature = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); @@ -194,7 +197,7 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with // So given an instance of java.lang.String, the output would // be "java.lang.String". Given an array of int, the output would be "int[]". // Given String.class, the output would be "java.lang.Class". -std::string PrettyTypeOf(const Object* obj) +std::string PrettyTypeOf(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable form of the type at an index in the specified dex file. @@ -203,11 +206,11 @@ std::string PrettyType(uint32_t type_idx, const DexFile& dex_file); // Returns a human-readable form of the name of the given class. // Given String.class, the output would be "java.lang.Class". 
-std::string PrettyClass(const Class* c) +std::string PrettyClass(const mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable form of the name of the given class with its class loader. -std::string PrettyClassAndClassLoader(const Class* c) +std::string PrettyClassAndClassLoader(const mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a human-readable size string such as "1MB". @@ -250,10 +253,10 @@ bool IsValidDescriptor(const char* s); // "Ljava/lang/String;" bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. -std::string JniShortName(const AbstractMethod* m) +std::string JniShortName(const mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. -std::string JniLongName(const AbstractMethod* m) +std::string JniLongName(const mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); diff --git a/src/utils_test.cc b/src/utils_test.cc index f1983be08d..0966e717ca 100644 --- a/src/utils_test.cc +++ b/src/utils_test.cc @@ -14,8 +14,12 @@ * limitations under the License. 
*/ -#include "object.h" #include "common_test.h" +#include "mirror/array.h" +#include "mirror/array-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/string.h" #include "scoped_thread_state_change.h" #include "sirt_ref.h" #include "utils.h" @@ -89,15 +93,15 @@ TEST_F(UtilsTest, PrettyTypeOf) { ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyTypeOf(NULL)); - SirtRef s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "")); + SirtRef s(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "")); EXPECT_EQ("java.lang.String", PrettyTypeOf(s.get())); - SirtRef a(soa.Self(), ShortArray::Alloc(soa.Self(), 2)); + SirtRef a(soa.Self(), mirror::ShortArray::Alloc(soa.Self(), 2)); EXPECT_EQ("short[]", PrettyTypeOf(a.get())); - Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); + mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); ASSERT_TRUE(c != NULL); - Object* o = ObjectArray::Alloc(soa.Self(), c, 0); + mirror::Object* o = mirror::ObjectArray::Alloc(soa.Self(), c, 0); EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o)); EXPECT_EQ("java.lang.Class", PrettyTypeOf(o->GetClass())); } @@ -105,18 +109,18 @@ TEST_F(UtilsTest, PrettyTypeOf) { TEST_F(UtilsTest, PrettyClass) { ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyClass(NULL)); - Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); + mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); ASSERT_TRUE(c != NULL); - Object* o = ObjectArray::Alloc(soa.Self(), c, 0); + mirror::Object* o = mirror::ObjectArray::Alloc(soa.Self(), c, 0); EXPECT_EQ("java.lang.Class", PrettyClass(o->GetClass())); } TEST_F(UtilsTest, PrettyClassAndClassLoader) { ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyClassAndClassLoader(NULL)); - Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;"); + mirror::Class* c = 
class_linker_->FindSystemClass("[Ljava/lang/String;"); ASSERT_TRUE(c != NULL); - Object* o = ObjectArray::Alloc(soa.Self(), c, 0); + mirror::Object* o = mirror::ObjectArray::Alloc(soa.Self(), c, 0); EXPECT_EQ("java.lang.Class", PrettyClassAndClassLoader(o->GetClass())); } @@ -124,9 +128,9 @@ TEST_F(UtilsTest, PrettyField) { ScopedObjectAccess soa(Thread::Current()); EXPECT_EQ("null", PrettyField(NULL)); - Class* java_lang_String = class_linker_->FindSystemClass("Ljava/lang/String;"); + mirror::Class* java_lang_String = class_linker_->FindSystemClass("Ljava/lang/String;"); - Field* f; + mirror::Field* f; f = java_lang_String->FindDeclaredInstanceField("count", "I"); EXPECT_EQ("int java.lang.String.count", PrettyField(f)); EXPECT_EQ("java.lang.String.count", PrettyField(f, false)); @@ -193,9 +197,9 @@ TEST_F(UtilsTest, MangleForJni) { TEST_F(UtilsTest, JniShortName_JniLongName) { ScopedObjectAccess soa(Thread::Current()); - Class* c = class_linker_->FindSystemClass("Ljava/lang/String;"); + mirror::Class* c = class_linker_->FindSystemClass("Ljava/lang/String;"); ASSERT_TRUE(c != NULL); - AbstractMethod* m; + mirror::AbstractMethod* m; m = c->FindVirtualMethod("charAt", "(I)C"); ASSERT_TRUE(m != NULL); diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc index 7afa6d475e..bcac374d46 100644 --- a/src/verifier/method_verifier.cc +++ b/src/verifier/method_verifier.cc @@ -22,13 +22,20 @@ #include "base/stringpiece.h" #include "class_linker.h" #include "compiler.h" -#include "dex_cache.h" #include "dex_file.h" #include "dex_instruction.h" #include "dex_instruction_visitor.h" +#include "gc/card_table-inl.h" #include "indenter.h" #include "intern_table.h" #include "leb128.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "runtime.h" 
#include "verifier/dex_gc_map.h" @@ -167,11 +174,12 @@ void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InsnFlags* flags, } } -MethodVerifier::FailureKind MethodVerifier::VerifyClass(const Class* klass, std::string& error) { +MethodVerifier::FailureKind MethodVerifier::VerifyClass(const mirror::Class* klass, + std::string& error) { if (klass->IsVerified()) { return kNoFailure; } - Class* super = klass->GetSuperClass(); + mirror::Class* super = klass->GetSuperClass(); if (super == NULL && StringPiece(ClassHelper(klass).GetDescriptor()) != "Ljava/lang/Object;") { error = "Verifier rejected class "; error += PrettyDescriptor(klass); @@ -199,7 +207,10 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const Class* klass, std: } MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, - DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, std::string& error) { + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, + uint32_t class_def_idx, + std::string& error) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); const byte* class_data = dex_file->GetClassData(class_def); if (class_data == NULL) { @@ -224,7 +235,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, } previous_direct_method_idx = method_idx; InvokeType type = it.GetMethodInvokeType(class_def); - AbstractMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type); + mirror::AbstractMethod* method = + linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type); if (method == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. 
@@ -258,7 +270,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, } previous_virtual_method_idx = method_idx; InvokeType type = it.GetMethodInvokeType(class_def); - AbstractMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type); + mirror::AbstractMethod* method = + linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type); if (method == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. @@ -288,9 +301,14 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, } } -MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx, const DexFile* dex_file, - DexCache* dex_cache, ClassLoader* class_loader, uint32_t class_def_idx, - const DexFile::CodeItem* code_item, AbstractMethod* method, uint32_t method_access_flags) { +MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx, + const DexFile* dex_file, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, + uint32_t class_def_idx, + const DexFile::CodeItem* code_item, + mirror::AbstractMethod* method, + uint32_t method_access_flags) { MethodVerifier::FailureKind result = kNoFailure; uint64_t start_ns = NanoTime(); @@ -326,9 +344,10 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx, co } void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx, - const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, uint32_t class_def_idx, - const DexFile::CodeItem* code_item, AbstractMethod* method, + const DexFile* dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, + const DexFile::CodeItem* code_item, + mirror::AbstractMethod* method, uint32_t method_access_flags) { MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item, dex_method_idx, 
method, method_access_flags, true); @@ -339,11 +358,12 @@ void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_i } std::vector MethodVerifier::DescribeVRegs(uint32_t dex_method_idx, - const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, + const DexFile* dex_file, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - AbstractMethod* method, + mirror::AbstractMethod* method, uint32_t method_access_flags, uint32_t dex_pc) { MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item, dex_method_idx, method, method_access_flags, true); @@ -351,9 +371,11 @@ std::vector MethodVerifier::DescribeVRegs(uint32_t dex_method_idx, return verifier.DescribeVRegs(dex_pc); } -MethodVerifier::MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - uint32_t dex_method_idx, AbstractMethod* method, uint32_t method_access_flags, +MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, + const DexFile::CodeItem* code_item, + uint32_t dex_method_idx, mirror::AbstractMethod* method, + uint32_t method_access_flags, bool can_load_classes) : reg_types_(can_load_classes), work_insn_idx_(-1), @@ -374,7 +396,7 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, can_load_classes_(can_load_classes) { } -void MethodVerifier::FindLocksAtDexPc(AbstractMethod* m, uint32_t dex_pc, +void MethodVerifier::FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc, std::vector& monitor_enter_dex_pcs) { MethodHelper mh(m); MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(), @@ -569,8 +591,9 @@ bool MethodVerifier::ScanTryCatchBlocks() { // Ensure exception types are resolved so that they don't need resolution to be 
delivered, // unresolved exception types will be ignored by exception delivery if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) { - Class* exception_type = linker->ResolveType(*dex_file_, iterator.GetHandlerTypeIndex(), - dex_cache_, class_loader_); + mirror::Class* exception_type = linker->ResolveType(*dex_file_, + iterator.GetHandlerTypeIndex(), + dex_cache_, class_loader_); if (exception_type == NULL) { DCHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); @@ -1960,7 +1983,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { dec_insn.opcode == Instruction::INVOKE_SUPER_RANGE); bool is_super = (dec_insn.opcode == Instruction::INVOKE_SUPER || dec_insn.opcode == Instruction::INVOKE_SUPER_RANGE); - AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_VIRTUAL, is_range, is_super); + mirror::AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_VIRTUAL, + is_range, is_super); const char* descriptor; if (called_method == NULL) { uint32_t method_idx = dec_insn.vB; @@ -1982,7 +2006,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_DIRECT: case Instruction::INVOKE_DIRECT_RANGE: { bool is_range = (dec_insn.opcode == Instruction::INVOKE_DIRECT_RANGE); - AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_DIRECT, is_range, false); + mirror::AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_DIRECT, + is_range, false); const char* return_type_descriptor; bool is_constructor; if (called_method == NULL) { @@ -2048,7 +2073,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_STATIC: case Instruction::INVOKE_STATIC_RANGE: { bool is_range = (dec_insn.opcode == Instruction::INVOKE_STATIC_RANGE); - AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_STATIC, is_range, false); + mirror::AbstractMethod* called_method = 
VerifyInvocationArgs(dec_insn, METHOD_STATIC, is_range, false); const char* descriptor; if (called_method == NULL) { uint32_t method_idx = dec_insn.vB; @@ -2070,9 +2095,9 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_INTERFACE: case Instruction::INVOKE_INTERFACE_RANGE: { bool is_range = (dec_insn.opcode == Instruction::INVOKE_INTERFACE_RANGE); - AbstractMethod* abs_method = VerifyInvocationArgs(dec_insn, METHOD_INTERFACE, is_range, false); + mirror::AbstractMethod* abs_method = VerifyInvocationArgs(dec_insn, METHOD_INTERFACE, is_range, false); if (abs_method != NULL) { - Class* called_interface = abs_method->GetDeclaringClass(); + mirror::Class* called_interface = abs_method->GetDeclaringClass(); if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) { Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected interface class in invoke-interface '" << PrettyMethod(abs_method) << "'"; @@ -2558,7 +2583,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) { const char* descriptor = dex_file_->StringByTypeIdx(class_idx); const RegType& referrer = GetDeclaringClass(); - Class* klass = dex_cache_->GetResolvedType(class_idx); + mirror::Class* klass = dex_cache_->GetResolvedType(class_idx); const RegType& result = klass != NULL ? 
reg_types_.FromClass(klass, klass->IsFinal()) : reg_types_.FromDescriptor(class_loader_, descriptor, false); @@ -2621,7 +2646,8 @@ const RegType& MethodVerifier::GetCaughtExceptionType() { return *common_super; } -AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx, MethodType method_type) { +mirror::AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx, + MethodType method_type) { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx); const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_); if (klass_type.IsConflict()) { @@ -2633,9 +2659,9 @@ AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_ if (klass_type.IsUnresolvedTypes()) { return NULL; // Can't resolve Class so no more to do here } - Class* klass = klass_type.GetClass(); + mirror::Class* klass = klass_type.GetClass(); const RegType& referrer = GetDeclaringClass(); - AbstractMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx); + mirror::AbstractMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx); if (res_method == NULL) { const char* name = dex_file_->GetMethodName(method_id); std::string signature(dex_file_->CreateMethodSignature(method_id.proto_idx_, NULL)); @@ -2712,11 +2738,12 @@ AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_ return res_method; } -AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& dec_insn, - MethodType method_type, bool is_range, bool is_super) { +mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& dec_insn, + MethodType method_type, bool is_range, + bool is_super) { // Resolve the method. This could be an abstract or concrete method depending on what sort of call // we're making. 
- AbstractMethod* res_method = ResolveMethodAndCheckAccess(dec_insn.vB, method_type); + mirror::AbstractMethod* res_method = ResolveMethodAndCheckAccess(dec_insn.vB, method_type); if (res_method == NULL) { // error or class is unresolved return NULL; } @@ -2732,7 +2759,7 @@ AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& d << " to super " << PrettyMethod(res_method); return NULL; } - Class* super_klass = super.GetClass(); + mirror::Class* super_klass = super.GetClass(); if (res_method->GetMethodIndex() >= super_klass->GetVTable()->GetLength()) { MethodHelper mh(res_method); Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from " @@ -2771,7 +2798,7 @@ AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& d return NULL; } if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) { - Class* klass = res_method->GetDeclaringClass(); + mirror::Class* klass = res_method->GetDeclaringClass(); const RegType& res_method_class = reg_types_.FromClass(klass, klass->IsFinal()); if (!res_method_class.IsAssignableFrom(actual_arg_type)) { Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type @@ -2935,7 +2962,7 @@ void MethodVerifier::VerifyAPut(const DecodedInstruction& dec_insn, } } -Field* MethodVerifier::GetStaticField(int field_idx) { +mirror::Field* MethodVerifier::GetStaticField(int field_idx) { const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx); // Check access to class const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_); @@ -2948,7 +2975,7 @@ Field* MethodVerifier::GetStaticField(int field_idx) { if (klass_type.IsUnresolvedTypes()) { return NULL; // Can't resolve Class so no more to do here, will do checking at runtime. 
} - Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, class_loader_); if (field == NULL) { LOG(INFO) << "unable to resolve static field " << field_idx << " (" @@ -2970,7 +2997,7 @@ Field* MethodVerifier::GetStaticField(int field_idx) { } } -Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) { +mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) { const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx); // Check access to class const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_); @@ -2983,7 +3010,7 @@ Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) if (klass_type.IsUnresolvedTypes()) { return NULL; // Can't resolve Class so no more to do here } - Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, class_loader_); if (field == NULL) { LOG(INFO) << "unable to resolve instance field " << field_idx << " (" @@ -3005,7 +3032,7 @@ Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) // Cannot infer and check type, however, access will cause null pointer exception return field; } else { - Class* klass = field->GetDeclaringClass(); + mirror::Class* klass = field->GetDeclaringClass(); const RegType& field_klass = reg_types_.FromClass(klass, klass->IsFinal()); if (obj_type.IsUninitializedTypes() && (!IsConstructor() || GetDeclaringClass().Equals(obj_type) || @@ -3032,7 +3059,7 @@ Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn, const RegType& insn_type, bool is_primitive, bool is_static) { uint32_t field_idx = 
is_static ? dec_insn.vB : dec_insn.vC; - Field* field; + mirror::Field* field; if (is_static) { field = GetStaticField(field_idx); } else { @@ -3040,7 +3067,7 @@ void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn, field = GetInstanceField(object_type, field_idx); } const char* descriptor; - ClassLoader* loader; + mirror::ClassLoader* loader; if (field != NULL) { descriptor = FieldHelper(field).GetTypeDescriptor(); loader = field->GetDeclaringClass()->GetClassLoader(); @@ -3085,7 +3112,7 @@ void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn, void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn, const RegType& insn_type, bool is_primitive, bool is_static) { uint32_t field_idx = is_static ? dec_insn.vB : dec_insn.vC; - Field* field; + mirror::Field* field; if (is_static) { field = GetStaticField(field_idx); } else { @@ -3093,7 +3120,7 @@ void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn, field = GetInstanceField(object_type, field_idx); } const char* descriptor; - ClassLoader* loader; + mirror::ClassLoader* loader; if (field != NULL) { descriptor = FieldHelper(field).GetTypeDescriptor(); loader = field->GetDeclaringClass()->GetClassLoader(); @@ -3215,7 +3242,7 @@ const RegType& MethodVerifier::GetMethodReturnType() { const RegType& MethodVerifier::GetDeclaringClass() { if (foo_method_ != NULL) { - Class* klass = foo_method_->GetDeclaringClass(); + mirror::Class* klass = foo_method_->GetDeclaringClass(); return reg_types_.FromClass(klass, klass->IsFinal()); } else { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h index 7779efe0d2..a36a1f93dd 100644 --- a/src/verifier/method_verifier.h +++ b/src/verifier/method_verifier.h @@ -28,7 +28,7 @@ #include "compiler.h" #include "dex_file.h" #include "dex_instruction.h" -#include "object.h" +#include "mirror/object.h" #include "reg_type.h" #include 
"reg_type_cache.h" #include "register_line.h" @@ -156,25 +156,25 @@ class MethodVerifier { }; /* Verify a class. Returns "kNoFailure" on success. */ - static FailureKind VerifyClass(const Class* klass, std::string& error) + static FailureKind VerifyClass(const mirror::Class* klass, std::string& error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static FailureKind VerifyClass(const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, uint32_t class_def_idx, + static FailureKind VerifyClass(const DexFile* dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, std::string& error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file, - DexCache* dex_cache, ClassLoader* class_loader, + mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - AbstractMethod* method, uint32_t method_access_flags) + mirror::AbstractMethod* method, uint32_t method_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static std::vector DescribeVRegs(uint32_t dex_method_idx, - const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, + const DexFile* dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - AbstractMethod* method, + mirror::AbstractMethod* method, uint32_t method_access_flags, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -209,7 +209,7 @@ class MethodVerifier { // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding // to the locks held at 'dex_pc' in 'm'. 
- static void FindLocksAtDexPc(AbstractMethod* m, uint32_t dex_pc, + static void FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc, std::vector& monitor_enter_dex_pcs) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -229,9 +229,11 @@ class MethodVerifier { } private: - explicit MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - uint32_t method_idx, AbstractMethod* method, uint32_t access_flags, bool can_load_classes) + explicit MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, + const DexFile::CodeItem* code_item, + uint32_t method_idx, mirror::AbstractMethod* method, uint32_t access_flags, + bool can_load_classes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Adds the given string to the beginning of the last failure message. @@ -251,9 +253,11 @@ class MethodVerifier { * (3) Iterate through the method, checking type safety and looking * for code flow problems. */ - static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, DexCache* dex_cache, - ClassLoader* class_loader, uint32_t class_def_idx, const DexFile::CodeItem* code_item, - AbstractMethod* method, uint32_t method_access_flags) + static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, + mirror::DexCache* dex_cache, + mirror::ClassLoader* class_loader, uint32_t class_def_idx, + const DexFile::CodeItem* code_item, + mirror::AbstractMethod* method, uint32_t method_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Run verification on the method. 
Returns true if verification completes and false if the input @@ -477,11 +481,11 @@ class MethodVerifier { bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Lookup instance field and fail for resolution violations - Field* GetInstanceField(const RegType& obj_type, int field_idx) + mirror::Field* GetInstanceField(const RegType& obj_type, int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Lookup static field and fail for resolution violations - Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Perform verification of an iget or sget instruction. void VerifyISGet(const DecodedInstruction& insn, const RegType& insn_type, @@ -511,7 +515,7 @@ class MethodVerifier { * the referrer can access the resolved method. * Does not throw exceptions. */ - AbstractMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) + mirror::AbstractMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* @@ -536,7 +540,7 @@ class MethodVerifier { * Returns the resolved method on success, NULL on failure (with *failure * set appropriately). */ - AbstractMethod* VerifyInvocationArgs(const DecodedInstruction& dec_insn, + mirror::AbstractMethod* VerifyInvocationArgs(const DecodedInstruction& dec_insn, MethodType method_type, bool is_range, bool is_super) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -594,7 +598,7 @@ class MethodVerifier { void ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc); // Describe VRegs at the given dex pc. - std::vector DescribeVRegs(uint32_t dex_pc); + std::vector DescribeVRegs(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InsnFlags* CurrentInsnFlags(); @@ -639,13 +643,13 @@ class MethodVerifier { uint32_t dex_method_idx_; // The method we're working on. 
// Its object representation if known. - AbstractMethod* foo_method_ GUARDED_BY(Locks::mutator_lock_); + mirror::AbstractMethod* foo_method_ GUARDED_BY(Locks::mutator_lock_); uint32_t method_access_flags_; // Method's access flags. const DexFile* dex_file_; // The dex file containing the method. // The dex_cache for the declaring class of the method. - DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_); + mirror::DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_); // The class loader for the declaring class of the method. - ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_); + mirror::ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_); uint32_t class_def_idx_; // The class def index of the declaring class of the method. const DexFile::CodeItem* code_item_; // The code item containing the code for the method. UniquePtr insn_flags_; // Instruction widths and flags, one entry per code unit. diff --git a/src/verifier/method_verifier_test.cc b/src/verifier/method_verifier_test.cc index 8d4c5134dc..5cb39749ed 100644 --- a/src/verifier/method_verifier_test.cc +++ b/src/verifier/method_verifier_test.cc @@ -30,7 +30,7 @@ class MethodVerifierTest : public CommonTest { void VerifyClass(const std::string& descriptor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); - Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); + mirror::Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); // Verify the class std::string error_msg; diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc index ab1da1e2c3..56de1795b1 100644 --- a/src/verifier/reg_type.cc +++ b/src/verifier/reg_type.cc @@ -16,9 +16,14 @@ #include "reg_type.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" #include "object_utils.h" #include "reg_type_cache.h" +#include + namespace art { namespace verifier { @@ -165,7 +170,7 @@ std::set RegType::GetMergedTypes(const RegTypeCache* cache) const 
{ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const { if (!IsUnresolvedTypes()) { - Class* super_klass = GetClass()->GetSuperClass(); + mirror::Class* super_klass = GetClass()->GetSuperClass(); if (super_klass != NULL) { return cache->FromClass(super_klass, IsPreciseReference()); } else { @@ -198,7 +203,7 @@ bool RegType::CanAccess(const RegType& other) const { } } -bool RegType::CanAccessMember(Class* klass, uint32_t access_flags) const { +bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const { if (access_flags & kAccPublic) { return true; } @@ -209,6 +214,62 @@ bool RegType::CanAccessMember(Class* klass, uint32_t access_flags) const { } } +bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { + // Primitive arrays will always resolve + DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '['); + return descriptor_[0] == '['; + } else if (HasClass()) { + mirror::Class* type = GetClass(); + return type->IsArrayClass() && !type->GetComponentType()->IsPrimitive(); + } else { + return false; + } +} + +bool RegType::IsConstantByte() const { + return IsConstant() && + ConstantValue() >= std::numeric_limits::min() && + ConstantValue() <= std::numeric_limits::max(); +} + +bool RegType::IsConstantShort() const { + return IsConstant() && + ConstantValue() >= std::numeric_limits::min() && + ConstantValue() <= std::numeric_limits::max(); +} + +bool RegType::IsConstantChar() const { + return IsConstant() && ConstantValue() >= 0 && + ConstantValue() <= std::numeric_limits::max(); +} + +bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return IsReference() && GetClass()->IsObjectClass(); +} + +bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { + return 
descriptor_[0] == '['; + } else if (HasClass()) { + return GetClass()->IsArrayClass(); + } else { + return false; + } +} + +bool RegType::IsJavaLangObjectArray() const { + if (HasClass()) { + mirror::Class* type = GetClass(); + return type->IsArrayClass() && type->GetComponentType()->IsObjectClass(); + } + return false; +} + +bool RegType::IsInstantiableTypes() const { + return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable()); +} + bool RegType::IsAssignableFrom(const RegType& src) const { if (Equals(src)) { return true; @@ -363,11 +424,11 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty // with itself, 0 or Object are handled above. return reg_types->Conflict(); } else { // Two reference types, compute Join - Class* c1 = GetClass(); - Class* c2 = incoming_type.GetClass(); + mirror::Class* c1 = GetClass(); + mirror::Class* c2 = incoming_type.GetClass(); DCHECK(c1 != NULL && !c1->IsPrimitive()); DCHECK(c2 != NULL && !c2->IsPrimitive()); - Class* join_class = ClassJoin(c1, c2); + mirror::Class* join_class = ClassJoin(c1, c2); if (c1 == join_class && !IsPreciseReference()) { return *this; } else if (c2 == join_class && !incoming_type.IsPreciseReference()) { @@ -382,7 +443,7 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty } // See comment in reg_type.h -Class* RegType::ClassJoin(Class* s, Class* t) { +mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) { DCHECK(!s->IsPrimitive()) << PrettyClass(s); DCHECK(!t->IsPrimitive()) << PrettyClass(t); if (s == t) { @@ -392,21 +453,21 @@ Class* RegType::ClassJoin(Class* s, Class* t) { } else if (t->IsAssignableFrom(s)) { return t; } else if (s->IsArrayClass() && t->IsArrayClass()) { - Class* s_ct = s->GetComponentType(); - Class* t_ct = t->GetComponentType(); + mirror::Class* s_ct = s->GetComponentType(); + mirror::Class* t_ct = t->GetComponentType(); if (s_ct->IsPrimitive() || t_ct->IsPrimitive()) { // 
Given the types aren't the same, if either array is of primitive types then the only // common parent is java.lang.Object - Class* result = s->GetSuperClass(); // short-cut to java.lang.Object + mirror::Class* result = s->GetSuperClass(); // short-cut to java.lang.Object DCHECK(result->IsObjectClass()); return result; } - Class* common_elem = ClassJoin(s_ct, t_ct); + mirror::Class* common_elem = ClassJoin(s_ct, t_ct); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ClassLoader* class_loader = s->GetClassLoader(); + mirror::ClassLoader* class_loader = s->GetClassLoader(); std::string descriptor("["); descriptor += ClassHelper(common_elem).GetDescriptor(); - Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader); + mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader); DCHECK(array_class != NULL); return array_class; } else { @@ -433,8 +494,44 @@ Class* RegType::ClassJoin(Class* s, Class* t) { } } -std::ostream& operator<<(std::ostream& os, const RegType& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void RegType::CheckInvariants() const { + bool checked = false; + if (IsConstant() || IsConstantLo() || IsConstantHi()) { + // Constants: allocation_pc_or_constant_or_merged_types_ will hold the constant value, nothing + // else should be defined. + CHECK(descriptor_.empty()) << *this; + CHECK(klass_ == NULL) << *this; + checked = true; + } + if (IsUnresolvedTypes()) { + if (IsUnresolvedMergedReference()) { + // Unresolved merged types: allocation pc/merged types should be defined. + CHECK(descriptor_.empty()) << *this; + CHECK(klass_ == NULL) << *this; + CHECK_NE(allocation_pc_or_constant_or_merged_types_, 0U) << *this; + } else { + // Unresolved types: have a descriptor and no allocation pc/merged types. 
+ CHECK(!descriptor_.empty()) << *this; + CHECK(klass_ == NULL) << *this; + if (!IsUnresolvedAndUninitializedReference()) { + CHECK_EQ(allocation_pc_or_constant_or_merged_types_, 0U) << *this; + } + } + checked = true; + } + if (IsReferenceTypes() && !checked) { + // A resolved reference type. + CHECK(descriptor_.empty()) << *this; + CHECK(klass_ != NULL) << *this; + CHECK(klass_->IsClass()) << *this; + if (!IsUninitializedReference()) { + CHECK_EQ(allocation_pc_or_constant_or_merged_types_, 0U) << *this; + } + } + CHECK(checked = true); +} + +std::ostream& operator<<(std::ostream& os, const RegType& rhs) { os << rhs.Dump(); return os; } diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h index 65ee88a639..dc9a33a691 100644 --- a/src/verifier/reg_type.h +++ b/src/verifier/reg_type.h @@ -18,11 +18,18 @@ #define ART_SRC_VERIFIER_REG_TYPE_H_ #include "base/macros.h" -#include "object.h" +#include "primitive.h" + +#include "jni.h" #include +#include +#include namespace art { +namespace mirror { +class Class; +} // namespace mirror namespace verifier { class RegTypeCache; @@ -189,20 +196,9 @@ class RegType { bool IsConstantBoolean() const { return IsConstant() && ConstantValue() >= 0 && ConstantValue() <= 1; } - bool IsConstantByte() const { - return IsConstant() && - ConstantValue() >= std::numeric_limits::min() && - ConstantValue() <= std::numeric_limits::max(); - } - bool IsConstantShort() const { - return IsConstant() && - ConstantValue() >= std::numeric_limits::min() && - ConstantValue() <= std::numeric_limits::max(); - } - bool IsConstantChar() const { - return IsConstant() && ConstantValue() >= 0 && - ConstantValue() <= std::numeric_limits::max(); - } + bool IsConstantByte() const; + bool IsConstantShort() const; + bool IsConstantChar() const; bool IsReferenceTypes() const { return IsNonZeroReferenceTypes() || IsZero(); @@ -261,38 +257,17 @@ class RegType { return IsReference() || IsPreciseReference(); } - Class* GetClass() const { + mirror::Class* 
GetClass() const { DCHECK(!IsUnresolvedReference()); DCHECK(klass_ != NULL); return klass_; } - bool IsJavaLangObject() const { - return IsReference() && GetClass()->IsObjectClass(); - } + bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { - return descriptor_[0] == '['; - } else if (HasClass()) { - return GetClass()->IsArrayClass(); - } else { - return false; - } - } + bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { - // Primitive arrays will always resolve - DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '['); - return descriptor_[0] == '['; - } else if (HasClass()) { - Class* type = GetClass(); - return type->IsArrayClass() && !type->GetComponentType()->IsPrimitive(); - } else { - return false; - } - } + bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Primitive::Type GetPrimitiveType() const { if (IsNonZeroReferenceTypes()) { @@ -317,17 +292,9 @@ class RegType { } } - bool IsJavaLangObjectArray() const { - if (HasClass()) { - Class* type = GetClass(); - return type->IsArrayClass() && type->GetComponentType()->IsObjectClass(); - } - return false; - } + bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsInstantiableTypes() const { - return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable()); - } + bool IsInstantiableTypes() const; std::string GetDescriptor() const { DCHECK(IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()); @@ -364,7 +331,7 @@ class RegType { bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can 
this type access a member with the given properties? - bool CanAccessMember(Class* klass, uint32_t access_flags) const + bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can this type be assigned by src? @@ -393,47 +360,48 @@ class RegType { * * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ - static Class* ClassJoin(Class* s, Class* t) + static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: friend class RegTypeCache; - RegType(Type type, Class* klass, + RegType(Type type, mirror::Class* klass, uint32_t allocation_pc_or_constant_or_merged_types, uint16_t cache_id) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : type_(type), klass_(klass), allocation_pc_or_constant_or_merged_types_(allocation_pc_or_constant_or_merged_types), cache_id_(cache_id) { - DCHECK(IsConstant() || IsConstantLo() || IsConstantHi() || - IsUninitializedTypes() || IsUnresolvedMergedReference() || IsUnresolvedSuperClass() || - allocation_pc_or_constant_or_merged_types == 0); - if (!IsConstant() && !IsLongConstant() && !IsLongConstantHigh() && !IsUndefined() && - !IsConflict() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { - DCHECK(klass_ != NULL); - DCHECK(klass_->IsClass()); - DCHECK(!IsUnresolvedTypes()); - } +#ifndef NDEBUG + CheckInvariants(); +#endif } RegType(Type type, const std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : type_(type), klass_(NULL), descriptor_(descriptor), allocation_pc_or_constant_or_merged_types_(allocation_pc), cache_id_(cache_id) { +#ifndef NDEBUG + CheckInvariants(); +#endif } + void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const Type type_; // The current type of the register - // If known the type of the register... 
- Class* klass_; + // For reference types, if known the type of the register... + mirror::Class* klass_; // ...else a String for the descriptor. std::string descriptor_; // Overloaded field that: // - if IsConstant() holds a 32bit constant value - // - is IsReference() holds the allocation_pc or kInitArgAddr for an initialized reference or - // kUninitThisArgAddr for an uninitialized this ptr + // - is IsUninitializedReference()/IsUnresolvedAndUninitializedReference() holds the pc the + // instance in the register was being allocated. const uint32_t allocation_pc_or_constant_or_merged_types_; // A RegType cache densely encodes types, this is the location in the cache for this type @@ -441,7 +409,8 @@ class RegType { DISALLOW_COPY_AND_ASSIGN(RegType); }; -std::ostream& operator<<(std::ostream& os, const RegType& rhs); +std::ostream& operator<<(std::ostream& os, const RegType& rhs) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace verifier } // namespace art diff --git a/src/verifier/reg_type_cache.cc b/src/verifier/reg_type_cache.cc index 3bf5ad886a..6ca54dea9a 100644 --- a/src/verifier/reg_type_cache.cc +++ b/src/verifier/reg_type_cache.cc @@ -16,6 +16,8 @@ #include "reg_type_cache.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" #include "object_utils.h" namespace art { @@ -57,7 +59,7 @@ static RegType::Type RegTypeFromDescriptor(const std::string& descriptor) { } } -const RegType& RegTypeCache::FromDescriptor(ClassLoader* loader, const char* descriptor, +const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) { return From(RegTypeFromDescriptor(descriptor), loader, descriptor, precise); } @@ -67,14 +69,14 @@ static bool MatchingPrecisionForClass(RegType* entry, bool precise) return (entry->IsPreciseReference() == precise) || (entry->GetClass()->IsFinal() && !precise); } -const RegType& RegTypeCache::From(RegType::Type type, ClassLoader* loader, const char* descriptor, - bool 
precise) { +const RegType& RegTypeCache::From(RegType::Type type, mirror::ClassLoader* loader, + const char* descriptor, bool precise) { if (type <= RegType::kRegTypeLastFixedLocation) { // entries should be sized greater than primitive types DCHECK_GT(entries_.size(), static_cast(type)); RegType* entry = entries_[type]; if (entry == NULL) { - Class* c = NULL; + mirror::Class* c = NULL; if (strlen(descriptor) != 0) { c = Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor); } @@ -100,7 +102,7 @@ const RegType& RegTypeCache::From(RegType::Type type, ClassLoader* loader, const } } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Class* c; + mirror::Class* c; if (can_load_classes_) { c = class_linker->FindClass(descriptor, loader); } else { @@ -125,7 +127,8 @@ const RegType& RegTypeCache::From(RegType::Type type, ClassLoader* loader, const DCHECK(!Thread::Current()->IsExceptionPending()); } if (IsValidDescriptor(descriptor)) { - RegType* entry = new RegType(RegType::kRegTypeUnresolvedReference, descriptor, 0, entries_.size()); + RegType* entry = + new RegType(RegType::kRegTypeUnresolvedReference, descriptor, 0, entries_.size()); entries_.push_back(entry); return *entry; } else { @@ -137,7 +140,7 @@ const RegType& RegTypeCache::From(RegType::Type type, ClassLoader* loader, const } } -const RegType& RegTypeCache::FromClass(Class* klass, bool precise) { +const RegType& RegTypeCache::FromClass(mirror::Class* klass, bool precise) { if (klass->IsPrimitive()) { RegType::Type type = RegTypeFromPrimitiveType(klass->GetPrimitiveType()); // entries should be sized greater than primitive types @@ -233,7 +236,7 @@ const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocat entry = new RegType(RegType::kRegTypeUnresolvedAndUninitializedReference, descriptor, allocation_pc, entries_.size()); } else { - Class* klass = type.GetClass(); + mirror::Class* klass = type.GetClass(); for (size_t i = RegType::kRegTypeLastFixedLocation + 1; 
i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->IsUninitializedReference() && @@ -261,7 +264,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { } entry = new RegType(RegType::kRegTypeUnresolvedReference, descriptor, 0, entries_.size()); } else { - Class* klass = uninit_type.GetClass(); + mirror::Class* klass = uninit_type.GetClass(); for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) { @@ -274,6 +277,18 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { return *entry; } +const RegType& RegTypeCache::ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FromCat1Const(std::numeric_limits::min(), false); +} + +const RegType& RegTypeCache::ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FromCat1Const(std::numeric_limits::min(), false); +} + +const RegType& RegTypeCache::IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FromCat1Const(std::numeric_limits::max(), false); +} + const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) { // TODO: implement descriptor version. RegType* entry; @@ -289,7 +304,7 @@ const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) { entry = new RegType(RegType::kRegTypeUnresolvedAndUninitializedThisReference, descriptor, 0, entries_.size()); } else { - Class* klass = type.GetClass(); + mirror::Class* klass = type.GetClass(); for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) { @@ -320,7 +335,8 @@ const RegType& RegTypeCache::FromType(RegType::Type type) { } const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) { - RegType::Type wanted_type = precise ? 
RegType::kRegTypePreciseConst : RegType::kRegTypeImpreciseConst; + RegType::Type wanted_type = + precise ? RegType::kRegTypePreciseConst : RegType::kRegTypeImpreciseConst; for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->GetType() == wanted_type && cur_entry->ConstantValue() == value) { @@ -333,7 +349,8 @@ const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) { } const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) { - RegType::Type wanted_type = precise ? RegType::kRegTypePreciseConstLo : RegType::kRegTypeImpreciseConstLo; + RegType::Type wanted_type = + precise ? RegType::kRegTypePreciseConstLo : RegType::kRegTypeImpreciseConstLo; for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->GetType() == wanted_type && cur_entry->ConstantValueLo() == value) { @@ -346,7 +363,8 @@ const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) { } const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) { - RegType::Type wanted_type = precise ? RegType::kRegTypePreciseConstHi : RegType::kRegTypeImpreciseConstHi; + RegType::Type wanted_type = + precise ? 
RegType::kRegTypePreciseConstHi : RegType::kRegTypeImpreciseConstHi; for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; if (cur_entry->GetType() == wanted_type && cur_entry->ConstantValueHi() == value) { @@ -358,14 +376,14 @@ const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) { return *entry; } -const RegType& RegTypeCache::GetComponentType(const RegType& array, ClassLoader* loader) { +const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) { CHECK(array.IsArrayTypes()); if (array.IsUnresolvedTypes()) { std::string descriptor(array.GetDescriptor()); std::string component(descriptor.substr(1, descriptor.size() - 1)); return FromDescriptor(loader, component.c_str(), false); } else { - Class* klass = array.GetClass()->GetComponentType(); + mirror::Class* klass = array.GetClass()->GetComponentType(); return FromClass(klass, klass->IsFinal()); } } diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h index 54f42fd857..adab18c9bd 100644 --- a/src/verifier/reg_type_cache.h +++ b/src/verifier/reg_type_cache.h @@ -21,7 +21,13 @@ #include "base/stl_util.h" #include "reg_type.h" +#include + namespace art { +namespace mirror { +class Class; +class ClassLoader; +} // namespace mirror namespace verifier { class RegTypeCache { @@ -41,20 +47,25 @@ class RegTypeCache { return *result; } - const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor, bool precise) + const RegType& From(RegType::Type type, mirror::ClassLoader* loader, const char* descriptor, + bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& FromClass(Class* klass, bool precise) + const RegType& FromClass(mirror::Class* klass, bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromCat1Const(int32_t value, bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& 
FromCat2ConstLo(int32_t value, bool precise); - const RegType& FromCat2ConstHi(int32_t value, bool precise); - const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor, bool precise) + const RegType& FromCat2ConstLo(int32_t value, bool precise) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& FromCat2ConstHi(int32_t value, bool precise) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& FromType(RegType::Type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right); - const RegType& FromUnresolvedSuperClass(const RegType& child); + const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& FromUnresolvedSuperClass(const RegType& child) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeBoolean); @@ -117,25 +128,22 @@ class RegTypeCache { return FromCat1Const(0, true); } - const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc); + const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Create an uninitialized 'this' argument for the given type. - const RegType& UninitializedThisArgument(const RegType& type); - const RegType& FromUninitialized(const RegType& uninit_type); + const RegType& UninitializedThisArgument(const RegType& type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& FromUninitialized(const RegType& uninit_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Representatives of various constant types. 
When merging constants we can't infer a type, // (an int may later be used as a float) so we select these representative values meaning future // merges won't know the exact constant value but have some notion of its size. - const RegType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FromCat1Const(std::numeric_limits::min(), false); - } - const RegType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FromCat1Const(std::numeric_limits::min(), false); - } - const RegType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FromCat1Const(std::numeric_limits::max(), false); - } + const RegType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& GetComponentType(const RegType& array, ClassLoader* loader) + const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/src/verifier/register_line.h b/src/verifier/register_line.h index c6c19f8ded..8253f516c2 100644 --- a/src/verifier/register_line.h +++ b/src/verifier/register_line.h @@ -23,6 +23,7 @@ #include "dex_instruction.h" #include "reg_type.h" #include "safe_map.h" +#include "UniquePtr.h" namespace art { namespace verifier { @@ -135,7 +136,8 @@ class RegisterLine { * reference type. This is called when an appropriate constructor is invoked -- all copies of * the reference must be marked as initialized. */ - void MarkRefsAsInitialized(const RegType& uninit_type); + void MarkRefsAsInitialized(const RegType& uninit_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Check constraints on constructor return. 
Specifically, make sure that the "this" argument got @@ -245,10 +247,10 @@ class RegisterLine { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx. - void PushMonitor(uint32_t reg_idx, int32_t insn_idx); + void PushMonitor(uint32_t reg_idx, int32_t insn_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked - void PopMonitor(uint32_t reg_idx); + void PopMonitor(uint32_t reg_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Stack of currently held monitors and where they were locked size_t MonitorStackDepth() const { diff --git a/src/well_known_classes.cc b/src/well_known_classes.cc index 9752c749d0..5012f1b9c9 100644 --- a/src/well_known_classes.cc +++ b/src/well_known_classes.cc @@ -19,6 +19,7 @@ #include #include "base/logging.h" +#include "mirror/class.h" #include "ScopedLocalRef.h" #include "thread.h" @@ -198,8 +199,8 @@ void WellKnownClasses::LateInit(JNIEnv* env) { java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"); } -Class* WellKnownClasses::ToClass(jclass global_jclass) { - return reinterpret_cast(Thread::Current()->DecodeJObject(global_jclass)); +mirror::Class* WellKnownClasses::ToClass(jclass global_jclass) { + return reinterpret_cast(Thread::Current()->DecodeJObject(global_jclass)); } } // namespace art diff --git a/src/well_known_classes.h b/src/well_known_classes.h index 10afca91fd..92a207a4c2 100644 --- a/src/well_known_classes.h +++ b/src/well_known_classes.h @@ -21,8 +21,9 @@ #include "jni.h" namespace art { - +namespace mirror { class Class; +} // namespace mirror // Various classes used in JNI. We cache them so we don't have to keep looking // them up. 
Similar to libcore's JniConstants (except there's no overlap, so @@ -33,7 +34,7 @@ struct WellKnownClasses { static void Init(JNIEnv* env); // Run before native methods are registered. static void LateInit(JNIEnv* env); // Run after native methods are registered. - static Class* ToClass(jclass global_jclass) + static mirror::Class* ToClass(jclass global_jclass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static jclass com_android_dex_Dex; diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 60182e2fae..5109b82c8d 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -19,7 +19,9 @@ #include "UniquePtr.h" #include "class_linker.h" #include "gc_map.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" @@ -48,7 +50,7 @@ struct ReferenceMap2Visitor : public StackVisitor { } bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = GetMethod(); if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) { return true; } diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index dccd69fe0d..a16d89615e 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -19,7 +19,9 @@ #include "UniquePtr.h" #include "class_linker.h" #include "gc_map.h" -#include "object.h" +#include "mirror/abstract_method.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "jni.h" #include "scoped_thread_state_change.h" @@ -46,7 +48,7 @@ struct TestReferenceMapVisitor : public StackVisitor { } bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* m = GetMethod(); + mirror::AbstractMethod* m = 
GetMethod(); CHECK(m != NULL); LOG(INFO) << "At " << PrettyMethod(m, false); -- cgit v1.2.3-59-g8ed1b From 693ff61274cd2c9b8eb7e68c370f84a911b8ca52 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 1 Feb 2013 10:56:12 -0800 Subject: Performance improvements by removing a DMB and inlining. Correct the version of CAS used by Mutex::Lock to be acquire and not release. Don't do a memory barrier in thread transitions when there is already a barrier associated with the mutator lock. Force inlining of the hot thread and shared lock code, heavily used by down calls and JNI. Force inlining of mirror routines that are used by runtime support and hot. Performance was measured and improved using perf and maps. Change-Id: I012580e337143236d8b6d06c1e270183ae51083c --- src/base/mutex-inl.h | 149 +++++++++++++++++++++ src/base/mutex.cc | 139 ++----------------- src/base/mutex.h | 4 +- src/gc/garbage_collector.cc | 2 + src/gc/mark_sweep.cc | 1 + src/gc/space_bitmap-inl.h | 2 +- src/jdwp/jdwp_event.cc | 2 +- src/jdwp/jdwp_handler.cc | 2 +- src/mirror/class-inl.h | 83 ++++++++++++ src/mirror/class.cc | 83 ------------ src/mirror/class.h | 2 +- src/native/java_lang_System.cc | 1 + src/oat/runtime/arm/oat_support_entrypoints_arm.cc | 13 -- .../runtime/mips/oat_support_entrypoints_mips.cc | 24 +--- src/oat/runtime/oat_support_entrypoints.h | 20 +++ src/oat/runtime/support_cast.cc | 1 + src/oat/runtime/support_jni.cc | 20 ++- src/oat/runtime/x86/oat_support_entrypoints_x86.cc | 13 -- src/runtime_support.h | 1 + src/scoped_thread_state_change.h | 2 +- src/thread-inl.h | 119 ++++++++++++++++ src/thread.cc | 88 +----------- src/thread.h | 12 +- src/verifier/reg_type.cc | 1 + 24 files changed, 414 insertions(+), 370 deletions(-) create mode 100644 src/base/mutex-inl.h create mode 100644 src/thread-inl.h (limited to 'src/native/java_lang_System.cc') diff --git a/src/base/mutex-inl.h b/src/base/mutex-inl.h new file mode 100644 index 0000000000..03ec6f84b7 --- /dev/null +++ 
b/src/base/mutex-inl.h @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_BASE_MUTEX_INL_H_ +#define ART_SRC_BASE_MUTEX_INL_H_ + +#include "mutex.h" + +#include "cutils/atomic-inline.h" +#include "runtime.h" +#include "thread.h" + +namespace art { + +#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_) + +#if ART_USE_FUTEXES +#include "linux/futex.h" +#include "sys/syscall.h" +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) { + return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3); +} +#endif // ART_USE_FUTEXES + +class ScopedContentionRecorder { + public: + ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid) : + mutex_(mutex), blocked_tid_(blocked_tid), owner_tid_(owner_tid), + start_milli_time_(MilliTime()) { + } + + ~ScopedContentionRecorder() { + uint64_t end_milli_time = MilliTime(); + mutex_->RecordContention(blocked_tid_, owner_tid_, end_milli_time - start_milli_time_); + } + + private: + BaseMutex* const mutex_; + uint64_t blocked_tid_; + uint64_t owner_tid_; + const uint64_t start_milli_time_; +}; + +static inline uint64_t SafeGetTid(const Thread* self) { + if (self != NULL) { + return static_cast(self->GetTid()); + } else { + return 
static_cast(GetTid()); + } +} + +static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS { + // The check below enumerates the cases where we expect not to be able to sanity check locks + // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock. + // TODO: tighten this check. + if (kDebugLocking) { + Runtime* runtime = Runtime::Current(); + CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown() || + level == kDefaultMutexLevel || level == kRuntimeShutdownLock || + level == kThreadListLock || level == kLoggingLock || level == kAbortLock); + } +} + +inline void BaseMutex::RegisterAsUnlocked(Thread* self) { + if (UNLIKELY(self == NULL)) { + CheckUnattachedThread(level_); + return; + } + if (level_ != kMonitorLock) { + if (kDebugLocking && !gAborting) { + CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_; + } + self->SetHeldMutex(level_, NULL); + } +} + +inline void ReaderWriterMutex::SharedLock(Thread* self) { + DCHECK(self == NULL || self == Thread::Current()); +#if ART_USE_FUTEXES + bool done = false; + do { + int32_t cur_state = state_; + if (cur_state >= 0) { + // Add as an extra reader. + done = android_atomic_acquire_cas(cur_state, cur_state + 1, &state_) == 0; + } else { + // Owner holds it exclusively, hang up. 
+ ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self)); + android_atomic_inc(&num_pending_readers_); + if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) { + if (errno != EAGAIN) { + PLOG(FATAL) << "futex wait failed for " << name_; + } + } + android_atomic_dec(&num_pending_readers_); + } + } while(!done); +#else + CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_)); +#endif + RegisterAsLocked(self); + AssertSharedHeld(self); +} + +inline void ReaderWriterMutex::SharedUnlock(Thread* self) { + DCHECK(self == NULL || self == Thread::Current()); + AssertSharedHeld(self); + RegisterAsUnlocked(self); +#if ART_USE_FUTEXES + bool done = false; + do { + int32_t cur_state = state_; + if (LIKELY(cur_state > 0)) { + // Reduce state by 1. + done = android_atomic_release_cas(cur_state, cur_state - 1, &state_) == 0; + if (done && (cur_state - 1) == 0) { // cas may fail due to noise? + if (num_pending_writers_ > 0 || num_pending_readers_ > 0) { + // Wake any exclusive waiters as there are now no readers. 
+ futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0); + } + } + } else { + LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; + } + } while(!done); +#else + CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); +#endif +} + +} // namespace art + +#endif // ART_SRC_BASE_MUTEX_INL_H_ diff --git a/src/base/mutex.cc b/src/base/mutex.cc index d09a6a27f6..fa7a617cd6 100644 --- a/src/base/mutex.cc +++ b/src/base/mutex.cc @@ -21,28 +21,13 @@ #include "base/logging.h" #include "cutils/atomic.h" +#include "cutils/atomic-inline.h" +#include "mutex-inl.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "thread.h" #include "utils.h" -#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_) - -extern int pthread_mutex_lock(pthread_mutex_t* mutex) EXCLUSIVE_LOCK_FUNCTION(mutex); -extern int pthread_mutex_unlock(pthread_mutex_t* mutex) UNLOCK_FUNCTION(1); -extern int pthread_mutex_trylock(pthread_mutex_t* mutex) EXCLUSIVE_TRYLOCK_FUNCTION(0, mutex); - -#if ART_USE_FUTEXES -#include "linux/futex.h" -#include "sys/syscall.h" -#ifndef SYS_futex -#define SYS_futex __NR_futex -#endif -int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) { - return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3); -} -#endif // ART_USE_FUTEXES - namespace art { // This works on Mac OS 10.6 but hasn't been tested on older releases. @@ -85,14 +70,6 @@ struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t { // ...other stuff we don't care about. }; -static uint64_t SafeGetTid(const Thread* self) { - if (self != NULL) { - return static_cast(self->GetTid()); - } else { - return static_cast(GetTid()); - } -} - #if ART_USE_FUTEXES static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) { const long int one_sec = 1000 * 1000 * 1000; // one second in nanoseconds. 
@@ -164,18 +141,6 @@ void BaseMutex::DumpAll(std::ostream& os) { #endif } -static void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS { - // The check below enumerates the cases where we expect not to be able to sanity check locks - // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock. - // TODO: tighten this check. - if (kDebugLocking) { - Runtime* runtime = Runtime::Current(); - CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown() || - level == kDefaultMutexLevel || level == kRuntimeShutdownLock || - level == kThreadListLock || level == kLoggingLock || level == kAbortLock); - } -} - void BaseMutex::RegisterAsLocked(Thread* self) { if (UNLIKELY(self == NULL)) { CheckUnattachedThread(level_); @@ -204,19 +169,6 @@ void BaseMutex::RegisterAsLocked(Thread* self) { } } -void BaseMutex::RegisterAsUnlocked(Thread* self) { - if (UNLIKELY(self == NULL)) { - CheckUnattachedThread(level_); - return; - } - if (level_ != kMonitorLock) { - if (kDebugLocking && !gAborting) { - CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_; - } - self->SetHeldMutex(level_, NULL); - } -} - void BaseMutex::CheckSafeToWait(Thread* self) { if (self == NULL) { CheckUnattachedThread(level_); @@ -262,25 +214,6 @@ void BaseMutex::RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint6 #endif } -class ScopedContentionRecorder { - public: - ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid) : - mutex_(mutex), blocked_tid_(blocked_tid), owner_tid_(owner_tid), - start_milli_time_(MilliTime()) { - } - - ~ScopedContentionRecorder() { - uint64_t end_milli_time = MilliTime(); - mutex_->RecordContention(blocked_tid_, owner_tid_, end_milli_time - start_milli_time_); - } - - private: - BaseMutex* const mutex_; - uint64_t blocked_tid_; - uint64_t owner_tid_; - const uint64_t start_milli_time_; -}; - void BaseMutex::DumpContention(std::ostream& os) const { 
#if CONTENTION_LOGGING uint32_t wait_time = wait_time_; @@ -395,7 +328,7 @@ void Mutex::ExclusiveLock(Thread* self) { int32_t cur_state = state_; if (cur_state == 0) { // Change state from 0 to 1. - done = android_atomic_cmpxchg(0, 1, &state_) == 0; + done = android_atomic_acquire_cas(0, 1, &state_) == 0; } else { // Failed to acquire, hang up. ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self)); @@ -435,7 +368,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) { int32_t cur_state = state_; if (cur_state == 0) { // Change state from 0 to 1. - done = android_atomic_cmpxchg(0, 1, &state_) == 0; + done = android_atomic_acquire_cas(0, 1, &state_) == 0; } else { return false; } @@ -481,7 +414,7 @@ void Mutex::ExclusiveUnlock(Thread* self) { // We're no longer the owner. exclusive_owner_ = 0; // Change state to 0. - done = android_atomic_cmpxchg(cur_state, 0, &state_) == 0; + done = android_atomic_release_cas(cur_state, 0, &state_) == 0; if (done) { // Spurious fail? // Wake a contender if (num_contenders_ > 0) { @@ -588,7 +521,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) { int32_t cur_state = state_; if (cur_state == 0) { // Change state from 0 to -1. - done = android_atomic_cmpxchg(0, -1, &state_) == 0; + done = android_atomic_acquire_cas(0, -1, &state_) == 0; } else { // Failed to acquire, hang up. ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self)); @@ -622,7 +555,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { // We're no longer the owner. exclusive_owner_ = 0; // Change state from -1 to 0. - done = android_atomic_cmpxchg(-1, 0, &state_) == 0; + done = android_atomic_release_cas(-1, 0, &state_) == 0; if (done) { // cmpxchg may fail due to noise? // Wake any waiters. 
if (num_pending_readers_ > 0 || num_pending_writers_ > 0) { @@ -649,7 +582,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32 int32_t cur_state = state_; if (cur_state == 0) { // Change state from 0 to -1. - done = android_atomic_cmpxchg(0, -1, &state_) == 0; + done = android_atomic_acquire_cas(0, -1, &state_) == 0; } else { // Failed to acquire, hang up. timespec now_abs_ts; @@ -690,34 +623,6 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32 } #endif -void ReaderWriterMutex::SharedLock(Thread* self) { - DCHECK(self == NULL || self == Thread::Current()); -#if ART_USE_FUTEXES - bool done = false; - do { - int32_t cur_state = state_; - if (cur_state >= 0) { - // Add as an extra reader. - done = android_atomic_cmpxchg(cur_state, cur_state + 1, &state_) == 0; - } else { - // Owner holds it exclusively, hang up. - ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self)); - android_atomic_inc(&num_pending_readers_); - if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) { - if (errno != EAGAIN) { - PLOG(FATAL) << "futex wait failed for " << name_; - } - } - android_atomic_dec(&num_pending_readers_); - } - } while(!done); -#else - CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_)); -#endif - RegisterAsLocked(self); - AssertSharedHeld(self); -} - bool ReaderWriterMutex::SharedTryLock(Thread* self) { DCHECK(self == NULL || self == Thread::Current()); #if ART_USE_FUTEXES @@ -726,7 +631,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) { int32_t cur_state = state_; if (cur_state >= 0) { // Add as an extra reader. - done = android_atomic_cmpxchg(cur_state, cur_state + 1, &state_) == 0; + done = android_atomic_acquire_cas(cur_state, cur_state + 1, &state_) == 0; } else { // Owner holds it exclusively. 
return false; @@ -747,32 +652,6 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) { return true; } -void ReaderWriterMutex::SharedUnlock(Thread* self) { - DCHECK(self == NULL || self == Thread::Current()); - AssertSharedHeld(self); - RegisterAsUnlocked(self); -#if ART_USE_FUTEXES - bool done = false; - do { - int32_t cur_state = state_; - if (LIKELY(cur_state > 0)) { - // Reduce state by 1. - done = android_atomic_cmpxchg(cur_state, cur_state - 1, &state_) == 0; - if (done && (cur_state - 1) == 0) { // cmpxchg may fail due to noise? - if (num_pending_writers_ > 0 || num_pending_readers_ > 0) { - // Wake any exclusive waiters as there are now no readers. - futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0); - } - } - } else { - LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; - } - } while(!done); -#else - CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); -#endif -} - bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const { DCHECK(self == NULL || self == Thread::Current()); bool result = (GetExclusiveOwnerTid() == SafeGetTid(self)); diff --git a/src/base/mutex.h b/src/base/mutex.h index 8576c03eee..b530b752dc 100644 --- a/src/base/mutex.h +++ b/src/base/mutex.h @@ -223,14 +223,14 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { #endif // Block until ReaderWriterMutex is shared or free then acquire a share on the access. - void SharedLock(Thread* self) SHARED_LOCK_FUNCTION(); + void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() __attribute__ ((always_inline)); void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); } // Try to acquire share of ReaderWriterMutex. bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true); // Release a share of the access. 
- void SharedUnlock(Thread* self) UNLOCK_FUNCTION(); + void SharedUnlock(Thread* self) UNLOCK_FUNCTION() __attribute__ ((always_inline)); void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); } // Is the current thread the exclusive holder of the ReaderWriterMutex. diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc index fbcdbafd0a..94daec7c31 100644 --- a/src/gc/garbage_collector.cc +++ b/src/gc/garbage_collector.cc @@ -15,6 +15,8 @@ */ #include "garbage_collector.h" + +#include "base/mutex-inl.h" #include "thread.h" #include "thread_list.h" diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc index 40102b2036..81d5e170a2 100644 --- a/src/gc/mark_sweep.cc +++ b/src/gc/mark_sweep.cc @@ -24,6 +24,7 @@ #include "barrier.h" #include "base/logging.h" #include "base/macros.h" +#include "base/mutex-inl.h" #include "card_table.h" #include "card_table-inl.h" #include "heap.h" diff --git a/src/gc/space_bitmap-inl.h b/src/gc/space_bitmap-inl.h index e1fdd29d8d..dd91403756 100644 --- a/src/gc/space_bitmap-inl.h +++ b/src/gc/space_bitmap-inl.h @@ -18,7 +18,7 @@ #define ART_SRC_GC_SPACE_BITMAP_INL_H_ #include "base/logging.h" -#include "cutils/atomic.h" +#include "cutils/atomic-inline.h" namespace art { diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index 71e91d4958..a2c10b5eac 100644 --- a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -28,7 +28,7 @@ #include "jdwp/jdwp_expand_buf.h" #include "jdwp/jdwp_handler.h" #include "jdwp/jdwp_priv.h" -#include "thread.h" +#include "thread-inl.h" /* General notes: diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc index aa5a8a05d7..dd80089dc6 100644 --- a/src/jdwp/jdwp_handler.cc +++ b/src/jdwp/jdwp_handler.cc @@ -43,7 +43,7 @@ #include "jdwp/jdwp_expand_buf.h" #include "jdwp/jdwp_priv.h" #include "runtime.h" -#include "thread.h" +#include "thread-inl.h" #include "UniquePtr.h" namespace art { diff --git a/src/mirror/class-inl.h b/src/mirror/class-inl.h index 
7eb8601cd6..3ca4c3023c 100644 --- a/src/mirror/class-inl.h +++ b/src/mirror/class-inl.h @@ -130,6 +130,89 @@ inline void Class::SetVTable(ObjectArray* new_vtable) SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false); } +inline bool Class::Implements(const Class* klass) const { + DCHECK(klass != NULL); + DCHECK(klass->IsInterface()) << PrettyClass(this); + // All interfaces implemented directly and by our superclass, and + // recursively all super-interfaces of those interfaces, are listed + // in iftable_, so we can just do a linear scan through that. + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + if (iftable->GetInterface(i) == klass) { + return true; + } + } + return false; +} + +// Determine whether "this" is assignable from "src", where both of these +// are array classes. +// +// Consider an array class, e.g. Y[][], where Y is a subclass of X. +// Y[][] = Y[][] --> true (identity) +// X[][] = Y[][] --> true (element superclass) +// Y = Y[][] --> false +// Y[] = Y[][] --> false +// Object = Y[][] --> true (everything is an object) +// Object[] = Y[][] --> true +// Object[][] = Y[][] --> true +// Object[][][] = Y[][] --> false (too many []s) +// Serializable = Y[][] --> true (all arrays are Serializable) +// Serializable[] = Y[][] --> true +// Serializable[][] = Y[][] --> false (unless Y is Serializable) +// +// Don't forget about primitive types. 
+// Object[] = int[] --> false +// +inline bool Class::IsArrayAssignableFromArray(const Class* src) const { + DCHECK(IsArrayClass()) << PrettyClass(this); + DCHECK(src->IsArrayClass()) << PrettyClass(src); + return GetComponentType()->IsAssignableFrom(src->GetComponentType()); +} + +inline bool Class::IsAssignableFromArray(const Class* src) const { + DCHECK(!IsInterface()) << PrettyClass(this); // handled first in IsAssignableFrom + DCHECK(src->IsArrayClass()) << PrettyClass(src); + if (!IsArrayClass()) { + // If "this" is not also an array, it must be Object. + // src's super should be java_lang_Object, since it is an array. + Class* java_lang_Object = src->GetSuperClass(); + DCHECK(java_lang_Object != NULL) << PrettyClass(src); + DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src); + return this == java_lang_Object; + } + return IsArrayAssignableFromArray(src); +} + +inline bool Class::IsSubClass(const Class* klass) const { + DCHECK(!IsInterface()) << PrettyClass(this); + DCHECK(!IsArrayClass()) << PrettyClass(this); + const Class* current = this; + do { + if (current == klass) { + return true; + } + current = current->GetSuperClass(); + } while (current != NULL); + return false; +} + +inline AbstractMethod* Class::FindVirtualMethodForInterface(AbstractMethod* method) const { + Class* declaring_class = method->GetDeclaringClass(); + DCHECK(declaring_class != NULL) << PrettyClass(this); + DCHECK(declaring_class->IsInterface()) << PrettyMethod(method); + // TODO cache to improve lookup speed + int32_t iftable_count = GetIfTableCount(); + IfTable* iftable = GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + if (iftable->GetInterface(i) == declaring_class) { + return iftable->GetMethodArray(i)->Get(method->GetMethodIndex()); + } + } + return NULL; +} + inline AbstractMethod* Class::FindVirtualMethodForVirtual(AbstractMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!method->GetDeclaringClass()->IsInterface()); 
diff --git a/src/mirror/class.cc b/src/mirror/class.cc index 7f52d17616..e3347a88f4 100644 --- a/src/mirror/class.cc +++ b/src/mirror/class.cc @@ -238,74 +238,6 @@ void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) { new_reference_offsets, false); } -bool Class::Implements(const Class* klass) const { - DCHECK(klass != NULL); - DCHECK(klass->IsInterface()) << PrettyClass(this); - // All interfaces implemented directly and by our superclass, and - // recursively all super-interfaces of those interfaces, are listed - // in iftable_, so we can just do a linear scan through that. - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - if (iftable->GetInterface(i) == klass) { - return true; - } - } - return false; -} - -// Determine whether "this" is assignable from "src", where both of these -// are array classes. -// -// Consider an array class, e.g. Y[][], where Y is a subclass of X. -// Y[][] = Y[][] --> true (identity) -// X[][] = Y[][] --> true (element superclass) -// Y = Y[][] --> false -// Y[] = Y[][] --> false -// Object = Y[][] --> true (everything is an object) -// Object[] = Y[][] --> true -// Object[][] = Y[][] --> true -// Object[][][] = Y[][] --> false (too many []s) -// Serializable = Y[][] --> true (all arrays are Serializable) -// Serializable[] = Y[][] --> true -// Serializable[][] = Y[][] --> false (unless Y is Serializable) -// -// Don't forget about primitive types. 
-// Object[] = int[] --> false -// -bool Class::IsArrayAssignableFromArray(const Class* src) const { - DCHECK(IsArrayClass()) << PrettyClass(this); - DCHECK(src->IsArrayClass()) << PrettyClass(src); - return GetComponentType()->IsAssignableFrom(src->GetComponentType()); -} - -bool Class::IsAssignableFromArray(const Class* src) const { - DCHECK(!IsInterface()) << PrettyClass(this); // handled first in IsAssignableFrom - DCHECK(src->IsArrayClass()) << PrettyClass(src); - if (!IsArrayClass()) { - // If "this" is not also an array, it must be Object. - // src's super should be java_lang_Object, since it is an array. - Class* java_lang_Object = src->GetSuperClass(); - DCHECK(java_lang_Object != NULL) << PrettyClass(src); - DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src); - return this == java_lang_Object; - } - return IsArrayAssignableFromArray(src); -} - -bool Class::IsSubClass(const Class* klass) const { - DCHECK(!IsInterface()) << PrettyClass(this); - DCHECK(!IsArrayClass()) << PrettyClass(this); - const Class* current = this; - do { - if (current == klass) { - return true; - } - current = current->GetSuperClass(); - } while (current != NULL); - return false; -} - bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) { size_t i = 0; while (descriptor1[i] != '\0' && descriptor1[i] == descriptor2[i]) { @@ -378,21 +310,6 @@ void Class::SetClassLoader(ClassLoader* new_class_loader) { SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false); } -AbstractMethod* Class::FindVirtualMethodForInterface(AbstractMethod* method) const { - Class* declaring_class = method->GetDeclaringClass(); - DCHECK(declaring_class != NULL) << PrettyClass(this); - DCHECK(declaring_class->IsInterface()) << PrettyMethod(method); - // TODO cache to improve lookup speed - int32_t iftable_count = GetIfTableCount(); - IfTable* iftable = GetIfTable(); - for (int32_t i = 0; i < iftable_count; i++) { - if 
(iftable->GetInterface(i) == declaring_class) { - return iftable->GetMethodArray(i)->Get(method->GetMethodIndex()); - } - } - return NULL; -} - AbstractMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) const { // Check the current class before checking the interfaces. AbstractMethod* method = FindDeclaredVirtualMethod(name, signature); diff --git a/src/mirror/class.h b/src/mirror/class.h index 843e07cb37..9e440b46a7 100644 --- a/src/mirror/class.h +++ b/src/mirror/class.h @@ -542,7 +542,7 @@ class MANAGED Class : public StaticStorageBase { // super class or interface, return the specific implementation // method for this class. AbstractMethod* FindVirtualMethodForInterface(AbstractMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) __attribute__ ((always_inline, hot)); AbstractMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index 54ee2e914e..79614aeab4 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -20,6 +20,7 @@ #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "scoped_thread_state_change.h" /* diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc index c43b7e279e..dea2600aaa 100644 --- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc +++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc @@ -63,19 +63,6 @@ extern "C" void* art_get_obj_static_from_code(uint32_t); // FillArray entrypoint. extern "C" void art_handle_fill_data_from_code(void*, void*); -// JNI entrypoints. 
-extern void* FindNativeMethod(Thread* thread); -extern uint32_t JniMethodStart(Thread* self); -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, - Thread* self); -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self); -extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self); - // Lock entrypoints. extern "C" void art_lock_object_from_code(void*); extern "C" void art_unlock_object_from_code(void*); diff --git a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc index db773bad59..9c84a8f666 100644 --- a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc +++ b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc @@ -28,12 +28,13 @@ extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t); extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. -extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class); +extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); extern "C" void art_can_put_array_element_from_code(void*, void*); extern "C" void art_check_cast_from_code(void*, void*); // Debug entrypoints. -extern void DebugMe(AbstractMethod* method, uint32_t info); +extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); extern "C" void art_update_debugger(void*, void*, int32_t, void*); // DexCache entrypoints. @@ -62,19 +63,6 @@ extern "C" void* art_get_obj_static_from_code(uint32_t); // FillArray entrypoint. extern "C" void art_handle_fill_data_from_code(void*, void*); -// JNI entrypoints. 
-extern void* FindNativeMethod(Thread* thread); -extern uint32_t JniMethodStart(Thread* self); -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, - Thread* self); -extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self); -extern Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self); - // Lock entrypoints. extern "C" void art_lock_object_from_code(void*); extern "C" void art_unlock_object_from_code(void*); @@ -115,7 +103,8 @@ extern "C" int32_t art_indexof(void*, uint32_t, uint32_t, uint32_t); extern "C" int32_t art_string_compareto(void*, void*); // Invoke entrypoints. -const void* UnresolvedDirectMethodTrampolineFromCode(AbstractMethod*, AbstractMethod**, Thread*, +const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod*, + mirror::AbstractMethod**, Thread*, Runtime::TrampolineType); extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_invoke_interface_trampoline(uint32_t, void*); @@ -129,7 +118,8 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_test_suspend(); // Throw entrypoints. 
-extern void ThrowAbstractMethodErrorFromCode(AbstractMethod* method, Thread* thread, AbstractMethod** sp); +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, + mirror::AbstractMethod** sp); extern "C" void art_deliver_exception_from_code(void*); extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit); extern "C" void art_throw_div_zero_from_code(); diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h index a08a584660..ee59df495c 100644 --- a/src/oat/runtime/oat_support_entrypoints.h +++ b/src/oat/runtime/oat_support_entrypoints.h @@ -141,6 +141,26 @@ struct PACKED(4) EntryPoints { void (*pThrowStackOverflowFromCode)(void*); }; +// JNI entrypoints. +extern void* FindNativeMethod(Thread* thread) LOCKS_EXCLUDED(Locks::mutator_lock_); +extern uint32_t JniMethodStart(Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); + +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot)); + // Initialize an entry point data structure. 
void InitEntryPoints(EntryPoints* points); diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index 71a37efe2c..0b1fb742ef 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -17,6 +17,7 @@ #include "callee_save_frame.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "runtime_support.h" namespace art { diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index 0e21700cac..6799159fdc 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -27,7 +27,7 @@ namespace art { // Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_) { +extern void* FindNativeMethod(Thread* self) { Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); @@ -49,7 +49,7 @@ extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_) } // Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
-extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { +extern uint32_t JniMethodStart(Thread* self) { JNIEnvExt* env = self->GetJniEnv(); DCHECK(env != NULL); uint32_t saved_local_ref_cookie = env->local_ref_cookie; @@ -58,8 +58,7 @@ extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronizati return saved_local_ref_cookie; } -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) { +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) { self->DecodeJObject(to_lock)->MonitorEnter(self); return JniMethodStart(self); } @@ -71,23 +70,21 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { self->PopSirt(); } -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) { self->TransitionFromSuspendedToRunnable(); PopLocalReferences(saved_local_ref_cookie, self); } -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); } extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + Thread* self) { self->TransitionFromSuspendedToRunnable(); mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. 
PopLocalReferences(saved_local_ref_cookie, self); @@ -103,8 +100,7 @@ extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, uint32_t saved_local_ref_cookie, - jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + jobject locked, Thread* self) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. mirror::Object* o = self->DecodeJObject(result); diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc index 445ae2a5a7..48ec5bfc3d 100644 --- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc +++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc @@ -60,19 +60,6 @@ extern "C" void* art_get_obj_static_from_code(uint32_t); // FillArray entrypoint. extern "C" void art_handle_fill_data_from_code(void*, void*); -// JNI entrypoints. -extern void* FindNativeMethod(Thread* thread); -extern uint32_t JniMethodStart(Thread* self); -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self); -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self); -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, - Thread* self); -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self); -extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self); - // Lock entrypoints. 
extern "C" void art_lock_object_from_code(void*); extern "C" void art_unlock_object_from_code(void*); diff --git a/src/runtime_support.h b/src/runtime_support.h index a504237044..09ca0aab51 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -25,6 +25,7 @@ #include "jni_internal.h" #include "mirror/abstract_method.h" #include "mirror/array.h" +#include "mirror/class-inl.h" #include "mirror/throwable.h" #include "object_utils.h" #include "thread.h" diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h index 80d47c5528..31f178d079 100644 --- a/src/scoped_thread_state_change.h +++ b/src/scoped_thread_state_change.h @@ -19,7 +19,7 @@ #include "base/casts.h" #include "jni_internal.h" -#include "thread.h" +#include "thread-inl.h" namespace art { diff --git a/src/thread-inl.h b/src/thread-inl.h new file mode 100644 index 0000000000..93aa10e240 --- /dev/null +++ b/src/thread-inl.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_SRC_THREAD_INL_H_ +#define ART_SRC_THREAD_INL_H_ + +#include "thread.h" + +#include "base/mutex-inl.h" +#include "cutils/atomic-inline.h" + +namespace art { + +inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const { +#ifdef NDEBUG + UNUSED(check_locks); // Keep GCC happy about unused parameters. 
+#else + CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_; + if (check_locks) { + bool bad_mutexes_held = false; + for (int i = kMaxMutexLevel; i >= 0; --i) { + // We expect no locks except the mutator_lock_. + if (i != kMutatorLock) { + BaseMutex* held_mutex = GetHeldMutex(static_cast(i)); + if (held_mutex != NULL) { + LOG(ERROR) << "holding \"" << held_mutex->GetName() + << "\" at point where thread suspension is expected"; + bad_mutexes_held = true; + } + } + } + CHECK(!bad_mutexes_held); + } +#endif +} + +inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { + AssertThreadSuspensionIsAllowable(); + DCHECK_NE(new_state, kRunnable); + DCHECK_EQ(this, Thread::Current()); + // Change to non-runnable state, thereby appearing suspended to the system. + DCHECK_EQ(GetState(), kRunnable); + union StateAndFlags old_state_and_flags; + union StateAndFlags new_state_and_flags; + do { + old_state_and_flags = state_and_flags_; + // Copy over flags and try to clear the checkpoint bit if it is set. + new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags & ~kCheckpointRequest; + new_state_and_flags.as_struct.state = new_state; + // CAS the value without a memory barrier, that will occur in the unlock below. + } while (UNLIKELY(android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int, + &state_and_flags_.as_int) != 0)); + // If we toggled the checkpoint flag we must have cleared it. + uint16_t flag_change = new_state_and_flags.as_struct.flags ^ old_state_and_flags.as_struct.flags; + if (UNLIKELY((flag_change & kCheckpointRequest) != 0)) { + RunCheckpointFunction(); + } + // Release share on mutator_lock_. 
+ Locks::mutator_lock_->SharedUnlock(this); +} + +inline ThreadState Thread::TransitionFromSuspendedToRunnable() { + bool done = false; + union StateAndFlags old_state_and_flags = state_and_flags_; + int16_t old_state = old_state_and_flags.as_struct.state; + DCHECK_NE(static_cast(old_state), kRunnable); + do { + Locks::mutator_lock_->AssertNotHeld(this); // Otherwise we starve GC.. + old_state_and_flags = state_and_flags_; + DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); + if (UNLIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0)) { + // Wait while our suspend count is non-zero. + MutexLock mu(this, *Locks::thread_suspend_count_lock_); + old_state_and_flags = state_and_flags_; + DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); + while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) { + // Re-check when Thread::resume_cond_ is notified. + Thread::resume_cond_->Wait(this); + old_state_and_flags = state_and_flags_; + DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); + } + DCHECK_EQ(GetSuspendCount(), 0); + } + // Re-acquire shared mutator_lock_ access. + Locks::mutator_lock_->SharedLock(this); + // Atomically change from suspended to runnable if no suspend request pending. + old_state_and_flags = state_and_flags_; + DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); + if (LIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0)) { + union StateAndFlags new_state_and_flags = old_state_and_flags; + new_state_and_flags.as_struct.state = kRunnable; + // CAS the value without a memory barrier, that occurred in the lock above. + done = android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int, + &state_and_flags_.as_int) == 0; + } + if (UNLIKELY(!done)) { + // Failed to transition to Runnable. Release shared mutator_lock_ access and try again. 
+ Locks::mutator_lock_->SharedUnlock(this); + } + } while (UNLIKELY(!done)); + return static_cast(old_state); +} + +} // namespace art + +#endif // ART_SRC_THREAD_INL_H_ diff --git a/src/thread.cc b/src/thread.cc index 01d6072f12..5b1a3251e6 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -56,6 +56,7 @@ #include "gc/space.h" #include "stack.h" #include "stack_indirect_reference_table.h" +#include "thread-inl.h" #include "thread_list.h" #include "utils.h" #include "verifier/dex_gc_map.h" @@ -608,72 +609,6 @@ void Thread::FullSuspendCheck() { VLOG(threads) << this << " self-reviving"; } -void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { - AssertThreadSuspensionIsAllowable(); - DCHECK_NE(new_state, kRunnable); - DCHECK_EQ(this, Thread::Current()); - // Change to non-runnable state, thereby appearing suspended to the system. - DCHECK_EQ(GetState(), kRunnable); - union StateAndFlags old_state_and_flags; - union StateAndFlags new_state_and_flags; - do { - old_state_and_flags = state_and_flags_; - // Copy over flags and try to clear the checkpoint bit if it is set. - new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags & ~kCheckpointRequest; - new_state_and_flags.as_struct.state = new_state; - } while (android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int, - &state_and_flags_.as_int) != 0); - // If we toggled the checkpoint flag we must have cleared it. - uint16_t flag_change = new_state_and_flags.as_struct.flags ^ old_state_and_flags.as_struct.flags; - if ((flag_change & kCheckpointRequest) != 0) { - RunCheckpointFunction(); - } - // Release share on mutator_lock_. 
- Locks::mutator_lock_->SharedUnlock(this); -} - -ThreadState Thread::TransitionFromSuspendedToRunnable() { - bool done = false; - union StateAndFlags old_state_and_flags = state_and_flags_; - int16_t old_state = old_state_and_flags.as_struct.state; - DCHECK_NE(static_cast(old_state), kRunnable); - do { - Locks::mutator_lock_->AssertNotHeld(this); // Otherwise we starve GC.. - old_state_and_flags = state_and_flags_; - DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); - if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) { - // Wait while our suspend count is non-zero. - MutexLock mu(this, *Locks::thread_suspend_count_lock_); - old_state_and_flags = state_and_flags_; - DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); - while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) { - // Re-check when Thread::resume_cond_ is notified. - Thread::resume_cond_->Wait(this); - old_state_and_flags = state_and_flags_; - DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); - } - DCHECK_EQ(GetSuspendCount(), 0); - } - // Re-acquire shared mutator_lock_ access. - Locks::mutator_lock_->SharedLock(this); - // Atomically change from suspended to runnable if no suspend request pending. - old_state_and_flags = state_and_flags_; - DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); - if ((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0) { - union StateAndFlags new_state_and_flags = old_state_and_flags; - new_state_and_flags.as_struct.state = kRunnable; - done = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int, - &state_and_flags_.as_int) - == 0; - } - if (!done) { - // Failed to transition to Runnable. Release shared mutator_lock_ access and try again. 
- Locks::mutator_lock_->SharedUnlock(this); - } - } while (!done); - return static_cast(old_state); -} - Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) { static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. useconds_t total_delay_us = 0; @@ -2112,25 +2047,4 @@ std::ostream& operator<<(std::ostream& os, const Thread& thread) { return os; } -#ifndef NDEBUG -void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const { - CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_; - if (check_locks) { - bool bad_mutexes_held = false; - for (int i = kMaxMutexLevel; i >= 0; --i) { - // We expect no locks except the mutator_lock_. - if (i != kMutatorLock) { - BaseMutex* held_mutex = GetHeldMutex(static_cast(i)); - if (held_mutex != NULL) { - LOG(ERROR) << "holding \"" << held_mutex->GetName() - << "\" at point where thread suspension is expected"; - bad_mutexes_held = true; - } - } - } - CHECK(!bad_mutexes_held); - } -} -#endif - } // namespace art diff --git a/src/thread.h b/src/thread.h index 5e424c1cd5..58de45de65 100644 --- a/src/thread.h +++ b/src/thread.h @@ -169,13 +169,15 @@ class PACKED(4) Thread { // Transition from non-runnable to runnable state acquiring share on mutator_lock_. ThreadState TransitionFromSuspendedToRunnable() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_); + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) + __attribute__ ((always_inline)); // Transition from runnable into a state where mutator privileges are denied. Releases share of // mutator lock. void TransitionFromRunnableToSuspended(ThreadState new_state) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - UNLOCK_FUNCTION(Locks::mutator_lock_); + UNLOCK_FUNCTION(Locks::mutator_lock_) + __attribute__ ((always_inline)); // Wait for a debugger suspension on the thread associated with the given peer. Returns the // thread on success, else NULL. 
If the thread should be suspended then request_suspension should @@ -215,13 +217,7 @@ class PACKED(4) Thread { #endif -#ifndef NDEBUG void AssertThreadSuspensionIsAllowable(bool check_locks = true) const; -#else - void AssertThreadSuspensionIsAllowable(bool check_locks = true) const { - UNUSED(check_locks); // Keep GCC happy about unused parameters. - } -#endif bool IsDaemon() const { return daemon_; diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc index a18c8b1837..f4125812eb 100644 --- a/src/verifier/reg_type.cc +++ b/src/verifier/reg_type.cc @@ -19,6 +19,7 @@ #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_utils.h" #include "reg_type_cache.h" -- cgit v1.2.3-59-g8ed1b From 6324e2174e60e951f4e7dab22dc65bafd730e097 Mon Sep 17 00:00:00 2001 From: Elliott Hughes Date: Fri, 15 Feb 2013 17:55:35 -0800 Subject: Fix art's System.arraycopy now bionic doesn't have _memmove_words. Change-Id: Ib4fc502eda7f5902e0cbe50ed9df892314a49017 --- src/native/java_lang_System.cc | 142 +++++++++++++++++++++++++++++------------ 1 file changed, 102 insertions(+), 40 deletions(-) (limited to 'src/native/java_lang_System.cc') diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index 79614aeab4..5572623a0c 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -48,61 +48,123 @@ * appropriately for the element type, and that n is a multiple of the * element size. 
*/ -#ifdef __BIONIC__ -#define HAVE_MEMMOVE_WORDS -#endif - -#ifdef HAVE_MEMMOVE_WORDS -extern "C" void _memmove_words(void* dst, const void* src, size_t n); -#define move16 _memmove_words -#define move32 _memmove_words -#else -static void move16(void* dst, const void* src, size_t n) { - DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x01), 0U); - uint16_t* d = reinterpret_cast(dst); - const uint16_t* s = reinterpret_cast(src); +/* + * Works like memmove(), except: + * - if all arguments are at least 32-bit aligned, we guarantee that we + * will use operations that preserve atomicity of 32-bit values + * - if not, we guarantee atomicity of 16-bit values + * + * If all three arguments are not at least 16-bit aligned, the behavior + * of this function is undefined. (We could remove this restriction by + * testing for unaligned values and punting to memmove(), but that's + * not currently useful.) + * + * TODO: add loop for 64-bit alignment + * TODO: use __builtin_prefetch + * TODO: write ARM/MIPS/x86 optimized versions + */ +void MemmoveWords(void* dst, const void* src, size_t n) { + DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x01), 0U); - n /= sizeof(uint16_t); + char* d = reinterpret_cast(dst); + const char* s = reinterpret_cast(src); + size_t copyCount; - if (d < s) { - // Copy forwards. - while (n--) { - *d++ = *s++; - } - } else { - // Copy backwards. - d += n; - s += n; - while (n--) { - *--d = *--s; - } + // If the source and destination pointers are the same, this is + // an expensive no-op. Testing for an empty move now allows us + // to skip a check later. + if (n == 0 || d == s) { + return; } -} -static void move32(void* dst, const void* src, size_t n) { - DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x03), 0U); + // Determine if the source and destination buffers will overlap if + // we copy data forward (i.e. *dst++ = *src++). 
+ // + // It's okay if the destination buffer starts before the source and + // there is some overlap, because the reader is always ahead of the + // writer. + if (LIKELY((d < s) || ((size_t)(d - s) >= n))) { + // Copy forward. We prefer 32-bit loads and stores even for 16-bit + // data, so sort that out. + if (((reinterpret_cast(d) | reinterpret_cast(s)) & 0x03) != 0) { + // Not 32-bit aligned. Two possibilities: + // (1) Congruent, we can align to 32-bit by copying one 16-bit val + // (2) Non-congruent, we can do one of: + // a. copy whole buffer as a series of 16-bit values + // b. load/store 32 bits, using shifts to ensure alignment + // c. just copy the as 32-bit values and assume the CPU + // will do a reasonable job + // + // We're currently using (a), which is suboptimal. + if (((reinterpret_cast(d) ^ reinterpret_cast(s)) & 0x03) != 0) { + copyCount = n; + } else { + copyCount = 2; + } + n -= copyCount; + copyCount /= sizeof(uint16_t); - uint32_t* d = reinterpret_cast(dst); - const uint32_t* s = reinterpret_cast(src); + while (copyCount--) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += sizeof(uint16_t); + s += sizeof(uint16_t); + } + } - n /= sizeof(uint32_t); + // Copy 32-bit aligned words. + copyCount = n / sizeof(uint32_t); + while (copyCount--) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += sizeof(uint32_t); + s += sizeof(uint32_t); + } - if (d < s) { - // Copy forwards. - while (n--) { - *d++ = *s++; + // Check for leftovers. Either we finished exactly, or we have one remaining 16-bit chunk. + if ((n & 0x02) != 0) { + *(uint16_t*)d = *(uint16_t*)s; } } else { - // Copy backwards. + // Copy backward, starting at the end. d += n; s += n; - while (n--) { - *--d = *--s; + + if (((reinterpret_cast(d) | reinterpret_cast(s)) & 0x03) != 0) { + // try for 32-bit alignment. 
+ if (((reinterpret_cast(d) ^ reinterpret_cast(s)) & 0x03) != 0) { + copyCount = n; + } else { + copyCount = 2; + } + n -= copyCount; + copyCount /= sizeof(uint16_t); + + while (copyCount--) { + d -= sizeof(uint16_t); + s -= sizeof(uint16_t); + *reinterpret_cast(d) = *reinterpret_cast(s); + } + } + + // Copy 32-bit aligned words. + copyCount = n / sizeof(uint32_t); + while (copyCount--) { + d -= sizeof(uint32_t); + s -= sizeof(uint32_t); + *reinterpret_cast(d) = *reinterpret_cast(s); + } + + // Copy leftovers. + if ((n & 0x02) != 0) { + d -= sizeof(uint16_t); + s -= sizeof(uint16_t); + *reinterpret_cast(d) = *reinterpret_cast(s); } } } -#endif // HAVE_MEMMOVE_WORDS + +#define move16 MemmoveWords +#define move32 MemmoveWords namespace art { -- cgit v1.2.3-59-g8ed1b From 62d6c772205b8859f0ebf7ad105402ec4c3e2e01 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 27 Feb 2013 08:32:07 -0800 Subject: Interpreter entries and instrumentation as a listener. Make the instrumentation responsible for whether we want method entry/exit stubs, and allow it to use interpreter entry stubs when instruction by instruction instrumentation is required. Improve deoptimization so more JDWP test cases are passing. Refactor exception debug posting, in particular improve reporting in the interpreter. Improve class linker exception throwing so that broken dex files are more likely to be reported. Fixes the performance issue Bug: 8410519. Fix some error reporting lock level errors for the large object space. Make fast object verification faster. Add some debug mode robustness to finding dex PCs in GC maps. Add printf attributes to JniAbortF and fix errors. Expand run-test 044 to test return behaviors and fix issues with not throwing appropriate exceptions for proxies. Ensure causes are reported with a class linker NoClassDefFoundError and JNI NoSuchFieldError. Remove unused debugMe and updateDebuggerFromCode. 
There's a minor sizing tweak to the arg array builder, and an extra reference array check in the interpreter. Some clean-up of trace code. Fix reg type cache destructor if it is called after the reg type cache is shutdown (as is the case in oatdump). Change-Id: I6519c7b35df77f978d011999354c864f4918e8ce --- Android.mk | 2 +- build/Android.common.mk | 2 +- src/base/mutex-inl.h | 5 +- src/check_jni.cc | 24 +- src/class_linker.cc | 221 ++++----- src/class_linker_test.cc | 2 +- src/common_throws.cc | 392 ++++++++++----- src/common_throws.h | 98 +++- src/compiler/dex/frontend.h | 1 - src/compiler/dex/quick/gen_common.cc | 11 - src/compiler/dex/quick/gen_invoke.cc | 3 - src/compiler/dex/quick/mir_to_lir.h | 1 - src/compiler/driver/compiler_driver.cc | 22 +- src/compiler/llvm/runtime_support_llvm.cc | 92 ++-- src/debugger.cc | 210 ++++---- src/debugger.h | 13 +- src/dex_file.h | 5 +- src/gc/large_object_space.cc | 2 +- src/gc/mark_sweep.cc | 4 +- src/gc_map.h | 3 + src/heap.cc | 66 +-- src/hprof/hprof.cc | 11 +- src/instrumentation.cc | 547 ++++++++++++++++----- src/instrumentation.h | 260 ++++++++-- src/interpreter/interpreter.cc | 189 +++---- src/interpreter/interpreter.h | 8 +- src/invoke_arg_array_builder.h | 21 +- src/jdwp/jdwp_event.cc | 24 +- src/jdwp/jdwp_main.cc | 6 +- src/jni_internal.cc | 92 ++-- src/jni_internal.h | 3 +- src/jvalue.h | 2 + src/locks.cc | 4 + src/locks.h | 12 +- src/mirror/abstract_method-inl.h | 4 + src/mirror/abstract_method.cc | 46 +- src/mirror/abstract_method.h | 5 + src/mirror/array.cc | 19 +- src/mirror/class-inl.h | 2 +- src/mirror/class.cc | 31 +- src/mirror/object_test.cc | 10 +- src/mirror/string.cc | 5 +- src/mirror/throwable.cc | 4 +- src/monitor.cc | 14 +- src/monitor_android.cc | 2 +- src/native/dalvik_system_DexFile.cc | 15 +- src/native/dalvik_system_VMDebug.cc | 32 +- src/native/dalvik_system_VMRuntime.cc | 7 +- src/native/java_lang_Class.cc | 5 +- src/native/java_lang_String.cc | 6 +- src/native/java_lang_System.cc | 45 +- 
src/native/java_lang_Thread.cc | 3 +- src/native/java_lang_reflect_Array.cc | 3 +- src/native/java_lang_reflect_Constructor.cc | 9 +- src/native/java_lang_reflect_Field.cc | 30 +- src/nth_caller_visitor.h | 32 +- src/oat/runtime/arm/oat_support_entrypoints_arm.cc | 12 - src/oat/runtime/arm/runtime_support_arm.S | 79 ++- .../runtime/mips/oat_support_entrypoints_mips.cc | 12 - src/oat/runtime/mips/runtime_support_mips.S | 104 ++-- src/oat/runtime/oat_support_entrypoints.h | 4 - src/oat/runtime/support_cast.cc | 20 +- src/oat/runtime/support_debug.cc | 44 -- src/oat/runtime/support_deoptimize.cc | 83 +--- src/oat/runtime/support_field.cc | 18 +- src/oat/runtime/support_fillarray.cc | 11 +- src/oat/runtime/support_instrumentation.cc | 65 ++- src/oat/runtime/support_jni.cc | 4 +- src/oat/runtime/support_stubs.cc | 13 +- src/oat/runtime/support_throw.cc | 47 +- src/oat/runtime/x86/oat_support_entrypoints_x86.cc | 12 - src/oat/runtime/x86/runtime_support_x86.S | 140 +++--- src/reflection.cc | 112 +++-- src/reflection.h | 9 +- src/runtime.cc | 47 +- src/runtime.h | 19 +- src/runtime_support.cc | 80 +-- src/runtime_support.h | 20 +- src/stack.cc | 81 ++- src/stack.h | 10 +- src/thread.cc | 416 ++++++++++------ src/thread.h | 87 ++-- src/throw_location.cc | 41 ++ src/throw_location.h | 78 +++ src/trace.cc | 323 ++++++++---- src/trace.h | 75 ++- src/verifier/reg_type_cache.cc | 12 +- src/verifier/reg_type_cache.h | 3 +- test/003-omnibus-opcodes/expected.txt | 8 - test/003-omnibus-opcodes/src/UnresTest2.java | 11 +- test/044-proxy/src/ReturnsAndArgPassing.java | 302 +++++++++++- test/100-reflect2/expected.txt | 2 +- .../src/Main.java | 6 +- test/StackWalk/stack_walk_jni.cc | 1 + 94 files changed, 3210 insertions(+), 1888 deletions(-) delete mode 100644 src/oat/runtime/support_debug.cc create mode 100644 src/throw_location.cc create mode 100644 src/throw_location.h (limited to 'src/native/java_lang_System.cc') diff --git a/Android.mk b/Android.mk index 145f8b0eff..27cce2232b 
100644 --- a/Android.mk +++ b/Android.mk @@ -305,7 +305,7 @@ oat-target-sync: oat-target # oatdump targets .PHONY: dump-oat -dump-oat: dump-oat-core dump-oat-boot dump-oat-Calculator +dump-oat: dump-oat-core dump-oat-boot .PHONY: dump-oat-core dump-oat-core: dump-oat-core-host dump-oat-core-target diff --git a/build/Android.common.mk b/build/Android.common.mk index 2472158033..7faf452466 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -246,6 +246,7 @@ LIBART_COMMON_SRC_FILES := \ src/thread.cc \ src/thread_list.cc \ src/thread_pool.cc \ + src/throw_location.cc \ src/trace.cc \ src/utf.cc \ src/utils.cc \ @@ -263,7 +264,6 @@ LIBART_COMMON_SRC_FILES += \ src/oat/runtime/context.cc \ src/oat/runtime/support_alloc.cc \ src/oat/runtime/support_cast.cc \ - src/oat/runtime/support_debug.cc \ src/oat/runtime/support_deoptimize.cc \ src/oat/runtime/support_dexcache.cc \ src/oat/runtime/support_field.cc \ diff --git a/src/base/mutex-inl.h b/src/base/mutex-inl.h index 3cb43a8ca1..f911054b86 100644 --- a/src/base/mutex-inl.h +++ b/src/base/mutex-inl.h @@ -97,8 +97,9 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) { BaseMutex* held_mutex = self->GetHeldMutex(static_cast(i)); if (UNLIKELY(held_mutex != NULL)) { LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" " - << "(level " << LockLevel(i) << ") while locking \"" << name_ << "\" " - << "(level " << level_ << ")"; + << "(level " << LockLevel(i) << " - " << i + << ") while locking \"" << name_ << "\" " + << "(level " << level_ << " - " << static_cast(level_) << ")"; if (i > kAbortLock) { // Only abort in the check below if this is more than abort level lock. 
bad_mutexes_held = true; diff --git a/src/check_jni.cc b/src/check_jni.cc index 57ce43265e..30d5099232 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -44,7 +44,7 @@ namespace art { static void JniAbort(const char* jni_function_name, const char* msg) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); - mirror::AbstractMethod* current_method = self->GetCurrentMethod(); + mirror::AbstractMethod* current_method = self->GetCurrentMethod(NULL); std::ostringstream os; os << "JNI DETECTED ERROR IN APPLICATION: " << msg; @@ -401,8 +401,7 @@ class ScopedCheck { * * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ - void Check(bool entry, const char* fmt0, ...) - SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { + void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { va_list ap; const mirror::AbstractMethod* traceMethod = NULL; @@ -411,7 +410,7 @@ class ScopedCheck { // use DetachCurrentThread or GetEnv on a thread that's not yet attached. Thread* self = Thread::Current(); if ((flags_ & kFlag_Invocation) == 0 || self != NULL) { - traceMethod = self->GetCurrentMethod(); + traceMethod = self->GetCurrentMethod(NULL); } } @@ -812,14 +811,19 @@ class ScopedCheck { // Verify that, if an exception has been raised, the native code doesn't // make any JNI calls other than the Exception* methods. if ((flags & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) { - std::string type(PrettyTypeOf(self->GetException())); + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + std::string type(PrettyTypeOf(exception)); // TODO: write native code that doesn't require allocation for dumping an exception. // TODO: do we care any more? art always dumps pending exceptions on aborting threads. 
- if (type != "java.lang.OutOfMemoryError") { - JniAbortF(function_name_, "JNI %s called with pending exception: %s", - function_name_, type.c_str(), jniGetStackTrace(soa_.Env()).c_str()); + bool with_stack_trace = (type != "java.lang.OutOfMemoryError"); + if (with_stack_trace) { + JniAbortF(function_name_, "JNI %s called with pending exception '%s' thrown in %s\n%s", + function_name_, type.c_str(), throw_location.Dump().c_str(), + jniGetStackTrace(soa_.Env()).c_str()); } else { - JniAbortF(function_name_, "JNI %s called with %s pending", function_name_, type.c_str()); + JniAbortF(function_name_, "JNI %s called with pending exception '%s' thrown in %s", + function_name_, type.c_str(), throw_location.Dump().c_str()); } return; } @@ -1772,7 +1776,7 @@ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D'); JniAbortF(__FUNCTION__, "non-nullable address is NULL"); } if (capacity <= 0) { - JniAbortF(__FUNCTION__, "capacity must be greater than 0: %d", capacity); + JniAbortF(__FUNCTION__, "capacity must be greater than 0: %lld", capacity); } return CHECK_JNI_EXIT("L", baseEnv(env)->NewDirectByteBuffer(env, address, capacity)); } diff --git a/src/class_linker.cc b/src/class_linker.cc index a9e17b24b6..4774c63628 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -80,51 +80,9 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); - Thread::Current()->ThrowNewExceptionV("Ljava/lang/NoClassDefFoundError;", fmt, args); - va_end(args); -} - -static void ThrowClassFormatError(const char* fmt, ...) - __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -static void ThrowClassFormatError(const char* fmt, ...) { - va_list args; - va_start(args, fmt); - Thread::Current()->ThrowNewExceptionV("Ljava/lang/ClassFormatError;", fmt, args); - va_end(args); -} - -static void ThrowLinkageError(const char* fmt, ...) 
- __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -static void ThrowLinkageError(const char* fmt, ...) { - va_list args; - va_start(args, fmt); - Thread::Current()->ThrowNewExceptionV("Ljava/lang/LinkageError;", fmt, args); - va_end(args); -} - -static void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type, - const StringPiece& name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassHelper kh(c); - std::ostringstream msg; - msg << "No " << scope << "field " << name << " of type " << type - << " in class " << kh.GetDescriptor() << " or its superclasses"; - std::string location(kh.GetLocation()); - if (!location.empty()) { - msg << " (defined in " << location << ")"; - } - Thread::Current()->ThrowNewException("Ljava/lang/NoSuchFieldError;", msg.str().c_str()); -} - -static void ThrowNullPointerException(const char* fmt, ...) - __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -static void ThrowNullPointerException(const char* fmt, ...) { - va_list args; - va_start(args, fmt); - Thread::Current()->ThrowNewExceptionV("Ljava/lang/NullPointerException;", fmt, args); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionV(throw_location, "Ljava/lang/NoClassDefFoundError;", fmt, args); va_end(args); } @@ -139,18 +97,19 @@ static void ThrowEarlierClassFailure(mirror::Class* c) } CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus(); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); if (c->GetVerifyErrorClass() != NULL) { // TODO: change the verifier to store an _instance_, with a useful detail message? 
ClassHelper ve_ch(c->GetVerifyErrorClass()); - std::string error_descriptor(ve_ch.GetDescriptor()); - Thread::Current()->ThrowNewException(error_descriptor.c_str(), PrettyDescriptor(c).c_str()); + self->ThrowNewException(throw_location, ve_ch.GetDescriptor(), PrettyDescriptor(c).c_str()); } else { - ThrowNoClassDefFoundError("%s", PrettyDescriptor(c).c_str()); + self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;", + PrettyDescriptor(c).c_str()); } } -static void WrapExceptionInInitializer() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void WrapExceptionInInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -163,7 +122,8 @@ static void WrapExceptionInInitializer() // We only wrap non-Error exceptions; an Error can just be used as-is. if (!is_error) { - self->ThrowNewWrappedException("Ljava/lang/ExceptionInInitializerError;", NULL); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;", NULL); } } @@ -1244,8 +1204,7 @@ static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass) ObjectLock lock(self, klass); // Check for circular dependencies between classes. if (!klass->IsResolved() && klass->GetClinitThreadId() == self->GetTid()) { - self->ThrowNewException("Ljava/lang/ClassCircularityError;", - PrettyDescriptor(klass).c_str()); + ThrowClassCircularityError(klass); klass->SetStatus(mirror::Class::kStatusError); return NULL; } @@ -1260,9 +1219,7 @@ static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass) } // Return the loaded class. No exceptions should be pending. 
CHECK(klass->IsResolved()) << PrettyClass(klass); - CHECK(!self->IsExceptionPending()) - << PrettyClass(klass) << " " << PrettyTypeOf(self->GetException()) << "\n" - << self->GetException()->Dump(); + self->AssertNoPendingException(); return klass; } @@ -1335,13 +1292,13 @@ mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoade WellKnownClasses::java_lang_ClassLoader_loadClass, class_name_object.get())); } - if (soa.Env()->ExceptionCheck()) { + if (soa.Self()->IsExceptionPending()) { // If the ClassLoader threw, pass that exception up. return NULL; } else if (result.get() == NULL) { // broken loader - throw NPE to be compatible with Dalvik - ThrowNullPointerException("ClassLoader.loadClass returned null for %s", - class_name_string.c_str()); + ThrowNullPointerException(NULL, StringPrintf("ClassLoader.loadClass returned null for %s", + class_name_string.c_str()).c_str()); return NULL; } else { // success, return mirror::Class* @@ -1560,7 +1517,7 @@ const OatFile::OatMethod ClassLinker::GetOatMethodFor(const mirror::AbstractMeth // Special case to get oat code without overwriting a trampoline. const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) { - CHECK(Runtime::Current()->IsCompiler() || method->GetDeclaringClass()->IsInitializing()); + CHECK(!method->IsAbstract()) << PrettyMethod(method); const void* result = GetOatMethodFor(method).GetCode(); if (result == NULL) { // No code? You must mean to go into the interpreter. 
@@ -1587,7 +1544,8 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { if (class_data == NULL) { return; // no fields or methods - for example a marker interface } - if (!Runtime::Current()->IsStarted() || Runtime::Current()->UseCompileTimeClassPath()) { + Runtime* runtime = Runtime::Current(); + if (!runtime->IsStarted() || runtime->UseCompileTimeClassPath()) { // OAT file unavailable return; } @@ -1603,32 +1561,26 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { } size_t method_index = 0; // Link the code of methods skipped by LinkCode - const void* trampoline = Runtime::Current()->GetResolutionStubArray(Runtime::kStaticMethod)->GetData(); for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { mirror::AbstractMethod* method = klass->GetDirectMethod(i); - if (Runtime::Current()->IsMethodTracingActive()) { - Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - if (instrumentation->GetSavedCodeFromMap(method) == trampoline) { - const void* code = oat_class->GetOatMethod(method_index).GetCode(); - instrumentation->ResetSavedCode(method); - method->SetCode(code); - instrumentation->SaveAndUpdateCode(method); - } - } else if (method->GetCode() == trampoline) { + if (method->IsStatic()) { const void* code = oat_class->GetOatMethod(method_index).GetCode(); if (code == NULL) { // No code? You must mean to go into the interpreter. code = GetInterpreterEntryPoint(); } - method->SetCode(code); + runtime->GetInstrumentation()->UpdateMethodsCode(method, code); } method_index++; } + // Ignore virtual methods on the iterator. } static void LinkCode(SirtRef& method, const OatFile::OatClass* oat_class, uint32_t method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Method shouldn't have already been linked. + DCHECK(method->GetCode() == NULL); // Every kind of method should at least get an invoke stub from the oat_method. // non-abstract methods also get their code pointers. 
const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index); @@ -1641,23 +1593,22 @@ static void LinkCode(SirtRef& method, const OatFile::Oat } if (method->IsStatic() && !method->IsConstructor()) { - // For static methods excluding the class initializer, install the trampoline + // For static methods excluding the class initializer, install the trampoline. method->SetCode(runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()); } + if (method->IsNative()) { - // unregistering restores the dlsym lookup stub + // Unregistering restores the dlsym lookup stub. method->UnregisterNative(Thread::Current()); } - if (Runtime::Current()->IsMethodTracingActive()) { - Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - instrumentation->SaveAndUpdateCode(method.get()); - } - if (method->GetCode() == NULL) { // No code? You must mean to go into the interpreter. method->SetCode(GetInterpreterEntryPoint()); } + + // Allow instrumentation its chance to hijack code. + runtime->GetInstrumentation()->UpdateMethodsCode(method.get(), method->GetCode()); } void ClassLinker::LoadClass(const DexFile& dex_file, @@ -2229,7 +2180,6 @@ void ClassLinker::VerifyClass(mirror::Class* klass) { // Verify super class. mirror::Class* super = klass->GetSuperClass(); - std::string error_msg; if (super != NULL) { // Acquire lock to prevent races on verifying the super class. 
ObjectLock lock(self, super); @@ -2238,18 +2188,17 @@ void ClassLinker::VerifyClass(mirror::Class* klass) { Runtime::Current()->GetClassLinker()->VerifyClass(super); } if (!super->IsCompileTimeVerified()) { - error_msg = "Rejecting class "; - error_msg += PrettyDescriptor(klass); - error_msg += " that attempts to sub-class erroneous class "; - error_msg += PrettyDescriptor(super); + std::string error_msg(StringPrintf("Rejecting class %s that attempts to sub-class erroneous class %s", + PrettyDescriptor(klass).c_str(), + PrettyDescriptor(super).c_str())); LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8(); - SirtRef cause(self, self->GetException()); + SirtRef cause(self, self->GetException(NULL)); if (cause.get() != NULL) { self->ClearException(); } - self->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); + ThrowVerifyError(klass, "%s", error_msg.c_str()); if (cause.get() != NULL) { - self->GetException()->SetCause(cause.get()); + self->GetException(NULL)->SetCause(cause.get()); } klass->SetStatus(mirror::Class::kStatusError); return; @@ -2264,13 +2213,12 @@ void ClassLinker::VerifyClass(mirror::Class* klass) { if (oat_file_class_status == mirror::Class::kStatusError) { LOG(WARNING) << "Skipping runtime verification of erroneous class " << PrettyDescriptor(klass) << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8(); - error_msg = "Rejecting class "; - error_msg += PrettyDescriptor(klass); - error_msg += " because it failed compile-time verification"; - Thread::Current()->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); + ThrowVerifyError(klass, "Rejecting class %s because it failed compile-time verification", + PrettyDescriptor(klass).c_str()); klass->SetStatus(mirror::Class::kStatusError); return; } + std::string error_msg; if (!preverified) { verifier_failure = verifier::MethodVerifier::VerifyClass(klass, error_msg); } @@ -2301,7 +2249,7 @@ void 
ClassLinker::VerifyClass(mirror::Class* klass) { << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8() << " because: " << error_msg; self->AssertNoPendingException(); - self->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str()); + ThrowVerifyError(klass, "%s", error_msg.c_str()); klass->SetStatus(mirror::Class::kStatusError); } } @@ -2463,7 +2411,7 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name, klass->SetSuperClass(proxy_class); // The super class is java.lang.reflect.Proxy klass->SetStatus(mirror::Class::kStatusLoaded); // Class is now effectively in the loaded state - DCHECK(!Thread::Current()->IsExceptionPending()); + self->AssertNoPendingException(); // Link the fields and virtual methods, creating vtable and iftables if (!LinkClass(klass, interfaces)) { @@ -2798,7 +2746,7 @@ bool ClassLinker::ValidateSuperClassDescriptors(const mirror::Class* klass) { const mirror::AbstractMethod* method = klass->GetVTable()->Get(i); if (method != super->GetVTable()->Get(i) && !IsSameMethodSignatureInDifferentClassContexts(method, super, klass)) { - ThrowLinkageError("Class %s method %s resolves differently in superclass %s", + ThrowLinkageError(klass, "Class %s method %s resolves differently in superclass %s", PrettyDescriptor(klass).c_str(), PrettyMethod(method).c_str(), PrettyDescriptor(super).c_str()); return false; @@ -2813,7 +2761,7 @@ bool ClassLinker::ValidateSuperClassDescriptors(const mirror::Class* klass) { const mirror::AbstractMethod* method = iftable->GetMethodArray(i)->Get(j); if (!IsSameMethodSignatureInDifferentClassContexts(method, interface, method->GetDeclaringClass())) { - ThrowLinkageError("Class %s method %s resolves differently in interface %s", + ThrowLinkageError(klass, "Class %s method %s resolves differently in interface %s", PrettyDescriptor(method->GetDeclaringClass()).c_str(), PrettyMethod(method).c_str(), PrettyDescriptor(interface).c_str()); @@ -2996,10 +2944,9 @@ bool 
ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const De } // Verify if (!klass->CanAccess(super_class)) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalAccessError;", - "Class %s extended by class %s is inaccessible", - PrettyDescriptor(super_class).c_str(), - PrettyDescriptor(klass.get()).c_str()); + ThrowIllegalAccessError(klass.get(), "Class %s extended by class %s is inaccessible", + PrettyDescriptor(super_class).c_str(), + PrettyDescriptor(klass.get()).c_str()); return false; } klass->SetSuperClass(super_class); @@ -3016,10 +2963,9 @@ bool ClassLinker::LoadSuperAndInterfaces(SirtRef& klass, const De // Verify if (!klass->CanAccess(interface)) { // TODO: the RI seemed to ignore this in my testing. - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalAccessError;", - "Interface %s implemented by class %s is inaccessible", - PrettyDescriptor(interface).c_str(), - PrettyDescriptor(klass.get()).c_str()); + ThrowIllegalAccessError(klass.get(), "Interface %s implemented by class %s is inaccessible", + PrettyDescriptor(interface).c_str(), + PrettyDescriptor(klass.get()).c_str()); return false; } } @@ -3034,31 +2980,28 @@ bool ClassLinker::LinkSuperClass(SirtRef& klass) { mirror::Class* super = klass->GetSuperClass(); if (klass.get() == GetClassRoot(kJavaLangObject)) { if (super != NULL) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassFormatError;", - "java.lang.Object must not have a superclass"); + ThrowClassFormatError(klass.get(), "java.lang.Object must not have a superclass"); return false; } return true; } if (super == NULL) { - ThrowLinkageError("No superclass defined for class %s", PrettyDescriptor(klass.get()).c_str()); + ThrowLinkageError(klass.get(), "No superclass defined for class %s", + PrettyDescriptor(klass.get()).c_str()); return false; } // Verify if (super->IsFinal() || super->IsInterface()) { - Thread* self = Thread::Current(); - self->ThrowNewExceptionF("Ljava/lang/IncompatibleClassChangeError;", - "Superclass %s 
of %s is %s", - PrettyDescriptor(super).c_str(), - PrettyDescriptor(klass.get()).c_str(), - super->IsFinal() ? "declared final" : "an interface"); + ThrowIncompatibleClassChangeError(klass.get(), "Superclass %s of %s is %s", + PrettyDescriptor(super).c_str(), + PrettyDescriptor(klass.get()).c_str(), + super->IsFinal() ? "declared final" : "an interface"); return false; } if (!klass->CanAccess(super)) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalAccessError;", - "Superclass %s is inaccessible by %s", - PrettyDescriptor(super).c_str(), - PrettyDescriptor(klass.get()).c_str()); + ThrowIllegalAccessError(klass.get(), "Superclass %s is inaccessible to class %s", + PrettyDescriptor(super).c_str(), + PrettyDescriptor(klass.get()).c_str()); return false; } @@ -3074,8 +3017,9 @@ bool ClassLinker::LinkSuperClass(SirtRef& klass) { } // Disallow custom direct subclasses of java.lang.ref.Reference. if (init_done_ && super == GetClassRoot(kJavaLangRefReference)) { - ThrowLinkageError("Class %s attempts to subclass java.lang.ref.Reference, which is not allowed", - PrettyDescriptor(klass.get()).c_str()); + ThrowLinkageError(klass.get(), + "Class %s attempts to subclass java.lang.ref.Reference, which is not allowed", + PrettyDescriptor(klass.get()).c_str()); return false; } @@ -3096,7 +3040,7 @@ bool ClassLinker::LinkMethods(SirtRef& klass, // No vtable. 
size_t count = klass->NumVirtualMethods(); if (!IsUint(16, count)) { - ThrowClassFormatError("Too many methods on interface: %zd", count); + ThrowClassFormatError(klass.get(), "Too many methods on interface: %zd", count); return false; } for (size_t i = 0; i < count; ++i) { @@ -3133,7 +3077,7 @@ bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { if (local_mh.HasSameNameAndSignature(&super_mh)) { if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) { if (super_method->IsFinal()) { - ThrowLinkageError("Method %s overrides final method in class %s", + ThrowLinkageError(klass.get(), "Method %s overrides final method in class %s", PrettyMethod(local_method).c_str(), super_mh.GetDeclaringClassDescriptor()); return false; @@ -3156,7 +3100,7 @@ bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { } } if (!IsUint(16, actual_count)) { - ThrowClassFormatError("Too many methods defined on class: %zd", actual_count); + ThrowClassFormatError(klass.get(), "Too many methods defined on class: %zd", actual_count); return false; } // Shrink vtable if possible @@ -3169,7 +3113,7 @@ bool ClassLinker::LinkVirtualMethods(SirtRef& klass) { CHECK(klass.get() == GetClassRoot(kJavaLangObject)); uint32_t num_virtual_methods = klass->NumVirtualMethods(); if (!IsUint(16, num_virtual_methods)) { - ThrowClassFormatError("Too many methods: %d", num_virtual_methods); + ThrowClassFormatError(klass.get(), "Too many methods: %d", num_virtual_methods); return false; } SirtRef > @@ -3238,10 +3182,9 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, DCHECK(interface != NULL); if (!interface->IsInterface()) { ClassHelper ih(interface); - self->ThrowNewExceptionF("Ljava/lang/IncompatibleClassChangeError;", - "Class %s implements non-interface class %s", - PrettyDescriptor(klass.get()).c_str(), - PrettyDescriptor(ih.GetDescriptor()).c_str()); + ThrowIncompatibleClassChangeError(klass.get(), "Class %s implements non-interface class %s", + 
PrettyDescriptor(klass.get()).c_str(), + PrettyDescriptor(ih.GetDescriptor()).c_str()); return false; } // Check if interface is already in iftable @@ -3297,7 +3240,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, AllocMethodArray(self, num_methods); iftable->SetMethodArray(i, method_array); mirror::ObjectArray* vtable = klass->GetVTableDuringLinking(); - for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { + for (size_t j = 0; j < num_methods; ++j) { mirror::AbstractMethod* interface_method = interface->GetVirtualMethod(j); interface_mh.ChangeMethod(interface_method); int32_t k; @@ -3314,9 +3257,10 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef& klass, vtable_mh.ChangeMethod(vtable_method); if (interface_mh.HasSameNameAndSignature(&vtable_mh)) { if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) { - self->ThrowNewExceptionF("Ljava/lang/IllegalAccessError;", - "Implementation not public: %s", - PrettyMethod(vtable_method).c_str()); + ThrowIllegalAccessError(klass.get(), + "Method '%s' implementing interface method '%s' is not public", + PrettyMethod(vtable_method).c_str(), + PrettyMethod(interface_method).c_str()); return false; } method_array->Set(j, vtable_method); @@ -3657,12 +3601,15 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, // same name to be loaded simultaneously by different loaders dex_cache->SetResolvedType(type_idx, resolved); } else { - CHECK(Thread::Current()->IsExceptionPending()) + Thread* self = Thread::Current(); + CHECK(self->IsExceptionPending()) << "Expected pending exception for failed resolution of: " << descriptor; - // Convert a ClassNotFoundException to a NoClassDefFoundError - if (Thread::Current()->GetException()->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) { + // Convert a ClassNotFoundException to a NoClassDefFoundError. 
+ SirtRef cause(self, self->GetException(NULL)); + if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) { Thread::Current()->ClearException(); ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor); + self->GetException(NULL)->SetCause(cause.get()); } } } @@ -3779,7 +3726,7 @@ mirror::AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, if (resolved != NULL) { ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer); } else { - ThrowNoSuchMethodError(type, klass, name, signature, referrer); + ThrowNoSuchMethodError(type, klass, name, signature); } } break; @@ -3791,12 +3738,12 @@ mirror::AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, if (resolved != NULL) { ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer); } else { - ThrowNoSuchMethodError(type, klass, name, signature, referrer); + ThrowNoSuchMethodError(type, klass, name, signature); } } break; case kSuper: - ThrowNoSuchMethodError(type, klass, name, signature, referrer); + ThrowNoSuchMethodError(type, klass, name, signature); break; case kVirtual: if (resolved != NULL) { @@ -3806,7 +3753,7 @@ mirror::AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, if (resolved != NULL) { ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer); } else { - ThrowNoSuchMethodError(type, klass, name, signature, referrer); + ThrowNoSuchMethodError(type, klass, name, signature); } } break; diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index 6b8083f4b6..c47ce4af8f 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -46,7 +46,7 @@ class ClassLinkerTest : public CommonTest { EXPECT_TRUE(class_linker_->FindSystemClass(descriptor.c_str()) == NULL); Thread* self = Thread::Current(); EXPECT_TRUE(self->IsExceptionPending()); - Object* exception = self->GetException(); + Object* exception = self->GetException(NULL); self->ClearException(); Class* exception_class = 
class_linker_->FindSystemClass("Ljava/lang/NoClassDefFoundError;"); EXPECT_TRUE(exception->InstanceOf(exception_class)); diff --git a/src/common_throws.cc b/src/common_throws.cc index 8673d11894..0bb9da274b 100644 --- a/src/common_throws.cc +++ b/src/common_throws.cc @@ -32,135 +32,98 @@ namespace art { -static void AddReferrerLocation(std::ostream& os, const mirror::AbstractMethod* referrer) +static void AddReferrerLocation(std::ostream& os, const mirror::Class* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { - ClassHelper kh(referrer->GetDeclaringClass()); + ClassHelper kh(referrer); std::string location(kh.GetLocation()); if (!location.empty()) { - os << " (accessed from " << location << ")"; + os << " (declaration of '" << PrettyDescriptor(referrer) + << "' appears in " << location << ")"; } } } -static void AddReferrerLocationFromClass(std::ostream& os, mirror::Class* referrer) +static void ThrowException(const ThrowLocation* throw_location, const char* exception_descriptor, + const mirror::Class* referrer, const char* fmt, va_list* args = NULL) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (referrer != NULL) { - ClassHelper kh(referrer); - std::string location(kh.GetLocation()); - if (!location.empty()) { - os << " (declaration of '" << PrettyDescriptor(referrer) - << "' appears in " << location << ")"; - } + std::ostringstream msg; + if (args != NULL) { + std::string vmsg; + StringAppendV(&vmsg, fmt, *args); + msg << vmsg; + } else { + msg << fmt; + } + AddReferrerLocation(msg, referrer); + Thread* self = Thread::Current(); + if (throw_location == NULL) { + ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(computed_throw_location, exception_descriptor, msg.str().c_str()); + } else { + self->ThrowNewException(*throw_location, exception_descriptor, msg.str().c_str()); } } -// NullPointerException +// ArithmeticException -void 
ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read) { - std::ostringstream msg; - msg << "Attempt to " << (is_read ? "read from" : "write to") - << " field '" << PrettyField(field, true) << "' on a null object reference"; - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); +void ThrowArithmeticExceptionDivideByZero(Thread* self) { + ThrowException(NULL, "Ljava/lang/ArithmeticException;", NULL, "divide by zero"); } -void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx, - InvokeType type) { - mirror::DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - std::ostringstream msg; - msg << "Attempt to invoke " << type << " method '" - << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); +// ArrayIndexOutOfBoundsException + +void ThrowArrayIndexOutOfBoundsException(int index, int length) { + ThrowException(NULL, "Ljava/lang/ArrayIndexOutOfBoundsException;", NULL, + StringPrintf("length=%d; index=%d", length, index).c_str()); } -void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc) { - const DexFile::CodeItem* code = MethodHelper(throw_method).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - DecodedInstruction dec_insn(instr); - switch (instr->Opcode()) { - case Instruction::INVOKE_DIRECT: - case Instruction::INVOKE_DIRECT_RANGE: - ThrowNullPointerExceptionForMethodAccess(throw_method, dec_insn.vB, kDirect); - break; - case Instruction::INVOKE_VIRTUAL: - case Instruction::INVOKE_VIRTUAL_RANGE: - ThrowNullPointerExceptionForMethodAccess(throw_method, dec_insn.vB, kVirtual); - break; - case Instruction::INVOKE_INTERFACE: - case 
Instruction::INVOKE_INTERFACE_RANGE: - ThrowNullPointerExceptionForMethodAccess(throw_method, dec_insn.vB, kInterface); - break; - case Instruction::IGET: - case Instruction::IGET_WIDE: - case Instruction::IGET_OBJECT: - case Instruction::IGET_BOOLEAN: - case Instruction::IGET_BYTE: - case Instruction::IGET_CHAR: - case Instruction::IGET_SHORT: { - mirror::Field* field = - Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false); - ThrowNullPointerExceptionForFieldAccess(field, true /* read */); - break; - } - case Instruction::IPUT: - case Instruction::IPUT_WIDE: - case Instruction::IPUT_OBJECT: - case Instruction::IPUT_BOOLEAN: - case Instruction::IPUT_BYTE: - case Instruction::IPUT_CHAR: - case Instruction::IPUT_SHORT: { - mirror::Field* field = - Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false); - ThrowNullPointerExceptionForFieldAccess(field, false /* write */); - break; - } - case Instruction::AGET: - case Instruction::AGET_WIDE: - case Instruction::AGET_OBJECT: - case Instruction::AGET_BOOLEAN: - case Instruction::AGET_BYTE: - case Instruction::AGET_CHAR: - case Instruction::AGET_SHORT: - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", - "Attempt to read from null array"); - break; - case Instruction::APUT: - case Instruction::APUT_WIDE: - case Instruction::APUT_OBJECT: - case Instruction::APUT_BOOLEAN: - case Instruction::APUT_BYTE: - case Instruction::APUT_CHAR: - case Instruction::APUT_SHORT: - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", - "Attempt to write to null array"); - break; - case Instruction::ARRAY_LENGTH: - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", - "Attempt to get length of null array"); - break; - default: { - // TODO: We should have covered all the cases where we expect a NPE above, this - // message/logging is so we can improve any cases we've missed in the future. 
- const DexFile& dex_file = *throw_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); - std::string message("Null pointer exception during instruction '"); - message += instr->DumpString(&dex_file); - message += "'"; - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", message.c_str()); - break; - } - } +// ArrayStoreException + +void ThrowArrayStoreException(const mirror::Class* element_class, + const mirror::Class* array_class) { + ThrowException(NULL, "Ljava/lang/ArrayStoreException;", NULL, + StringPrintf("%s cannot be stored in an array of type %s", + PrettyDescriptor(element_class).c_str(), + PrettyDescriptor(array_class).c_str()).c_str()); +} + +// ClassCastException + +void ThrowClassCastException(const mirror::Class* dest_type, const mirror::Class* src_type) { + ThrowException(NULL, "Ljava/lang/ClassCastException;", NULL, + StringPrintf("%s cannot be cast to %s", + PrettyDescriptor(src_type).c_str(), + PrettyDescriptor(dest_type).c_str()).c_str()); +} + +void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg) { + ThrowException(throw_location, "Ljava/lang/ClassCastException;", NULL, msg); +} + +// ClassCircularityError + +void ThrowClassCircularityError(mirror::Class* c) { + std::ostringstream msg; + msg << PrettyDescriptor(c); + ThrowException(NULL, "Ljava/lang/ClassCircularityError;", c, msg.str().c_str()); } +// ClassFormatError + +void ThrowClassFormatError(const mirror::Class* referrer, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/ClassFormatError;", referrer, fmt, &args); + va_end(args);} + // IllegalAccessError void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) { std::ostringstream msg; msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '" << PrettyDescriptor(accessed) << "'"; - AddReferrerLocationFromClass(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, @@ -171,24 +134,21 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '" << PrettyDescriptor(accessed) << "') in attempt to invoke " << type << " method " << PrettyMethod(called).c_str(); - AddReferrerLocation(msg, caller); - Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::AbstractMethod* accessed) { std::ostringstream msg; msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '" << PrettyDescriptor(referrer) << "'"; - AddReferrerLocationFromClass(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::Field* accessed) { std::ostringstream msg; msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '" << PrettyDescriptor(referrer) << "'"; - AddReferrerLocationFromClass(msg, 
referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, @@ -196,10 +156,24 @@ void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, std::ostringstream msg; msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '" << PrettyMethod(referrer) << "'"; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer != NULL ? referrer->GetClass() : NULL, + msg.str().c_str()); +} + +void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...){ + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, fmt, &args); + va_end(args); } +// IllegalArgumentException + +void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg) { + ThrowException(throw_location, "Ljava/lang/IllegalArgumentException;", NULL, msg); +} + + // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, @@ -208,9 +182,9 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun std::ostringstream msg; msg << "The method '" << PrettyMethod(method) << "' was expected to be of type " << expected_type << " but instead was found to be of type " << found_type; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IncompatibleClassChangeError;", - msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", + referrer != NULL ? 
referrer->GetClass() : NULL, + msg.str().c_str()); } void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::AbstractMethod* interface_method, @@ -224,9 +198,9 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::Ab << "' does not implement interface '" << PrettyDescriptor(interface_method->GetDeclaringClass()) << "' in call to '" << PrettyMethod(interface_method) << "'"; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IncompatibleClassChangeError;", - msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", + referrer != NULL ? referrer->GetClass() : NULL, + msg.str().c_str()); } void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, bool is_static, @@ -235,30 +209,192 @@ void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, msg << "Expected '" << PrettyField(resolved_field) << "' to be a " << (is_static ? "static" : "instance") << " field" << " rather than a " << (is_static ? "instance" : "static") << " field"; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/IncompatibleClassChangeError;", - msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(), + msg.str().c_str()); +} + +void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...){ + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args); + va_end(args); +} + +// LinkageError + +void ThrowLinkageError(const mirror::Class* referrer, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/LinkageError;", referrer, fmt, &args); + va_end(args); +} + +// NegativeArraySizeException + +void ThrowNegativeArraySizeException(int size) { + ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL, StringPrintf("%d", size).c_str()); +} + +void ThrowNegativeArraySizeException(const char* msg) { + ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL, msg); +} + +// NoSuchFieldError + +void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, + const StringPiece& type, const StringPiece& name) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassHelper kh(c); + std::ostringstream msg; + msg << "No " << scope << "field " << name << " of type " << type + << " in class " << kh.GetDescriptor() << " or its superclasses"; + ThrowException(NULL, "Ljava/lang/NoSuchFieldError;", c, msg.str().c_str()); } // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, - const StringPiece& signature, const mirror::AbstractMethod* referrer) { + const StringPiece& signature) { std::ostringstream msg; ClassHelper kh(c); msg << "No " << type << " method " << name << signature << " in class " << kh.GetDescriptor() << " or its super classes"; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/NoSuchMethodError;", msg.str().c_str()); + ThrowException(NULL, "Ljava/lang/NoSuchMethodError;", c, msg.str().c_str()); } -void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer) { - mirror::DexCache* dex_cache = referrer->GetDeclaringClass()->GetDexCache(); +void ThrowNoSuchMethodError(uint32_t method_idx) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); 
std::ostringstream msg; msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'"; - AddReferrerLocation(msg, referrer); - Thread::Current()->ThrowNewException("Ljava/lang/NoSuchMethodError;", msg.str().c_str()); + ThrowException(&throw_location, "Ljava/lang/NoSuchMethodError;", + throw_location.GetMethod()->GetDeclaringClass(), msg.str().c_str()); +} + +// NullPointerException + +void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location, + mirror::Field* field, bool is_read) { + std::ostringstream msg; + msg << "Attempt to " << (is_read ? "read from" : "write to") + << " field '" << PrettyField(field, true) << "' on a null object reference"; + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str()); +} + +void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location, uint32_t method_idx, + InvokeType type) { + mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + std::ostringstream msg; + msg << "Attempt to invoke " << type << " method '" + << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str()); +} + +void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) { + const DexFile::CodeItem* code = MethodHelper(throw_location.GetMethod()).GetCodeItem(); + uint32_t throw_dex_pc = throw_location.GetDexPc(); + CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]); + DecodedInstruction dec_insn(instr); + switch (instr->Opcode()) { + case Instruction::INVOKE_DIRECT: + case Instruction::INVOKE_DIRECT_RANGE: + ThrowNullPointerExceptionForMethodAccess(throw_location, dec_insn.vB, kDirect); + break; + case Instruction::INVOKE_VIRTUAL: + case Instruction::INVOKE_VIRTUAL_RANGE: + 
ThrowNullPointerExceptionForMethodAccess(throw_location, dec_insn.vB, kVirtual); + break; + case Instruction::INVOKE_INTERFACE: + case Instruction::INVOKE_INTERFACE_RANGE: + ThrowNullPointerExceptionForMethodAccess(throw_location, dec_insn.vB, kInterface); + break; + case Instruction::IGET: + case Instruction::IGET_WIDE: + case Instruction::IGET_OBJECT: + case Instruction::IGET_BOOLEAN: + case Instruction::IGET_BYTE: + case Instruction::IGET_CHAR: + case Instruction::IGET_SHORT: { + mirror::Field* field = + Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, + throw_location.GetMethod(), false); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true /* read */); + break; + } + case Instruction::IPUT: + case Instruction::IPUT_WIDE: + case Instruction::IPUT_OBJECT: + case Instruction::IPUT_BOOLEAN: + case Instruction::IPUT_BYTE: + case Instruction::IPUT_CHAR: + case Instruction::IPUT_SHORT: { + mirror::Field* field = + Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, + throw_location.GetMethod(), false); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false /* write */); + break; + } + case Instruction::AGET: + case Instruction::AGET_WIDE: + case Instruction::AGET_OBJECT: + case Instruction::AGET_BOOLEAN: + case Instruction::AGET_BYTE: + case Instruction::AGET_CHAR: + case Instruction::AGET_SHORT: + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, + "Attempt to read from null array"); + break; + case Instruction::APUT: + case Instruction::APUT_WIDE: + case Instruction::APUT_OBJECT: + case Instruction::APUT_BOOLEAN: + case Instruction::APUT_BYTE: + case Instruction::APUT_CHAR: + case Instruction::APUT_SHORT: + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, + "Attempt to write to null array"); + break; + case Instruction::ARRAY_LENGTH: + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, + "Attempt to get length of null array"); + break; 
+ default: { + // TODO: We should have covered all the cases where we expect a NPE above, this + // message/logging is so we can improve any cases we've missed in the future. + const DexFile& dex_file = + *throw_location.GetMethod()->GetDeclaringClass()->GetDexCache()->GetDexFile(); + ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, + StringPrintf("Null pointer exception during instruction '%s'", + instr->DumpString(&dex_file).c_str()).c_str()); + break; + } + } +} + +void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg) { + ThrowException(throw_location, "Ljava/lang/NullPointerException;", NULL, msg); +} + +// RuntimeException + +void ThrowRuntimeException(const char* fmt, ...) { + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/RuntimeException;", NULL, fmt, &args); + va_end(args); +} + +// VerifyError + +void ThrowVerifyError(const mirror::Class* referrer, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + ThrowException(NULL, "Ljava/lang/VerifyError;", referrer, fmt, &args); + va_end(args); } } // namespace art diff --git a/src/common_throws.h b/src/common_throws.h index 9e28bd72e3..5555435051 100644 --- a/src/common_throws.h +++ b/src/common_throws.h @@ -28,17 +28,39 @@ class Field; class Object; } // namespace mirror class StringPiece; +class ThrowLocation; -// NullPointerException +// ArithmeticException + +void ThrowArithmeticExceptionDivideByZero(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// ArrayIndexOutOfBoundsException -void ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read) +void ThrowArrayIndexOutOfBoundsException(int index, int length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx, - InvokeType type) +// ArrayStoreException + +void ThrowArrayStoreException(const mirror::Class* element_class, + const mirror::Class* 
array_class) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// ClassCircularityError + +void ThrowClassCircularityError(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// ClassCastException + +void ThrowClassCastException(const mirror::Class* dest_type, const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc) +// ClassFormatError + +void ThrowClassFormatError(const mirror::Class* referrer, const char* fmt, ...) + __attribute__((__format__(__printf__, 2, 3))) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IllegalAccessError @@ -62,6 +84,15 @@ void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, mirror::Field* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) + __attribute__((__format__(__printf__, 2, 3))) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// IllegalArgumentException + +void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, @@ -78,13 +109,66 @@ void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, const mirror::AbstractMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...) + __attribute__((__format__(__printf__, 2, 3))) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// LinkageError + +void ThrowLinkageError(const mirror::Class* referrer, const char* fmt, ...) 
+ __attribute__((__format__(__printf__, 2, 3))) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// NegativeArraySizeException + +void ThrowNegativeArraySizeException(int size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +void ThrowNegativeArraySizeException(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + +// NoSuchFieldError + +void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, + const StringPiece& type, const StringPiece& name) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, - const StringPiece& signature, const mirror::AbstractMethod* referrer) + const StringPiece& signature) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +void ThrowNoSuchMethodError(uint32_t method_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// NullPointerException + +void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location, + mirror::Field* field, + bool is_read) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer) +void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location, + uint32_t method_idx, + InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// RuntimeException + +void ThrowRuntimeException(const char* fmt, ...) + __attribute__((__format__(__printf__, 1, 2))) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// VerifyError + +void ThrowVerifyError(const mirror::Class* referrer, const char* fmt, ...) 
+ __attribute__((__format__(__printf__, 2, 3))) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/compiler/dex/frontend.h b/src/compiler/dex/frontend.h index 2e62dc8c7b..49e085270c 100644 --- a/src/compiler/dex/frontend.h +++ b/src/compiler/dex/frontend.h @@ -55,7 +55,6 @@ enum opt_control_vector { // Force code generation paths for testing. enum debugControlVector { - kDebugDisplayMissingTargets, kDebugVerbose, kDebugDumpCFG, kDebugSlowFieldPath, diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc index c13e79715a..4fadc9d361 100644 --- a/src/compiler/dex/quick/gen_common.cc +++ b/src/compiler/dex/quick/gen_common.cc @@ -507,17 +507,6 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } - -// Debugging routine - if null target, branch to DebugMe -void Mir2Lir::GenShowTarget() -{ - DCHECK_NE(cu_->instruction_set, kX86) << "unimplemented GenShowTarget"; - LIR* branch_over = OpCmpImmBranch(kCondNe, TargetReg(kInvokeTgt), 0, NULL); - LoadWordDisp(TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt)); - LIR* target = NewLIR0(kPseudoTargetLabel); - branch_over->target = target; -} - void Mir2Lir::HandleSuspendLaunchPads() { LIR** suspend_label = reinterpret_cast(suspend_launchpads_.elem_list); diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc index 3e946f89aa..c0bae29a95 100644 --- a/src/compiler/dex/quick/gen_invoke.cc +++ b/src/compiler/dex/quick/gen_invoke.cc @@ -1375,9 +1375,6 @@ void Mir2Lir::GenInvoke(CallInfo* info) vtable_idx, direct_code, direct_method, original_type); } - if (cu_->enable_debug & (1 << kDebugDisplayMissingTargets)) { - GenShowTarget(); - } LIR* call_inst; if (cu_->instruction_set != kX86) { call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt)); diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h index 69ebc7e21c..aec0cc1dc3 100644 --- a/src/compiler/dex/quick/mir_to_lir.h +++ 
b/src/compiler/dex/quick/mir_to_lir.h @@ -396,7 +396,6 @@ class Mir2Lir : public Backend { bool is_long_or_double, bool is_object); void GenSget(uint32_t field_idx, RegLocation rl_dest, bool is_long_or_double, bool is_object); - void GenShowTarget(); void GenIGet(uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object); void GenIPut(uint32_t field_idx, int opt_flags, OpSize size, diff --git a/src/compiler/driver/compiler_driver.cc b/src/compiler/driver/compiler_driver.cc index 700936c881..4e8ebbd4ef 100644 --- a/src/compiler/driver/compiler_driver.cc +++ b/src/compiler/driver/compiler_driver.cc @@ -1202,9 +1202,8 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_ manager->GetClassLinker()->FindClass(descriptor, soa.Decode(manager->GetClassLoader())); if (klass == NULL) { - Thread* self = Thread::Current(); - CHECK(self->IsExceptionPending()); - self->ClearException(); + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); /* * At compile time, we can still structurally verify the class even if FindClass fails. @@ -1230,13 +1229,13 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_ if (klass->IsErroneous()) { // ClassLinker::VerifyClass throws, which isn't useful in the compiler. 
- CHECK(Thread::Current()->IsExceptionPending()); - Thread::Current()->ClearException(); + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); } CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) << PrettyDescriptor(klass) << ": state=" << klass->GetStatus(); - CHECK(!Thread::Current()->IsExceptionPending()) << PrettyTypeOf(Thread::Current()->GetException()); + soa.Self()->AssertNoPendingException(); } void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, @@ -1435,7 +1434,6 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl mirror::ClassLoader* class_loader = soa.Decode(manager->GetClassLoader()); const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def); mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader); - Thread* self = Thread::Current(); bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1; bool can_init_static_fields = compiling_boot && manager->GetCompiler()->IsImageClass(descriptor); @@ -1447,9 +1445,9 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl // its parents, whose locks are acquired. This leads to a parent-to-child and a child-to-parent // lock ordering and consequent potential deadlock. static Mutex lock1("Initializer lock", kMonitorLock); - MutexLock mu(self, lock1); + MutexLock mu(soa.Self(), lock1); // The lock required to initialize the class. - ObjectLock lock2(self, klass); + ObjectLock lock2(soa.Self(), klass); // Only try to initialize classes that were successfully verified. 
if (klass->IsVerified()) { manager->GetClassLinker()->EnsureInitialized(klass, false, can_init_static_fields); @@ -1473,7 +1471,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } else { manager->GetClassLinker()->EnsureInitialized(klass, true, can_init_static_fields); } - CHECK(!self->IsExceptionPending()) << self->GetException()->Dump(); + soa.Self()->AssertNoPendingException(); } } } @@ -1494,7 +1492,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } } // Clear any class not found or verification exceptions. - self->ClearException(); + soa.Self()->ClearException(); } void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file, @@ -1651,7 +1649,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t if (self->IsExceptionPending()) { ScopedObjectAccess soa(self); LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" - << self->GetException()->Dump(); + << self->GetException(NULL)->Dump(); } } diff --git a/src/compiler/llvm/runtime_support_llvm.cc b/src/compiler/llvm/runtime_support_llvm.cc index d9b879afbd..d6c81813ad 100644 --- a/src/compiler/llvm/runtime_support_llvm.cc +++ b/src/compiler/llvm/runtime_support_llvm.cc @@ -135,15 +135,16 @@ void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread) obj->MonitorExit(thread); } -void art_portable_test_suspend_from_code(Thread* thread) +void art_portable_test_suspend_from_code(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckSuspend(thread); - if (thread->ReadFlag(kEnterInterpreter)) { + CheckSuspend(self); + if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { // Save out the shadow frame to the heap - ShadowFrameCopyVisitor visitor(thread); + ShadowFrameCopyVisitor visitor(self); visitor.WalkStack(true); - thread->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy(), JValue()); 
- thread->SetException(reinterpret_cast(-1)); + self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); + self->SetDeoptimizationReturnValue(JValue()); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); } } @@ -175,51 +176,59 @@ bool art_portable_is_exception_pending_from_code() { } void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread::Current()->ThrowNewException("Ljava/lang/ArithmeticException;", - "divide by zero"); + ThrowArithmeticExceptionDivideByZero(Thread::Current()); } void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "length=%d; index=%d", length, index); + ThrowArrayIndexOutOfBoundsException(index, length); } void art_portable_throw_no_such_method_from_code(int32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // We need the calling method as context for the method_idx. - mirror::AbstractMethod* method = Thread::Current()->GetCurrentMethod(); - ThrowNoSuchMethodError(method_idx, method); + ThrowNoSuchMethodError(method_idx); } void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* throw_method = - Thread::Current()->GetManagedStack()->GetTopShadowFrame()->GetMethod(); - ThrowNullPointerExceptionFromDexPC(throw_method, dex_pc); + // TODO: remove dex_pc argument from caller. 
+ UNUSED(dex_pc); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); } void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ThrowStackOverflowError(Thread::Current()); } -void art_portable_throw_exception_from_code(mirror::Object* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread::Current()->DeliverException(static_cast(exception)); +void art_portable_throw_exception_from_code(mirror::Throwable* exception) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + ThrowNullPointerException(NULL, "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } } void* art_portable_get_and_clear_exception(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(self->IsExceptionPending()); - mirror::Throwable* exception = self->GetException(); + // TODO: make this inline. + mirror::Throwable* exception = self->GetException(NULL); self->ClearException(); return exception; } int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method, uint32_t ti_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Throwable* exception = Thread::Current()->GetException(); - // Check for magic deoptimization exception. - if (reinterpret_cast(exception) == -1) { + Thread* self = Thread::Current(); // TODO: make an argument. + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + // Check for special deoptimization exception. 
+ if (UNLIKELY(reinterpret_cast(exception) == -1)) { return -1; } mirror::Class* exception_type = exception->GetClass(); @@ -229,26 +238,40 @@ int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_ const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); int iter_index = 0; + int result = -1; + uint32_t catch_dex_pc = -1; // Iterate over the catch handlers associated with dex_pc for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { uint16_t iter_type_idx = it.GetHandlerTypeIndex(); // Catch all case if (iter_type_idx == DexFile::kDexNoIndex16) { - return iter_index; + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; } // Does this catch exception type apply? mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); - if (iter_exception_type == NULL) { - // The verifier should take care of resolving all exception classes early + if (UNLIKELY(iter_exception_type == NULL)) { + // TODO: check, the verifier (class linker?) should take care of resolving all exception + // classes early. LOG(WARNING) << "Unresolved exception class when finding catch block: " << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - return iter_index; + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; } ++iter_index; } - // Handler not found - return -1; + if (result != -1) { + // Handler found. 
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, + throw_location, + current_method, + catch_dex_pc, + exception); + } + return result; } @@ -640,14 +663,12 @@ void art_portable_check_cast_from_code(const mirror::Class* dest_type, const mir DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); DCHECK(src_type->IsClass()) << PrettyClass(src_type); if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassCastException;", - "%s cannot be cast to %s", - PrettyDescriptor(src_type).c_str(), - PrettyDescriptor(dest_type).c_str()); + ThrowClassCastException(dest_type, src_type); } } -void art_portable_check_put_array_element_from_code(const mirror::Object* element, const mirror::Object* array) +void art_portable_check_put_array_element_from_code(const mirror::Object* element, + const mirror::Object* array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (element == NULL) { return; @@ -658,10 +679,7 @@ void art_portable_check_put_array_element_from_code(const mirror::Object* elemen mirror::Class* component_type = array_class->GetComponentType(); mirror::Class* element_class = element->GetClass(); if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "%s cannot be stored in an array of type %s", - PrettyDescriptor(element_class).c_str(), - PrettyDescriptor(array_class).c_str()); + ThrowArrayStoreException(element_class, array_class); } return; } diff --git a/src/debugger.cc b/src/debugger.cc index 080288fc02..9bd1eb57d5 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -46,6 +46,7 @@ #include "sirt_ref.h" #include "stack_indirect_reference_table.h" #include "thread_list.h" +#include "throw_location.h" #include "utf.h" #include "well_known_classes.h" @@ -104,6 +105,55 @@ struct SingleStepControl { int stack_depth; }; +class DebugInstrumentationListener : public instrumentation::InstrumentationListener { + 
public: + DebugInstrumentationListener() {} + virtual ~DebugInstrumentationListener() {} + + virtual void MethodEntered(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (method->IsNative()) { + // TODO: post location events is a suspension point and native method entry stubs aren't. + return; + } + Dbg::PostLocationEvent(method, 0, this_object, Dbg::kMethodEntry); + } + + virtual void MethodExited(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc, const JValue& return_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + UNUSED(return_value); + if (method->IsNative()) { + // TODO: post location events is a suspension point and native method entry stubs aren't. + return; + } + Dbg::PostLocationEvent(method, dex_pc, this_object, Dbg::kMethodExit); + } + + virtual void MethodUnwind(Thread* thread, const mirror::AbstractMethod* method, + uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // We're not recorded to listen to this kind of event, so complain. + LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) + << " " << dex_pc; + } + + virtual void DexPcMoved(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t new_dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc); + } + + virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, + mirror::Throwable* exception_object) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object); + } + +} gDebugInstrumentationListener; + // JDWP is allowed unless the Zygote forbids it. 
static bool gJdwpAllowed = true; @@ -140,7 +190,7 @@ static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0; static std::vector gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); static SingleStepControl gSingleStepControl GUARDED_BY(Locks::breakpoint_lock_); -static bool IsBreakpoint(mirror::AbstractMethod* m, uint32_t dex_pc) +static bool IsBreakpoint(const mirror::AbstractMethod* m, uint32_t dex_pc) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); @@ -466,15 +516,6 @@ bool Dbg::IsDisposed() { return gDisposed; } -static void SetDebuggerUpdatesEnabledCallback(Thread* t, void* user_data) { - t->SetDebuggerUpdatesEnabled(*reinterpret_cast(user_data)); -} - -static void SetDebuggerUpdatesEnabled(bool enabled) { - MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); - Runtime::Current()->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled); -} - void Dbg::GoActive() { // Enable all debugging features, including scans for breakpoints. // This is a no-op if we're already active. @@ -483,16 +524,26 @@ void Dbg::GoActive() { return; } - LOG(INFO) << "Debugger is active"; - { // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected? 
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); CHECK_EQ(gBreakpoints.size(), 0U); } + Runtime* runtime = Runtime::Current(); + runtime->GetThreadList()->SuspendAll(); + Thread* self = Thread::Current(); + ThreadState old_state = self->SetStateUnsafe(kRunnable); + CHECK_NE(old_state, kRunnable); + runtime->GetInstrumentation()->AddListener(&gDebugInstrumentationListener, + instrumentation::Instrumentation::kMethodEntered | + instrumentation::Instrumentation::kMethodExited | + instrumentation::Instrumentation::kDexPcMoved); gDebuggerActive = true; - SetDebuggerUpdatesEnabled(true); + CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); + runtime->GetThreadList()->ResumeAll(); + + LOG(INFO) << "Debugger is active"; } void Dbg::Disconnected() { @@ -500,11 +551,22 @@ void Dbg::Disconnected() { LOG(INFO) << "Debugger is no longer active"; + // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread + // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener + // and clear the object registry. 
+ Runtime* runtime = Runtime::Current(); + runtime->GetThreadList()->SuspendAll(); + Thread* self = Thread::Current(); + ThreadState old_state = self->SetStateUnsafe(kRunnable); + runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, + instrumentation::Instrumentation::kMethodEntered | + instrumentation::Instrumentation::kMethodExited | + instrumentation::Instrumentation::kDexPcMoved); gDebuggerActive = false; - SetDebuggerUpdatesEnabled(false); - gRegistry->Clear(); gDebuggerConnected = false; + CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); + runtime->GetThreadList()->ResumeAll(); } bool Dbg::IsDebuggerActive() { @@ -1902,34 +1964,16 @@ struct GetThisVisitor : public StackVisitor { virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { if (frame_id != GetFrameId()) { return true; // continue - } - mirror::AbstractMethod* m = GetMethod(); - if (m->IsNative() || m->IsStatic()) { - this_object = NULL; } else { - uint16_t reg = DemangleSlot(0, m); - this_object = reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); + this_object = GetThisObject(); + return false; } - return false; } mirror::Object* this_object; JDWP::FrameId frame_id; }; -static mirror::Object* GetThis(Thread* self, mirror::AbstractMethod* m, size_t frame_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: should we return the 'this' we passed through to non-static native methods? 
- if (m->IsNative() || m->IsStatic()) { - return NULL; - } - - UniquePtr context(Context::Create()); - GetThisVisitor visitor(self, context.get(), frame_id); - visitor.WalkStack(); - return visitor.this_object; -} - JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) { ScopedObjectAccessUnchecked soa(Thread::Current()); @@ -2176,7 +2220,8 @@ void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl visitor.WalkStack(); } -void Dbg::PostLocationEvent(const mirror::AbstractMethod* m, int dex_pc, mirror::Object* this_object, int event_flags) { +void Dbg::PostLocationEvent(const mirror::AbstractMethod* m, int dex_pc, + mirror::Object* this_object, int event_flags) { mirror::Class* c = m->GetDeclaringClass(); JDWP::JdwpLocation location; @@ -2194,29 +2239,25 @@ void Dbg::PostLocationEvent(const mirror::AbstractMethod* m, int dex_pc, mirror: gJdwpState->PostLocationEvent(&location, this_id, event_flags); } -void Dbg::PostException(Thread* thread, - JDWP::FrameId throw_frame_id, mirror::AbstractMethod* throw_method, - uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method, +void Dbg::PostException(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, mirror::Throwable* exception_object) { if (!IsDebuggerActive()) { return; } - JDWP::JdwpLocation throw_location; - SetLocation(throw_location, throw_method, throw_dex_pc); + JDWP::JdwpLocation jdwp_throw_location; + SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc()); JDWP::JdwpLocation catch_location; SetLocation(catch_location, catch_method, catch_dex_pc); // We need 'this' for InstanceOnly filters. 
- UniquePtr context(Context::Create()); - GetThisVisitor visitor(thread, context.get(), throw_frame_id); - visitor.WalkStack(); - JDWP::ObjectId this_id = gRegistry->Add(visitor.this_object); - + JDWP::ObjectId this_id = gRegistry->Add(throw_location.GetThis()); JDWP::ObjectId exception_id = gRegistry->Add(exception_object); JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass()); - gJdwpState->PostException(&throw_location, exception_id, exception_class_id, &catch_location, this_id); + gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location, + this_id); } void Dbg::PostClassPrepare(mirror::Class* c) { @@ -2232,20 +2273,9 @@ void Dbg::PostClassPrepare(mirror::Class* c) { gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), ClassHelper(c).GetDescriptor(), state); } -void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { - if (!IsDebuggerActive() || dex_pc == -2 /* fake method exit */) { - return; - } - - size_t frame_id; - mirror::AbstractMethod* m = self->GetCurrentMethod(NULL, &frame_id); - //LOG(INFO) << "UpdateDebugger " << PrettyMethod(m) << "@" << dex_pc << " frame " << frame_id; - - if (dex_pc == -1) { - // We use a pc of -1 to represent method entry, since we might branch back to pc 0 later. - // This means that for this special notification, there can't be anything else interesting - // going on, so we're done already. - Dbg::PostLocationEvent(m, 0, GetThis(self, m, frame_id), kMethodEntry); +void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* m, uint32_t dex_pc) { + if (!IsDebuggerActive() || dex_pc == static_cast(-2) /* fake method exit */) { return; } @@ -2259,7 +2289,7 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { // If the debugger is single-stepping one of our threads, check to // see if we're that thread and we've reached a step point. 
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); - if (gSingleStepControl.is_active && gSingleStepControl.thread == self) { + if (gSingleStepControl.is_active && gSingleStepControl.thread == thread) { CHECK(!m->IsNative()); if (gSingleStepControl.step_depth == JDWP::SD_INTO) { // Step into method calls. We break when the line number @@ -2282,7 +2312,7 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { // might get unrolled past it by an exception, and it's tricky // to identify recursion.) - int stack_depth = GetStackDepth(self); + int stack_depth = GetStackDepth(thread); if (stack_depth < gSingleStepControl.stack_depth) { // popped up one or more frames, always trigger @@ -2307,7 +2337,7 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { // with the PC at the next instruction in the returned-to // function, rather than the end of the returning function. - int stack_depth = GetStackDepth(self); + int stack_depth = GetStackDepth(thread); if (stack_depth < gSingleStepControl.stack_depth) { event_flags |= kSingleStep; VLOG(jdwp) << "SS method pop"; @@ -2316,27 +2346,10 @@ void Dbg::UpdateDebugger(int32_t dex_pc, Thread* self) { } } - // Check to see if this is a "return" instruction. JDWP says we should - // send the event *after* the code has been executed, but it also says - // the location we provide is the last instruction. Since the "return" - // instruction has no interesting side effects, we should be safe. - // (We can't just move this down to the returnFromMethod label because - // we potentially need to combine it with other events.) - // We're also not supposed to generate a method exit event if the method - // terminates "with a thrown exception". 
- if (dex_pc >= 0) { - const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem(); - CHECK(code_item != NULL) << PrettyMethod(m) << " @" << dex_pc; - CHECK_LT(dex_pc, static_cast(code_item->insns_size_in_code_units_)); - if (Instruction::At(&code_item->insns_[dex_pc])->IsReturn()) { - event_flags |= kMethodExit; - } - } - // If there's something interesting going on, see if it matches one // of the debugger filters. if (event_flags != 0) { - Dbg::PostLocationEvent(m, dex_pc, GetThis(self, m, frame_id), event_flags); + Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags); } } @@ -2706,8 +2719,19 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { // We can be called while an exception is pending. We need // to preserve that across the method invocation. - SirtRef old_exception(soa.Self(), soa.Self()->GetException()); - soa.Self()->ClearException(); + SirtRef old_throw_this_object(soa.Self(), NULL); + SirtRef old_throw_method(soa.Self(), NULL); + SirtRef old_exception(soa.Self(), NULL); + uint32_t old_throw_dex_pc; + { + ThrowLocation old_throw_location; + mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location); + old_throw_this_object.reset(old_throw_location.GetThis()); + old_throw_method.reset(old_throw_location.GetMethod()); + old_exception.reset(old_exception_obj); + old_throw_dex_pc = old_throw_location.GetDexPc(); + soa.Self()->ClearException(); + } // Translate the method through the vtable, unless the debugger wants to suppress it. 
mirror::AbstractMethod* m = pReq->method_; @@ -2731,12 +2755,13 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { arg_array.BuildArgArray(soa, pReq->receiver_, reinterpret_cast(pReq->arg_values_)); InvokeWithArgArray(soa, m, &arg_array, &pReq->result_value, mh.GetShorty()[0]); - pReq->exception = gRegistry->Add(soa.Self()->GetException()); + mirror::Throwable* exception = soa.Self()->GetException(NULL); + soa.Self()->ClearException(); + pReq->exception = gRegistry->Add(exception); pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty()); if (pReq->exception != 0) { - mirror::Object* exc = soa.Self()->GetException(); - VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc); - soa.Self()->ClearException(); + VLOG(jdwp) << " JDWP invocation returning with exception=" << exception + << " " << exception->Dump(); pReq->result_value.SetJ(0); } else if (pReq->result_tag == JDWP::JT_OBJECT) { /* if no exception thrown, examine object result more closely */ @@ -2759,7 +2784,9 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { } if (old_exception.get() != NULL) { - soa.Self()->SetException(old_exception.get()); + ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(), + old_throw_dex_pc); + soa.Self()->SetException(gc_safe_throw_location, old_exception.get()); } } @@ -2943,9 +2970,6 @@ void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) { ScopedObjectAccessUnchecked soa(Thread::Current()); JDWP::ObjectId id = gRegistry->Add(t->GetPeer()); gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR")); - // If this thread's just joined the party while we're already debugging, make sure it knows - // to give us updates when it's running. 
- t->SetDebuggerUpdatesEnabled(true); } Dbg::DdmSendThreadNotification(t, type); } @@ -3326,9 +3350,9 @@ void Dbg::DdmSendHeapSegments(bool native) { Heap* heap = Runtime::Current()->GetHeap(); const Spaces& spaces = heap->GetSpaces(); Thread* self = Thread::Current(); + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) { if ((*cur)->IsAllocSpace()) { - ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); (*cur)->AsAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); } } diff --git a/src/debugger.h b/src/debugger.h index ad01011b84..eb17695249 100644 --- a/src/debugger.h +++ b/src/debugger.h @@ -39,6 +39,7 @@ class Throwable; } // namespace mirror struct AllocRecord; class Thread; +class ThrowLocation; /* * Invoke-during-breakpoint support. @@ -101,8 +102,8 @@ class Dbg { * when the debugger attaches. */ static void Connected(); - static void GoActive() LOCKS_EXCLUDED(Locks::breakpoint_lock_); - static void Disconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void GoActive() LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::mutator_lock_); + static void Disconnected() LOCKS_EXCLUDED(Locks::mutator_lock_); static void Disposed(); // Returns true if we're actually debugging with a real debugger, false if it's @@ -326,9 +327,8 @@ class Dbg { static void PostLocationEvent(const mirror::AbstractMethod* method, int pcOffset, mirror::Object* thisPtr, int eventFlags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, - mirror::AbstractMethod* throw_method, - uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method, + static void PostException(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStart(Thread* t) @@ -338,7 +338,8 @@ class Dbg 
{ static void PostClassPrepare(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void UpdateDebugger(int32_t dex_pc, Thread* self) + static void UpdateDebugger(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t new_dex_pc) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/src/dex_file.h b/src/dex_file.h index 2da3e32552..002d79c357 100644 --- a/src/dex_file.h +++ b/src/dex_file.h @@ -866,10 +866,10 @@ class DexFile { Index index_; // The base address of the memory mapping. - const byte* begin_; + const byte* const begin_; // The size of the underlying memory allocation in bytes. - size_t size_; + const size_t size_; // Typically the dex file name when available, alternatively some identifying string. // @@ -883,6 +883,7 @@ class DexFile { UniquePtr mem_map_; // A cached com.android.dex.Dex instance, possibly NULL. Use GetDexObject. + // TODO: this is mutable as it shouldn't be here. We should move it to the dex cache or similar. mutable jobject dex_object_; // Points to the header section. 
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc index df7e68d7df..c3bf3824ad 100644 --- a/src/gc/large_object_space.cc +++ b/src/gc/large_object_space.cc @@ -50,7 +50,7 @@ void LargeObjectSpace::CopyLiveToMarked() { LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) : LargeObjectSpace(name), - lock_("large object space lock", kAllocSpaceLock) + lock_("large object map space lock", kAllocSpaceLock) { } diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc index 055a7e7972..35e75cbb4b 100644 --- a/src/gc/mark_sweep.cc +++ b/src/gc/mark_sweep.cc @@ -135,8 +135,8 @@ void MarkSweep::BindBitmaps() { MarkSweep::MarkSweep(Heap* heap, bool is_concurrent) : GarbageCollector(heap), gc_barrier_(new Barrier(0)), - large_object_lock_("large object lock"), - mark_stack_expand_lock_("mark stack expand lock"), + large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), + mark_stack_expand_lock_("mark sweep mark stack expand lock"), timings_(GetName(), true), cumulative_timings_(GetName()), is_concurrent_(is_concurrent) { diff --git a/src/gc_map.h b/src/gc_map.h index 8e4dbdbee0..473b39a629 100644 --- a/src/gc_map.h +++ b/src/gc_map.h @@ -66,8 +66,11 @@ class NativePcOffsetToReferenceMap { const uint8_t* FindBitMap(uintptr_t native_pc_offset) { size_t num_entries = NumEntries(); size_t index = Hash(native_pc_offset) % num_entries; + size_t misses = 0; while (GetNativePcOffset(index) != native_pc_offset) { index = (index + 1) % num_entries; + misses++; + DCHECK_LT(misses, num_entries) << "Failed to find offset: " << native_pc_offset; } return GetBitMap(index); } diff --git a/src/heap.cc b/src/heap.cc index 2f7cb24064..468e800a67 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -611,44 +611,46 @@ void Heap::DumpSpaces() { } void Heap::VerifyObjectBody(const mirror::Object* obj) { - if (!IsAligned(obj)) { + if (UNLIKELY(!IsAligned(obj))) { LOG(FATAL) << "Object isn't aligned: " << obj; } - - // TODO: the bitmap tests below 
are racy if VerifyObjectBody is called without the - // heap_bitmap_lock_. - if (!GetLiveBitmap()->Test(obj)) { - // Check the allocation stack / live stack. - if (!std::binary_search(live_stack_->Begin(), live_stack_->End(), obj) && - std::find(allocation_stack_->Begin(), allocation_stack_->End(), obj) == - allocation_stack_->End()) { - if (large_object_space_->GetLiveObjects()->Test(obj)) { - DumpSpaces(); - LOG(FATAL) << "Object is dead: " << obj; + if (UNLIKELY(GetObjectsAllocated() <= 10)) { // Ignore early dawn of the universe verifications. + return; + } + const byte* raw_addr = reinterpret_cast(obj) + + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c = *reinterpret_cast(raw_addr); + if (UNLIKELY(c == NULL)) { + LOG(FATAL) << "Null class in object: " << obj; + } else if (UNLIKELY(!IsAligned(c))) { + LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj; + } + // Check obj.getClass().getClass() == obj.getClass().getClass().getClass() + // Note: we don't use the accessors here as they have internal sanity checks + // that we don't want to run + raw_addr = reinterpret_cast(c) + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c_c = *reinterpret_cast(raw_addr); + raw_addr = reinterpret_cast(c_c) + mirror::Object::ClassOffset().Int32Value(); + const mirror::Class* c_c_c = *reinterpret_cast(raw_addr); + CHECK_EQ(c_c, c_c_c); + + if (verify_object_mode_ != kVerifyAllFast) { + // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the + // heap_bitmap_lock_. + if (!GetLiveBitmap()->Test(obj)) { + // Check the allocation stack / live stack. 
+ if (!std::binary_search(live_stack_->Begin(), live_stack_->End(), obj) && + std::find(allocation_stack_->Begin(), allocation_stack_->End(), obj) == + allocation_stack_->End()) { + if (large_object_space_->GetLiveObjects()->Test(obj)) { + DumpSpaces(); + LOG(FATAL) << "Object is dead: " << obj; + } } } - } - - // Ignore early dawn of the universe verifications - if (verify_object_mode_ != kVerifyAllFast && GetObjectsAllocated() > 10) { - const byte* raw_addr = reinterpret_cast(obj) + - mirror::Object::ClassOffset().Int32Value(); - const mirror::Class* c = *reinterpret_cast(raw_addr); - if (c == NULL) { - LOG(FATAL) << "Null class in object: " << obj; - } else if (!IsAligned(c)) { - LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj; - } else if (!GetLiveBitmap()->Test(c)) { + if (!GetLiveBitmap()->Test(c)) { LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj; } - // Check obj.getClass().getClass() == obj.getClass().getClass().getClass() - // Note: we don't use the accessors here as they have internal sanity checks - // that we don't want to run - raw_addr = reinterpret_cast(c) + mirror::Object::ClassOffset().Int32Value(); - const mirror::Class* c_c = *reinterpret_cast(raw_addr); - raw_addr = reinterpret_cast(c_c) + mirror::Object::ClassOffset().Int32Value(); - const mirror::Class* c_c_c = *reinterpret_cast(raw_addr); - CHECK_EQ(c_c, c_c_c); } } diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc index 472cc67c16..7539066c85 100644 --- a/src/hprof/hprof.cc +++ b/src/hprof/hprof.cc @@ -41,6 +41,7 @@ #include "base/stringprintf.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" +#include "common_throws.h" #include "debugger.h" #include "dex_file-inl.h" #include "globals.h" @@ -450,16 +451,14 @@ class Hprof { if (fd_ >= 0) { out_fd = dup(fd_); if (out_fd < 0) { - self->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "Couldn't dump heap; dup(%d) failed: %s", fd_, strerror(errno)); + 
ThrowRuntimeException("Couldn't dump heap; dup(%d) failed: %s", fd_, strerror(errno)); return; } } else { out_fd = open(filename_.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0644); if (out_fd < 0) { - self->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "Couldn't dump heap; open(\"%s\") failed: %s", filename_.c_str(), - strerror(errno)); + ThrowRuntimeException("Couldn't dump heap; open(\"%s\") failed: %s", filename_.c_str(), + strerror(errno)); return; } } @@ -470,7 +469,7 @@ class Hprof { if (!okay) { std::string msg(StringPrintf("Couldn't dump heap; writing \"%s\" failed: %s", filename_.c_str(), strerror(errno))); - self->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str()); + ThrowRuntimeException("%s", msg.c_str()); LOG(ERROR) << msg; } } diff --git a/src/instrumentation.cc b/src/instrumentation.cc index 81fe63777f..55e93cbc1f 100644 --- a/src/instrumentation.cc +++ b/src/instrumentation.cc @@ -18,14 +18,17 @@ #include +#include "atomic_integer.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "debugger.h" +#include "dex_file-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" +#include "nth_caller_visitor.h" #if !defined(ART_USE_PORTABLE_COMPILER) #include "oat/runtime/oat_support_entrypoints.h" #endif @@ -34,205 +37,521 @@ #include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" -#include "trace.h" namespace art { +namespace instrumentation { -static bool InstallStubsClassVisitor(mirror::Class* klass, void*) +static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - mirror::AbstractMethod* method = klass->GetDirectMethod(i); - if (instrumentation->GetSavedCodeFromMap(method) == NULL) { 
- instrumentation->SaveAndUpdateCode(method); - } - } - - for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - mirror::AbstractMethod* method = klass->GetVirtualMethod(i); - if (instrumentation->GetSavedCodeFromMap(method) == NULL) { - instrumentation->SaveAndUpdateCode(method); - } - } - return true; + Instrumentation* instrumentation = reinterpret_cast(arg); + return instrumentation->InstallStubsForClass(klass); } -static bool UninstallStubsClassVisitor(mirror::Class* klass, void*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); +bool Instrumentation::InstallStubsForClass(mirror::Class* klass) { + bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_; + ClassLinker* class_linker = NULL; + if (uninstall) { + class_linker = Runtime::Current()->GetClassLinker(); + } + bool is_initialized = klass->IsInitialized(); for (size_t i = 0; i < klass->NumDirectMethods(); i++) { mirror::AbstractMethod* method = klass->GetDirectMethod(i); - if (instrumentation->GetSavedCodeFromMap(method) != NULL) { - instrumentation->ResetSavedCode(method); + if (!method->IsAbstract()) { + const void* new_code; + if (uninstall) { + if (is_initialized || !method->IsStatic() || method->IsConstructor()) { + new_code = class_linker->GetOatCodeFor(method); + } else { + new_code = Runtime::Current()->GetResolutionStubArray(Runtime::kStaticMethod)->GetData(); + } + } else { // !uninstall + if (!interpreter_stubs_installed_ || method->IsNative()) { + new_code = GetInstrumentationEntryPoint(); + } else { + new_code = GetInterpreterEntryPoint(); + } + } + method->SetCode(new_code); } } - for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { mirror::AbstractMethod* method = klass->GetVirtualMethod(i); - if (instrumentation->GetSavedCodeFromMap(method) != NULL) { - instrumentation->ResetSavedCode(method); + if (!method->IsAbstract()) { + const void* new_code; + if (uninstall) { + new_code = 
class_linker->GetOatCodeFor(method); + } else { // !uninstall + if (!interpreter_stubs_installed_ || method->IsNative()) { + new_code = GetInstrumentationEntryPoint(); + } else { + new_code = GetInterpreterEntryPoint(); + } + } + method->SetCode(new_code); } } return true; } -void InstrumentationInstallStack(Thread* self, void* arg) +// Places the instrumentation exit pc as the return PC for every quick frame. This also allows +// deoptimization of quick frames to interpreter frames. +static void InstrumentationInstallStack(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct InstallStackVisitor : public StackVisitor { - InstallStackVisitor(Thread* self, uintptr_t instrumentation_exit_pc) - : StackVisitor(self, NULL), self_(self), - instrumentation_exit_pc_(instrumentation_exit_pc) {} + InstallStackVisitor(Thread* thread, Context* context, uintptr_t instrumentation_exit_pc) + : StackVisitor(thread, context), instrumentation_stack_(thread->GetInstrumentationStack()), + instrumentation_exit_pc_(instrumentation_exit_pc), last_return_pc_(0) {} virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* m = GetMethod(); if (GetCurrentQuickFrame() == NULL) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId() + << " Method=" << PrettyMethod(m); + } return true; // Ignore shadow frames. } - mirror::AbstractMethod* m = GetMethod(); if (m == NULL) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Skipping upcall. Frame " << GetFrameId(); + } + last_return_pc_ = 0; return true; // Ignore upcalls. } - if (m->GetDexMethodIndex() == DexFile::kDexNoIndex16) { + if (m->IsRuntimeMethod()) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Skipping runtime method. Frame " << GetFrameId(); + } + last_return_pc_ = GetReturnPc(); return true; // Ignore unresolved methods since they will be instrumented after resolution. 
} - uintptr_t pc = GetReturnPc(); - InstrumentationStackFrame instrumentation_frame(m, pc, GetFrameId()); - self_->PushBackInstrumentationStackFrame(instrumentation_frame); + if (kVerboseInstrumentation) { + LOG(INFO) << " Installing exit stub in " << DescribeLocation(); + } + uintptr_t return_pc = GetReturnPc(); + CHECK_NE(return_pc, instrumentation_exit_pc_); + CHECK_NE(return_pc, 0U); + InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, return_pc, GetFrameId()); + if (kVerboseInstrumentation) { + LOG(INFO) << "Pushing frame " << instrumentation_frame.Dump(); + } + instrumentation_stack_->push_back(instrumentation_frame); + dex_pcs_.push_back(m->ToDexPc(last_return_pc_)); SetReturnPc(instrumentation_exit_pc_); + last_return_pc_ = return_pc; return true; // Continue. } - Thread* const self_; + std::deque* const instrumentation_stack_; + std::vector dex_pcs_; const uintptr_t instrumentation_exit_pc_; + uintptr_t last_return_pc_; }; + if (kVerboseInstrumentation) { + std::string thread_name; + thread->GetThreadName(thread_name); + LOG(INFO) << "Installing exit stubs in " << thread_name; + } + UniquePtr context(Context::Create()); uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc(); - InstallStackVisitor visitor(self, instrumentation_exit_pc); + InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc); visitor.WalkStack(true); - Trace* trace = reinterpret_cast(arg); - if (trace != NULL) { - std::deque::const_reverse_iterator it = - self->GetInstrumentationStack()->rbegin(); - std::deque::const_reverse_iterator end = - self->GetInstrumentationStack()->rend(); - for (; it != end; ++it) { - trace->LogMethodTraceEvent(self, (*it).method_, Trace::kMethodTraceEnter); - } + + // Create method enter events for all methods current on the thread's stack. 
+ Instrumentation* instrumentation = reinterpret_cast(arg); + typedef std::deque::const_reverse_iterator It; + for (It it = thread->GetInstrumentationStack()->rbegin(), + end = thread->GetInstrumentationStack()->rend(); it != end; ++it) { + mirror::Object* this_object = (*it).this_object_; + mirror::AbstractMethod* method = (*it).method_; + uint32_t dex_pc = visitor.dex_pcs_.back(); + visitor.dex_pcs_.pop_back(); + instrumentation->MethodEnterEvent(thread, this_object, method, dex_pc); } + thread->VerifyStack(); } -static void InstrumentationRestoreStack(Thread* self, void*) +// Removes the instrumentation exit pc as the return PC for every quick frame. +static void InstrumentationRestoreStack(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct RestoreStackVisitor : public StackVisitor { - RestoreStackVisitor(Thread* self, uintptr_t instrumentation_exit_pc) - : StackVisitor(self, NULL), self_(self), - instrumentation_exit_pc_(instrumentation_exit_pc) {} + RestoreStackVisitor(Thread* thread, uintptr_t instrumentation_exit_pc, + Instrumentation* instrumentation) + : StackVisitor(thread, NULL), thread_(thread), + instrumentation_exit_pc_(instrumentation_exit_pc), + instrumentation_(instrumentation), + instrumentation_stack_(thread->GetInstrumentationStack()), + frames_removed_(0) {} virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (self_->IsInstrumentationStackEmpty()) { + if (instrumentation_stack_->size() == 0) { return false; // Stop. } mirror::AbstractMethod* m = GetMethod(); + if (GetCurrentQuickFrame() == NULL) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId() << " Method=" << PrettyMethod(m); + } + return true; // Ignore shadow frames. + } if (m == NULL) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Skipping upcall. Frame " << GetFrameId(); + } return true; // Ignore upcalls. 
} - uintptr_t pc = GetReturnPc(); - if (pc == instrumentation_exit_pc_) { - InstrumentationStackFrame instrumentation_frame = self_->PopInstrumentationStackFrame(); - SetReturnPc(instrumentation_frame.return_pc_); - CHECK(m == instrumentation_frame.method_); - CHECK_EQ(GetFrameId(), instrumentation_frame.frame_id_); - Runtime* runtime = Runtime::Current(); - if (runtime->IsMethodTracingActive()) { - Trace* trace = runtime->GetInstrumentation()->GetTrace(); - trace->LogMethodTraceEvent(self_, m, Trace::kMethodTraceExit); + typedef std::deque::const_iterator It; // TODO: C++0x auto + bool removed_stub = false; + // TODO: make this search more efficient? + for (It it = instrumentation_stack_->begin(), end = instrumentation_stack_->end(); it != end; + ++it) { + InstrumentationStackFrame instrumentation_frame = *it; + if (instrumentation_frame.frame_id_ == GetFrameId()) { + if (kVerboseInstrumentation) { + LOG(INFO) << " Removing exit stub in " << DescribeLocation(); + } + CHECK(m == instrumentation_frame.method_) << PrettyMethod(m); + SetReturnPc(instrumentation_frame.return_pc_); + // Create the method exit events. As the methods didn't really exit the result is 0. + instrumentation_->MethodExitEvent(thread_, instrumentation_frame.this_object_, m, + GetDexPc(), JValue()); + frames_removed_++; + removed_stub = true; + break; + } + } + if (!removed_stub) { + if (kVerboseInstrumentation) { + LOG(INFO) << " No exit stub in " << DescribeLocation(); + DescribeStack(thread_); } } return true; // Continue. 
} - Thread* const self_; + Thread* const thread_; const uintptr_t instrumentation_exit_pc_; + Instrumentation* const instrumentation_; + std::deque* const instrumentation_stack_; + size_t frames_removed_; }; - uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc(); - RestoreStackVisitor visitor(self, instrumentation_exit_pc); - visitor.WalkStack(true); + if (kVerboseInstrumentation) { + std::string thread_name; + thread->GetThreadName(thread_name); + LOG(INFO) << "Removing exit stubs in " << thread_name; + } + std::deque* stack = thread->GetInstrumentationStack(); + if (stack->size() > 0) { + Instrumentation* instrumentation = reinterpret_cast(arg); + uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc(); + RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation); + visitor.WalkStack(true); + CHECK_EQ(visitor.frames_removed_, stack->size()); + while (stack->size() > 0) { + stack->pop_front(); + } + } } -Instrumentation::~Instrumentation() { - delete trace_; +void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + bool require_entry_exit_stubs = false; + bool require_interpreter = false; + if ((events & kMethodEntered) != 0) { + method_entry_listeners_.push_back(listener); + require_entry_exit_stubs = true; + have_method_entry_listeners_ = true; + } + if ((events & kMethodExited) != 0) { + method_exit_listeners_.push_back(listener); + require_entry_exit_stubs = true; + have_method_exit_listeners_ = true; + } + if ((events & kMethodUnwind) != 0) { + method_unwind_listeners_.push_back(listener); + have_method_unwind_listeners_ = true; + } + if ((events & kDexPcMoved) != 0) { + dex_pc_listeners_.push_back(listener); + require_interpreter = true; + have_dex_pc_listeners_ = true; + } + ConfigureStubs(require_entry_exit_stubs, require_interpreter); } -void Instrumentation::InstallStubs() { - Thread* self = Thread::Current(); - 
Locks::thread_list_lock_->AssertNotHeld(self); - Runtime::Current()->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, NULL); - MutexLock mu(self, *Locks::thread_list_lock_); - Runtime::Current()->GetThreadList()->ForEach(InstrumentationInstallStack, GetTrace()); +void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + bool require_entry_exit_stubs = false; + bool require_interpreter = false; + + if ((events & kMethodEntered) != 0) { + bool contains = std::find(method_entry_listeners_.begin(), method_entry_listeners_.end(), + listener) != method_entry_listeners_.end(); + if (contains) { + method_entry_listeners_.remove(listener); + } + have_method_entry_listeners_ = method_entry_listeners_.size() > 0; + require_entry_exit_stubs |= have_method_entry_listeners_; + } + if ((events & kMethodExited) != 0) { + bool contains = std::find(method_exit_listeners_.begin(), method_exit_listeners_.end(), + listener) != method_exit_listeners_.end(); + if (contains) { + method_exit_listeners_.remove(listener); + } + have_method_exit_listeners_ = method_exit_listeners_.size() > 0; + require_entry_exit_stubs |= have_method_exit_listeners_; + } + if ((events & kMethodUnwind) != 0) { + method_unwind_listeners_.remove(listener); + } + if ((events & kDexPcMoved) != 0) { + bool contains = std::find(dex_pc_listeners_.begin(), dex_pc_listeners_.end(), + listener) != dex_pc_listeners_.end(); + if (contains) { + dex_pc_listeners_.remove(listener); + } + have_dex_pc_listeners_ = dex_pc_listeners_.size() > 0; + require_interpreter |= have_dex_pc_listeners_; + } + ConfigureStubs(require_entry_exit_stubs, require_interpreter); } -void Instrumentation::UninstallStubs() { +void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter) { + interpret_only_ = require_interpreter || forced_interpret_only_; + // Compute what level of instrumentation is required and 
compare to current. + int desired_level, current_level; + if (require_interpreter) { + desired_level = 2; + } else if (require_entry_exit_stubs) { + desired_level = 1; + } else { + desired_level = 0; + } + if (interpreter_stubs_installed_) { + current_level = 2; + } else if (entry_exit_stubs_installed_) { + current_level = 1; + } else { + current_level = 0; + } + if (desired_level == current_level) { + // We're already set. + return; + } Thread* self = Thread::Current(); + Runtime* runtime = Runtime::Current(); Locks::thread_list_lock_->AssertNotHeld(self); - Runtime::Current()->GetClassLinker()->VisitClasses(UninstallStubsClassVisitor, NULL); - MutexLock mu(self, *Locks::thread_list_lock_); - Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, NULL); + if (desired_level > 0) { + if (require_interpreter) { + interpreter_stubs_installed_ = true; + } else { + CHECK(require_entry_exit_stubs); + entry_exit_stubs_installed_ = true; + } + runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + instrumentation_stubs_installed_ = true; + MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); + runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this); + } else { + interpreter_stubs_installed_ = false; + entry_exit_stubs_installed_ = false; + runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + instrumentation_stubs_installed_ = false; + MutexLock mu(self, *Locks::thread_list_lock_); + Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this); + } } -void Instrumentation::AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code) { - saved_code_map_.Put(method, code); +void Instrumentation::UpdateMethodsCode(mirror::AbstractMethod* method, const void* code) const { + if (LIKELY(!instrumentation_stubs_installed_)) { + method->SetCode(code); + } } -void Instrumentation::RemoveSavedCodeFromMap(const mirror::AbstractMethod* method) { - 
saved_code_map_.erase(method); +const void* Instrumentation::GetQuickCodeFor(const mirror::AbstractMethod* method) const { + Runtime* runtime = Runtime::Current(); + if (LIKELY(!instrumentation_stubs_installed_)) { + const void* code = method->GetCode(); + DCHECK(code != NULL); + if (LIKELY(code != runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData())) { + return code; + } + } + return runtime->GetClassLinker()->GetOatCodeFor(method); } -const void* Instrumentation::GetSavedCodeFromMap(const mirror::AbstractMethod* method) { - typedef SafeMap::const_iterator It; // TODO: C++0x auto - It it = saved_code_map_.find(method); - if (it == saved_code_map_.end()) { - return NULL; - } else { - return it->second; +void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc) const { + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = method_entry_listeners_.begin(), end = method_entry_listeners_.end(); it != end; + ++it) { + (*it)->MethodEntered(thread, this_object, method, dex_pc); } } -void Instrumentation::SaveAndUpdateCode(mirror::AbstractMethod* method) { -#if defined(ART_USE_PORTABLE_COMPILER) - UNUSED(method); - UNIMPLEMENTED(FATAL); -#else - void* instrumentation_stub = GetInstrumentationEntryPoint(); - CHECK(GetSavedCodeFromMap(method) == NULL); - AddSavedCodeToMap(method, method->GetCode()); - method->SetCode(instrumentation_stub); -#endif +void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc, const JValue& return_value) const { + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = method_exit_listeners_.begin(), end = method_exit_listeners_.end(); it != end; + ++it) { + (*it)->MethodExited(thread, this_object, method, dex_pc, return_value); + } } -void Instrumentation::ResetSavedCode(mirror::AbstractMethod* method) { - 
CHECK(GetSavedCodeFromMap(method) != NULL); - method->SetCode(GetSavedCodeFromMap(method)); - RemoveSavedCodeFromMap(method); +void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc) const { + if (have_method_unwind_listeners_) { + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = method_unwind_listeners_.begin(), end = method_unwind_listeners_.end(); it != end; + ++it) { + (*it)->MethodUnwind(thread, method, dex_pc); + } + } } -Trace* Instrumentation::GetTrace() const { - return trace_; +void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc) const { + // TODO: STL copy-on-write collection? The copy below is due to the debug listener having an + // action where it can remove itself as a listener and break the iterator. The copy only works + // around the problem and in general we may have to move to something like reference counting to + // ensure listeners are deleted correctly. 
+ std::list copy(dex_pc_listeners_); + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = copy.begin(), end = copy.end(); it != end; ++it) { + (*it)->DexPcMoved(thread, this_object, method, dex_pc); + } } -void Instrumentation::SetTrace(Trace* trace) { - trace_ = trace; +void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, + uint32_t catch_dex_pc, + mirror::Throwable* exception_object) { + if (have_exception_caught_listeners_) { + typedef std::list::const_iterator It; // TODO: C++0x auto + for (It it = exception_caught_listeners_.begin(), end = exception_caught_listeners_.end(); + it != end; ++it) { + (*it)->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object); + } + } } -void Instrumentation::RemoveTrace() { - delete trace_; - trace_ = NULL; +static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame, + int delta) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t frame_id = StackVisitor::ComputeNumFrames(self) + delta; + if (frame_id != instrumentation_frame.frame_id_) { + LOG(ERROR) << "Expected frame_id=" << frame_id << " but found " + << instrumentation_frame.frame_id_; + StackVisitor::DescribeStack(self); + CHECK_EQ(frame_id, instrumentation_frame.frame_id_); + } } -uint32_t InstrumentationMethodUnwindFromCode(Thread* self) { - Trace* trace = Runtime::Current()->GetInstrumentation()->GetTrace(); - InstrumentationStackFrame instrumentation_frame = self->PopInstrumentationStackFrame(); +void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, + mirror::AbstractMethod* method, + uintptr_t lr) { + // We have a callee-save frame meaning this value is guaranteed to never be 0. 
+ size_t frame_id = StackVisitor::ComputeNumFrames(self); + std::deque* stack = self->GetInstrumentationStack(); + if (kVerboseInstrumentation) { + LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << (void*)lr; + } + instrumentation::InstrumentationStackFrame instrumentation_frame(this_object, method, lr, + frame_id); + stack->push_front(instrumentation_frame); + + MethodEnterEvent(self, this_object, method, 0); +} + +uint64_t Instrumentation::PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc, + uint64_t gpr_result, uint64_t fpr_result) { + // Do the pop. + std::deque* stack = self->GetInstrumentationStack(); + CHECK_GT(stack->size(), 0U); + InstrumentationStackFrame instrumentation_frame = stack->front(); + stack->pop_front(); + + // Set return PC and check the sanity of the stack. + *return_pc = instrumentation_frame.return_pc_; + CheckStackDepth(self, instrumentation_frame, 0); + + mirror::AbstractMethod* method = instrumentation_frame.method_; + char return_shorty = MethodHelper(method).GetShorty()[0]; + JValue return_value; + if (return_shorty == 'V') { + return_value.SetJ(0); + } else if (return_shorty == 'F' || return_shorty == 'D') { + return_value.SetJ(fpr_result); + } else { + return_value.SetJ(gpr_result); + } + // TODO: improve the dex pc information here, requires knowledge of current PC as opposed to + // return_pc. + uint32_t dex_pc = DexFile::kDexNoIndex; + mirror::Object* this_object = instrumentation_frame.this_object_; + MethodExitEvent(self, this_object, instrumentation_frame.method_, dex_pc, return_value); + + bool deoptimize = false; + if (interpreter_stubs_installed_) { + // Deoptimize unless we're returning to an upcall. 
+ NthCallerVisitor visitor(self, 1, true); + visitor.WalkStack(true); + deoptimize = visitor.caller != NULL; + if (deoptimize && kVerboseInstrumentation) { + LOG(INFO) << "Deoptimizing into " << PrettyMethod(visitor.caller); + } + } + if (deoptimize) { + if (kVerboseInstrumentation) { + LOG(INFO) << "Deoptimizing from " << PrettyMethod(method) + << " result is " << std::hex << return_value.GetJ(); + } + self->SetDeoptimizationReturnValue(return_value); + return static_cast(GetDeoptimizationEntryPoint()) | + (static_cast(*return_pc) << 32); + } else { + if (kVerboseInstrumentation) { + LOG(INFO) << "Returning from " << PrettyMethod(method) << " to PC " << (void*)(*return_pc); + } + return *return_pc; + } +} + +void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) const { + // Do the pop. + std::deque* stack = self->GetInstrumentationStack(); + CHECK_GT(stack->size(), 0U); + InstrumentationStackFrame instrumentation_frame = stack->front(); + // TODO: bring back CheckStackDepth(self, instrumentation_frame, 2); + stack->pop_front(); + mirror::AbstractMethod* method = instrumentation_frame.method_; - uint32_t lr = instrumentation_frame.return_pc_; + if (is_deoptimization) { + if (kVerboseInstrumentation) { + LOG(INFO) << "Popping for deoptimization " << PrettyMethod(method); + } + } else { + if (kVerboseInstrumentation) { + LOG(INFO) << "Popping for unwind " << PrettyMethod(method); + } - trace->LogMethodTraceEvent(self, method, Trace::kMethodTraceUnwind); + // Notify listeners of method unwind. + // TODO: improve the dex pc information here, requires knowledge of current PC as opposed to + // return_pc. 
+ uint32_t dex_pc = DexFile::kDexNoIndex; + MethodUnwindEvent(self, instrumentation_frame.this_object_, method, dex_pc); + } +} - return lr; +std::string InstrumentationStackFrame::Dump() const { + std::ostringstream os; + os << "Frame " << frame_id_ << " " << PrettyMethod(method_) << ":" + << reinterpret_cast(return_pc_) << " this=" << reinterpret_cast(this_object_); + return os.str(); } +} // namespace instrumentation } // namespace art diff --git a/src/instrumentation.h b/src/instrumentation.h index fb49bf8208..6a4a1421da 100644 --- a/src/instrumentation.h +++ b/src/instrumentation.h @@ -18,61 +18,261 @@ #define ART_SRC_INSTRUMENTATION_H_ #include "base/macros.h" -#include "safe_map.h" +#include "locks.h" #include +#include namespace art { - namespace mirror { class AbstractMethod; -} +class Class; +class Object; +class Throwable; +} // namespace mirror +union JValue; class Thread; -class Trace; +class ThrowLocation; -uint32_t InstrumentationMethodUnwindFromCode(Thread* self); +namespace instrumentation { -struct InstrumentationStackFrame { - InstrumentationStackFrame() : method_(NULL), return_pc_(0), frame_id_(0) {} - InstrumentationStackFrame(mirror::AbstractMethod* method, uintptr_t return_pc, size_t frame_id) - : method_(method), return_pc_(return_pc), frame_id_(frame_id) { - } - mirror::AbstractMethod* method_; - uintptr_t return_pc_; - size_t frame_id_; +const bool kVerboseInstrumentation = false; + +// Instrumentation event listener API. Registered listeners will get the appropriate call back for +// the events they are listening for. The call backs supply the thread, method and dex_pc the event +// occurred upon. The thread may or may not be Thread::Current(). +struct InstrumentationListener { + InstrumentationListener() {} + virtual ~InstrumentationListener() {} + + // Call-back for when a method is entered. 
+ virtual void MethodEntered(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + + // Call-back for when a method is exited. + // TODO: its likely passing the return value would be useful, however, we may need to get and + // parse the shorty to determine what kind of register holds the result. + virtual void MethodExited(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc, + const JValue& return_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + + // Call-back for when a method is popped due to an exception throw. A method will either cause a + // MethodExited call-back or a MethodUnwind call-back when its activation is removed. + virtual void MethodUnwind(Thread* thread, const mirror::AbstractMethod* method, + uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + + // Call-back for when the dex pc moves in a method. + virtual void DexPcMoved(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t new_dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + + // Call-back when an exception is caught. + virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, + mirror::Throwable* exception_object) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; }; +// Instrumentation is a catch-all for when extra information is required from the runtime. The +// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs +// to method entry and exit, it may also force execution to be switched to the interpreter and +// trigger deoptimization. 
class Instrumentation { public: - Instrumentation() {} - ~Instrumentation(); + enum InstrumentationEvent { + kMethodEntered = 1, + kMethodExited = 2, + kMethodUnwind = 4, + kDexPcMoved = 8, + kExceptionCaught = 16 + }; + + Instrumentation() : + instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false), + interpreter_stubs_installed_(false), + interpret_only_(false), forced_interpret_only_(false), + have_method_entry_listeners_(false), have_method_exit_listeners_(false), + have_method_unwind_listeners_(false), have_dex_pc_listeners_(false), + have_exception_caught_listeners_(false) {} + + // Add a listener to be notified of the masked together sent of instrumentation events. This + // suspend the runtime to install stubs. You are expected to hold the mutator lock as a proxy + // for saying you should have suspended all threads (installing stubs while threads are running + // will break). + void AddListener(InstrumentationListener* listener, uint32_t events) + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + + // Removes a listener possibly removing instrumentation stubs. + void RemoveListener(InstrumentationListener* listener, uint32_t events) + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + + // Update the code of a method respecting any installed stubs. + void UpdateMethodsCode(mirror::AbstractMethod* method, const void* code) const; + + // Get the quick code for the given method. More efficient than asking the class linker as it + // will short-cut to GetCode if instrumentation and static method resolution stubs aren't + // installed. 
+ const void* GetQuickCodeFor(const mirror::AbstractMethod* method) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void ForceInterpretOnly() { + interpret_only_ = true; + forced_interpret_only_ = true; + } + + // Called by AbstractMethod::Invoke to determine dispatch mechanism. + bool InterpretOnly() const { + return interpret_only_; + } + + bool ShouldPortableCodeDeoptimize() const { + return instrumentation_stubs_installed_; + } + + bool AreExitStubsInstalled() const { + return instrumentation_stubs_installed_; + } + + // Inform listeners that a method has been entered. A dex PC is provided as we may install + // listeners into executing code and get method enter events for methods already on the stack. + void MethodEnterEvent(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (have_method_entry_listeners_) { + MethodEnterEventImpl(thread, this_object, method, dex_pc); + } + } + + // Inform listeners that a method has been exited. + void MethodExitEvent(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc, + const JValue& return_value) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (have_method_exit_listeners_) { + MethodExitEventImpl(thread, this_object, method, dex_pc, return_value); + } + } + + // Inform listeners that a method has been exited due to an exception. + void MethodUnwindEvent(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Inform listeners that the dex pc has moved (only supported by the interpreter). 
+ void DexPcMovedEvent(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (have_dex_pc_listeners_) { + DexPcMovedEventImpl(thread, this_object, method, dex_pc); + } + } - // Replaces code of each method with a pointer to a stub for method tracing. - void InstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_); + // Inform listeners that an exception was caught. + void ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, + mirror::Throwable* exception_object) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Restores original code for each method and fixes the return values of each thread's stack. - void UninstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_); + // Called when an instrumented method is entered. The intended link register (lr) is saved so + // that returning causes a branch to the method exit stub. Generates method enter events. + void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, + mirror::AbstractMethod* method, uintptr_t lr) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const void* GetSavedCodeFromMap(const mirror::AbstractMethod* method); - void SaveAndUpdateCode(mirror::AbstractMethod* method); - void ResetSavedCode(mirror::AbstractMethod* method); + // Called when an instrumented method is exited. Removes the pushed instrumentation frame + // returning the intended link register. Generates method exit events. + uint64_t PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc, uint64_t gpr_result, + uint64_t fpr_result) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Trace* GetTrace() const; - void SetTrace(Trace* trace); - void RemoveTrace(); + // Pops an instrumentation frame from the current thread and generate an unwind event. 
+ void PopMethodForUnwind(Thread* self, bool is_deoptimization) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Call back for configure stubs. + bool InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - void AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code); - void RemoveSavedCodeFromMap(const mirror::AbstractMethod* method); + // Does the job of installing or removing instrumentation code within methods. + void ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter) + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + + void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MethodExitEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, + uint32_t dex_pc, const JValue& return_value) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Have we hijacked AbstractMethod::code_ so that it calls instrumentation/interpreter code? + bool instrumentation_stubs_installed_; + + // Have we hijacked AbstractMethod::code_ to reference the enter/exit stubs? + bool entry_exit_stubs_installed_; + + // Have we hijacked AbstractMethod::code_ to reference the enter interpreter stub? + bool interpreter_stubs_installed_; + + // Do we need the fidelity of events that we only get from running within the interpreter? + bool interpret_only_; - // Maps a method to its original code pointer. - SafeMap saved_code_map_; + // Did the runtime request we only run in the interpreter? ie -Xint mode. 
+ bool forced_interpret_only_; - Trace* trace_; + // Do we have any listeners for method entry events? Short-cut to avoid taking the + // instrumentation_lock_. + bool have_method_entry_listeners_; + + // Do we have any listeners for method exit events? Short-cut to avoid taking the + // instrumentation_lock_. + bool have_method_exit_listeners_; + + // Do we have any listeners for method unwind events? Short-cut to avoid taking the + // instrumentation_lock_. + bool have_method_unwind_listeners_; + + // Do we have any listeners for dex move events? Short-cut to avoid taking the + // instrumentation_lock_. + bool have_dex_pc_listeners_; + + // Do we have any exception caught listeners? Short-cut to avoid taking the instrumentation_lock_. + bool have_exception_caught_listeners_; + + // The event listeners, written to with the mutator_lock_ exclusively held. + std::list method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_); + std::list method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_); + std::list method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_); + std::list dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_); + std::list exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(Instrumentation); }; +// An element in the instrumentation side stack maintained in art::Thread. 
+struct InstrumentationStackFrame { + InstrumentationStackFrame(mirror::Object* this_object, mirror::AbstractMethod* method, + uintptr_t return_pc, size_t frame_id) + : this_object_(this_object), method_(method), return_pc_(return_pc), frame_id_(frame_id) { + } + + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + mirror::Object* this_object_; + mirror::AbstractMethod* method_; + const uintptr_t return_pc_; + const size_t frame_id_; +}; + +} // namespace instrumentation } // namespace art #endif // ART_SRC_INSTRUMENTATION_H_ diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc index 91b381cca5..b82c632236 100644 --- a/src/interpreter/interpreter.cc +++ b/src/interpreter/interpreter.cc @@ -21,7 +21,6 @@ #include "base/logging.h" #include "class_linker-inl.h" #include "common_throws.h" -#include "debugger.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "gc/card_table-inl.h" @@ -50,10 +49,6 @@ static const int32_t kMinInt = std::numeric_limits::min(); static const int64_t kMaxLong = std::numeric_limits::max(); static const int64_t kMinLong = std::numeric_limits::min(); -static JDWP::FrameId throw_frame_id_ = 0; -static AbstractMethod* throw_method_ = NULL; -static uint32_t throw_dex_pc_ = 0; - static void UnstartedRuntimeInvoke(Thread* self, AbstractMethod* target_method, Object* receiver, uint32_t* args, JValue* result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -441,7 +436,7 @@ static void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, } else { obj = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(f, true); + ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true); return; } } @@ -488,7 +483,8 @@ static void DoFieldPut(Thread* self, ShadowFrame& shadow_frame, } else { obj = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(obj == NULL)) { - 
ThrowNullPointerExceptionForFieldAccess(f, false); + ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), + f, false); return; } } @@ -523,7 +519,7 @@ static void DoFieldPut(Thread* self, ShadowFrame& shadow_frame, static void DoIntDivide(Thread* self, ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { - self->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); + ThrowArithmeticExceptionDivideByZero(self); } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) { shadow_frame.SetVReg(result_reg, kMinInt); } else { @@ -534,7 +530,7 @@ static void DoIntDivide(Thread* self, ShadowFrame& shadow_frame, size_t result_r static void DoIntRemainder(Thread* self, ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { - self->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); + ThrowArithmeticExceptionDivideByZero(self); } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) { shadow_frame.SetVReg(result_reg, 0); } else { @@ -545,7 +541,7 @@ static void DoIntRemainder(Thread* self, ShadowFrame& shadow_frame, size_t resul static void DoLongDivide(Thread* self, ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { - self->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); + ThrowArithmeticExceptionDivideByZero(self); } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) { shadow_frame.SetVRegLong(result_reg, kMinLong); } else { @@ -556,7 +552,7 @@ static void DoLongDivide(Thread* self, ShadowFrame& shadow_frame, size_t result_ static void DoLongRemainder(Thread* self, ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { - self->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); + ThrowArithmeticExceptionDivideByZero(self); } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) { shadow_frame.SetVRegLong(result_reg, 0); } else { @@ -567,41 +563,46 @@ static void DoLongRemainder(Thread* self, ShadowFrame& shadow_frame, size_t resu static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (UNLIKELY(!shadow_frame.HasReferenceArray())) { + LOG(FATAL) << "Invalid shadow frame for interpreter use"; + return JValue(); + } + self->VerifyStack(); + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); const uint16_t* insns = code_item->insns_; const Instruction* inst = Instruction::At(insns + shadow_frame.GetDexPC()); - bool entry = (inst->GetDexPc(insns) == 0); + if (inst->GetDexPc(insns) == 0) { // We are entering the method as opposed to deoptimizing.. 
+ instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(), shadow_frame.GetMethod(), + 0); + } while (true) { CheckSuspend(self); uint32_t dex_pc = inst->GetDexPc(insns); shadow_frame.SetDexPC(dex_pc); - if (entry) { - Dbg::UpdateDebugger(-1, self); - } - entry = false; - Dbg::UpdateDebugger(dex_pc, self); + instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(), shadow_frame.GetMethod(), + dex_pc); DecodedInstruction dec_insn(inst); const bool kTracing = false; if (kTracing) { - LOG(INFO) << PrettyMethod(shadow_frame.GetMethod()) - << StringPrintf("\n0x%x: %s\nReferences:", - inst->GetDexPc(insns), inst->DumpString(&mh.GetDexFile()).c_str()); +#define TRACE_LOG std::cerr + TRACE_LOG << PrettyMethod(shadow_frame.GetMethod()) + << StringPrintf("\n0x%x: ", inst->GetDexPc(insns)) + << inst->DumpString(&mh.GetDexFile()) << "\n"; for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) { - Object* o = shadow_frame.GetVRegReference(i); - if (o != NULL) { - if (o->GetClass()->IsStringClass() && o->AsString()->GetCharArray() != NULL) { - LOG(INFO) << i << ": java.lang.String " << static_cast(o) - << " \"" << o->AsString()->ToModifiedUtf8() << "\""; + uint32_t raw_value = shadow_frame.GetVReg(i); + Object* ref_value = shadow_frame.GetVRegReference(i); + TRACE_LOG << StringPrintf(" vreg%d=0x%08X", i, raw_value); + if (ref_value != NULL) { + if (ref_value->GetClass()->IsStringClass() && + ref_value->AsString()->GetCharArray() != NULL) { + TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\""; } else { - LOG(INFO) << i << ": " << PrettyTypeOf(o) << " " << static_cast(o); + TRACE_LOG << "/" << PrettyTypeOf(ref_value); } - } else { - LOG(INFO) << i << ": null"; } } - LOG(INFO) << "vregs:"; - for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) { - LOG(INFO) << StringPrintf("%d: %08x", i, shadow_frame.GetVReg(i)); - } + TRACE_LOG << "\n"; +#undef TRACE_LOG } const Instruction* next_inst = inst->Next(); switch 
(dec_insn.opcode) { @@ -632,31 +633,42 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c shadow_frame.SetVRegReference(dec_insn.vA, result_register.GetL()); break; case Instruction::MOVE_EXCEPTION: { - Throwable* exception = self->GetException(); + Throwable* exception = self->GetException(NULL); self->ClearException(); shadow_frame.SetVRegReference(dec_insn.vA, exception); break; } case Instruction::RETURN_VOID: { JValue result; - result.SetJ(0); + instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(), + shadow_frame.GetMethod(), shadow_frame.GetDexPC(), + result); return result; } case Instruction::RETURN: { JValue result; result.SetJ(0); result.SetI(shadow_frame.GetVReg(dec_insn.vA)); + instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(), + shadow_frame.GetMethod(), shadow_frame.GetDexPC(), + result); return result; } case Instruction::RETURN_WIDE: { JValue result; result.SetJ(shadow_frame.GetVRegLong(dec_insn.vA)); + instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(), + shadow_frame.GetMethod(), shadow_frame.GetDexPC(), + result); return result; } case Instruction::RETURN_OBJECT: { JValue result; result.SetJ(0); result.SetL(shadow_frame.GetVRegReference(dec_insn.vA)); + instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(), + shadow_frame.GetMethod(), shadow_frame.GetDexPC(), + result); return result; } case Instruction::CONST_4: { @@ -721,7 +733,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::MONITOR_ENTER: { Object* obj = shadow_frame.GetVRegReference(dec_insn.vA); if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); } else { DoMonitorEnter(self, obj); } @@ -730,7 +742,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case 
Instruction::MONITOR_EXIT: { Object* obj = shadow_frame.GetVRegReference(dec_insn.vA); if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); } else { DoMonitorExit(self, obj); } @@ -743,10 +755,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c } else { Object* obj = shadow_frame.GetVRegReference(dec_insn.vA); if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) { - self->ThrowNewExceptionF("Ljava/lang/ClassCastException;", - "%s cannot be cast to %s", - PrettyDescriptor(obj->GetClass()).c_str(), - PrettyDescriptor(c).c_str()); + ThrowClassCastException(c, obj->GetClass()); } } break; @@ -764,7 +773,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::ARRAY_LENGTH: { Object* array = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(array == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } shadow_frame.SetVReg(dec_insn.vA, array->AsArray()->GetLength()); @@ -787,7 +796,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c int32_t length = dec_insn.vA; CHECK(is_range || length <= 5); if (UNLIKELY(length < 0)) { - self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); + ThrowNegativeArraySizeException(length); break; } Class* arrayClass = ResolveVerifyAndClinit(dec_insn.vB, shadow_frame.GetMethod(), self, false, true); @@ -799,11 +808,11 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c Class* componentClass = arrayClass->GetComponentType(); if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) { if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) { - 
self->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "Bad filled array request for type %s", - PrettyDescriptor(componentClass).c_str()); + ThrowRuntimeException("Bad filled array request for type %s", + PrettyDescriptor(componentClass).c_str()); } else { - self->ThrowNewExceptionF("Ljava/lang/InternalError;", + self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(), + "Ljava/lang/InternalError;", "Found type %s; filled-new-array not implemented for anything but \'int\'", PrettyDescriptor(componentClass).c_str()); } @@ -902,9 +911,12 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c break; } case Instruction::THROW: { - Object* o = shadow_frame.GetVRegReference(dec_insn.vA); - Throwable* t = (o == NULL) ? NULL : o->AsThrowable(); - self->DeliverException(t); + Object* exception = shadow_frame.GetVRegReference(dec_insn.vA); + if (exception == NULL) { + ThrowNullPointerException(NULL, "throw with null exception"); + } else { + self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable()); + } break; } case Instruction::GOTO: @@ -962,8 +974,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::FILL_ARRAY_DATA: { Object* obj = shadow_frame.GetVRegReference(dec_insn.vA); if (UNLIKELY(obj == NULL)) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", - "null array in FILL_ARRAY_DATA"); + ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); break; } Array* array = obj->AsArray(); @@ -972,9 +983,10 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c const Instruction::ArrayDataPayload* payload = reinterpret_cast(insns + dex_pc + dec_insn.vB); if (UNLIKELY(static_cast(payload->element_count) > array->GetLength())) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "failed FILL_ARRAY_DATA; length=%d, index=%d", - array->GetLength(), 
payload->element_count); + self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(), + "Ljava/lang/ArrayIndexOutOfBoundsException;", + "failed FILL_ARRAY_DATA; length=%d, index=%d", + array->GetLength(), payload->element_count); break; } uint32_t size_in_bytes = payload->element_count * payload->element_width; @@ -1068,7 +1080,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_BOOLEAN: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1078,7 +1090,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_BYTE: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1088,7 +1100,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_CHAR: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1098,7 +1110,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_SHORT: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + 
ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1108,7 +1120,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1118,7 +1130,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_WIDE: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1128,7 +1140,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c case Instruction::AGET_OBJECT: { Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1139,7 +1151,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c uint8_t val = shadow_frame.GetVReg(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1150,7 +1162,7 @@ static JValue Execute(Thread* self, 
MethodHelper& mh, const DexFile::CodeItem* c int8_t val = shadow_frame.GetVReg(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1161,7 +1173,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c uint16_t val = shadow_frame.GetVReg(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1172,7 +1184,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c int16_t val = shadow_frame.GetVReg(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1183,7 +1195,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c int32_t val = shadow_frame.GetVReg(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1194,7 +1206,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c int64_t val = shadow_frame.GetVRegLong(dec_insn.vA); Object* a = 
shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1205,7 +1217,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c Object* val = shadow_frame.GetVRegReference(dec_insn.vA); Object* a = shadow_frame.GetVRegReference(dec_insn.vB); if (UNLIKELY(a == NULL)) { - ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns)); + ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); break; } int32_t index = shadow_frame.GetVReg(dec_insn.vC); @@ -1777,22 +1789,21 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c break; } if (UNLIKELY(self->IsExceptionPending())) { - if (throw_frame_id_ == 0) { - throw_method_ = shadow_frame.GetMethod(); - throw_dex_pc_ = dex_pc; - } - throw_frame_id_++; + self->VerifyStack(); + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); uint32_t found_dex_pc = - shadow_frame.GetMethod()->FindCatchBlock(self->GetException()->GetClass(), - inst->GetDexPc(insns)); + shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), inst->GetDexPc(insns)); if (found_dex_pc == DexFile::kDexNoIndex) { JValue result; result.SetJ(0); + instrumentation->MethodUnwindEvent(self, shadow_frame.GetThisObject(), + shadow_frame.GetMethod(), shadow_frame.GetDexPC()); return result; // Handler in caller. 
} else { - Dbg::PostException(self, throw_frame_id_, throw_method_, throw_dex_pc_, - shadow_frame.GetMethod(), found_dex_pc, self->GetException()); - throw_frame_id_ = 0; + Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, throw_location, + shadow_frame.GetMethod(), + found_dex_pc, exception); next_inst = Instruction::At(insns + found_dex_pc); } } @@ -1816,8 +1827,9 @@ void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* re num_regs = code_item->registers_size_; num_ins = code_item->ins_size_; } else if (method->IsAbstract()) { - self->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", - PrettyMethod(method).c_str()); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;", + "abstract method \"%s\"", PrettyMethod(method).c_str()); return; } else { DCHECK(method->IsNative()); @@ -1884,23 +1896,18 @@ void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* re self->PopShadowFrame(); } -JValue EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame& shadow_frame, JValue ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - MethodHelper mh(shadow_frame.GetMethod()); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - return Execute(self, mh, code_item, shadow_frame, ret_val); -} - -void EnterInterpreterFromLLVM(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) +void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JValue value; - MethodHelper mh(shadow_frame->GetMethod()); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); + value.SetJ(ret_val->GetJ()); // Set value to last known result in case the shadow frame chain is empty. 
+ MethodHelper mh; while (shadow_frame != NULL) { + self->SetTopOfShadowStack(shadow_frame); + mh.ChangeMethod(shadow_frame->GetMethod()); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); value = Execute(self, mh, code_item, *shadow_frame, value); ShadowFrame* old_frame = shadow_frame; shadow_frame = shadow_frame->GetLink(); - mh.ChangeMethod(shadow_frame->GetMethod()); delete old_frame; } ret_val->SetJ(value.GetJ()); diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h index 556b04442b..cf47b682ae 100644 --- a/src/interpreter/interpreter.h +++ b/src/interpreter/interpreter.h @@ -33,15 +33,13 @@ class Thread; namespace interpreter { +// Called by AbstractMethod::Invoke, shadow frames arguments are taken from the args array. extern void EnterInterpreterFromInvoke(Thread* self, mirror::AbstractMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -extern JValue EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame& shadow_frame, - JValue ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -void EnterInterpreterFromLLVM(Thread* self, ShadowFrame* shadow_frame, JValue* result) +extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, + JValue* ret_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, diff --git a/src/invoke_arg_array_builder.h b/src/invoke_arg_array_builder.h index aca1091184..a6e99a52d9 100644 --- a/src/invoke_arg_array_builder.h +++ b/src/invoke_arg_array_builder.h @@ -43,13 +43,24 @@ class ArgArray { public: explicit ArgArray(const char* shorty, uint32_t shorty_len) : shorty_(shorty), shorty_len_(shorty_len), num_bytes_(0) { - // TODO: This code is conservative. The multiply by 2 is to handle the case where all args are - // doubles or longs. We could scan the shorty to use the arg array more often. 
- if (shorty_len * 2 <= kSmallArgArraySize) { + size_t num_slots = shorty_len + 1; // +1 in case of receiver. + if (LIKELY((num_slots * 2) < kSmallArgArraySize)) { + // We can trivially use the small arg array. arg_array_ = small_arg_array_; } else { - large_arg_array_.reset(new uint32_t[shorty_len_ * 2]); - arg_array_ = large_arg_array_.get(); + // Analyze shorty to see if we need the large arg array. + for (size_t i = 1; i < shorty_len; ++i) { + char c = shorty[i]; + if (c == 'J' || c == 'D') { + num_slots++; + } + } + if (num_slots <= kSmallArgArraySize) { + arg_array_ = small_arg_array_; + } else { + large_arg_array_.reset(new uint32_t[num_slots]); + arg_array_ = large_arg_array_.get(); + } } } diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index eb385fc55c..5b65aa4bab 100644 --- a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -1069,21 +1069,31 @@ void JdwpState::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) { // Try to avoid blocking GC during a send, but only safe when not using mutexes at a lower-level // than mutator for lock ordering reasons. Thread* self = Thread::Current(); - bool safe_to_release_mutator_lock_over_send; - for (size_t i=0; i < kMutatorLock; ++i) { - if (self->GetHeldMutex(static_cast(i)) != NULL) { - safe_to_release_mutator_lock_over_send = false; - break; + bool safe_to_release_mutator_lock_over_send = !Locks::mutator_lock_->IsExclusiveHeld(self); + if (safe_to_release_mutator_lock_over_send) { + for (size_t i=0; i < kMutatorLock; ++i) { + if (self->GetHeldMutex(static_cast(i)) != NULL) { + safe_to_release_mutator_lock_over_send = false; + break; + } } } + bool success; if (safe_to_release_mutator_lock_over_send) { // Change state to waiting to allow GC, ... while we're sending. 
self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); - (*transport_->sendBufferedRequest)(this, wrapiov, iov_count + 1); + success = (*transport_->sendBufferedRequest)(this, wrapiov, iov_count + 1); self->TransitionFromSuspendedToRunnable(); } else { // Send and possibly block GC... - (*transport_->sendBufferedRequest)(this, wrapiov, iov_count + 1); + success = (*transport_->sendBufferedRequest)(this, wrapiov, iov_count + 1); + } + if (!success) { + LOG(INFO) << StringPrintf("JDWP send of type %c%c%c%c failed.", + static_cast(type >> 24), + static_cast(type >> 16), + static_cast(type >> 8), + static_cast(type)); } } diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc index 4e738ff6fa..1f121f88ba 100644 --- a/src/jdwp/jdwp_main.cc +++ b/src/jdwp/jdwp_main.cc @@ -334,6 +334,7 @@ void JdwpState::Run() { /* set the thread state to kWaitingInMainDebuggerLoop so GCs don't wait for us */ CHECK_EQ(thread_->GetState(), kNative); + Locks::mutator_lock_->AssertNotHeld(thread_); thread_->SetState(kWaitingInMainDebuggerLoop); /* @@ -421,10 +422,9 @@ void JdwpState::Run() { // Release session state, e.g. remove breakpoint instructions. ResetState(); - - // Tell the rest of the runtime that the debugger is no longer around. - Dbg::Disconnected(); } + // Tell the rest of the runtime that the debugger is no longer around. 
+ Dbg::Disconnected(); /* if we had threads suspended, resume them now */ Dbg::UndoDebuggerSuspensions(); diff --git a/src/jni_internal.cc b/src/jni_internal.cc index 6df03e999d..a6c9fa178c 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -112,7 +112,7 @@ static void CheckMethodArguments(AbstractMethod* m, uint32_t* args) CHECK(self->IsExceptionPending()); LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: " << mh.GetTypeDescriptorFromTypeIdx(type_idx) << "\n" - << self->GetException()->Dump(); + << self->GetException(NULL)->Dump(); self->ClearException(); ++error_count; } else if (!param_type->IsPrimitive()) { @@ -214,8 +214,10 @@ static std::string NormalizeJniClassDescriptor(const char* name) { static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, Class* c, const char* name, const char* sig, const char* kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", - "no %s method \"%s.%s%s\"", kind, ClassHelper(c).GetDescriptor(), name, sig); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;", + "no %s method \"%s.%s%s\"", + kind, ClassHelper(c).GetDescriptor(), name, sig); } static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, @@ -248,7 +250,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, static ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = soa.Self()->GetCurrentMethod(); + AbstractMethod* method = soa.Self()->GetCurrentMethod(NULL); if (method == NULL || method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { return soa.Self()->GetClassLoaderOverride(); @@ -276,10 +278,14 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con if (field_type == NULL) { // Failed to find 
type from the signature of the field. DCHECK(soa.Self()->IsExceptionPending()); + ThrowLocation throw_location; + SirtRef cause(soa.Self(), soa.Self()->GetException(&throw_location)); soa.Self()->ClearException(); - soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", - "no type \"%s\" found and so no field \"%s\" could be found in class " - "\"%s\" or its superclasses", sig, name, ClassHelper(c).GetDescriptor()); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", + "no type \"%s\" found and so no field \"%s\" could be found in class " + "\"%s\" or its superclasses", sig, name, + ClassHelper(c).GetDescriptor()); + soa.Self()->GetException(NULL)->SetCause(cause.get()); return NULL; } if (is_static) { @@ -288,9 +294,10 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con field = c->FindInstanceField(name, ClassHelper(field_type).GetDescriptor()); } if (field == NULL) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", - "no \"%s\" field \"%s\" in class \"%s\" or its superclasses", sig, - name, ClassHelper(c).GetDescriptor()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", + "no \"%s\" field \"%s\" in class \"%s\" or its superclasses", + sig, name, ClassHelper(c).GetDescriptor()); return NULL; } return soa.EncodeField(field); @@ -314,16 +321,19 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, jsize length, const char* identifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string type(PrettyTypeOf(array)); - soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "%s offset=%d length=%d %s.length=%d", - type.c_str(), start, length, identifier, array->GetLength()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, 
"Ljava/lang/ArrayIndexOutOfBoundsException;", + "%s offset=%d length=%d %s.length=%d", + type.c_str(), start, length, identifier, array->GetLength()); } static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, jsize array_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", - "offset=%d length=%d string.length()=%d", start, length, array_length); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;", + "offset=%d length=%d string.length()=%d", start, length, + array_length); } int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) @@ -362,10 +372,9 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj if (exception.get() == NULL) { return JNI_ERR; } - ScopedObjectAccess soa(env); - soa.Self()->SetException(soa.Decode(exception.get())); - + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->SetException(throw_location, soa.Decode(exception.get())); return JNI_OK; } @@ -678,7 +687,8 @@ class JNI { if (exception == NULL) { return JNI_ERR; } - soa.Self()->SetException(exception); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->SetException(throw_location, exception); return JNI_OK; } @@ -697,31 +707,42 @@ class JNI { static void ExceptionDescribe(JNIEnv* env) { ScopedObjectAccess soa(env); - Thread* self = soa.Self(); - Throwable* original_exception = self->GetException(); - self->ClearException(); - - ScopedLocalRef exception(env, soa.AddLocalReference(original_exception)); + SirtRef old_throw_this_object(soa.Self(), NULL); + SirtRef old_throw_method(soa.Self(), NULL); + SirtRef old_exception(soa.Self(), NULL); + uint32_t old_throw_dex_pc; + { + ThrowLocation old_throw_location; + mirror::Throwable* 
old_exception_obj = soa.Self()->GetException(&old_throw_location); + old_throw_this_object.reset(old_throw_location.GetThis()); + old_throw_method.reset(old_throw_location.GetMethod()); + old_exception.reset(old_exception_obj); + old_throw_dex_pc = old_throw_location.GetDexPc(); + soa.Self()->ClearException(); + } + ScopedLocalRef exception(env, soa.AddLocalReference(old_exception.get())); ScopedLocalRef exception_class(env, env->GetObjectClass(exception.get())); jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V"); if (mid == NULL) { LOG(WARNING) << "JNI WARNING: no printStackTrace()V in " - << PrettyTypeOf(original_exception); + << PrettyTypeOf(old_exception.get()); } else { env->CallVoidMethod(exception.get(), mid); - if (self->IsExceptionPending()) { - LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(self->GetException()) + if (soa.Self()->IsExceptionPending()) { + LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(soa.Self()->GetException(NULL)) << " thrown while calling printStackTrace"; - self->ClearException(); + soa.Self()->ClearException(); } } + ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(), + old_throw_dex_pc); - self->SetException(original_exception); + soa.Self()->SetException(gc_safe_throw_location, old_exception.get()); } static jthrowable ExceptionOccurred(JNIEnv* env) { ScopedObjectAccess soa(env); - Object* exception = soa.Self()->GetException(); + Object* exception = soa.Self()->GetException(NULL); return soa.AddLocalReference(exception); } @@ -2134,10 +2155,10 @@ class JNI { static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) { if (capacity < 0) { - JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %d", capacity); + JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %lld", capacity); } if (address == NULL && capacity != 0) { - JniAbortF("NewDirectByteBuffer", "non-zero capacity for NULL pointer: %d", capacity); + 
JniAbortF("NewDirectByteBuffer", "non-zero capacity for NULL pointer: %lld", capacity); } // At the moment, the Java side is limited to 32 bits. @@ -2676,7 +2697,7 @@ JavaVMExt::JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options) force_copy(false), // TODO: add a way to enable this trace(options->jni_trace_), work_around_app_jni_bugs(false), - pins_lock("JNI pin table lock"), + pins_lock("JNI pin table lock", kPinTableLock), pin_table("pin table", kPinTableInitial, kPinTableMax), globals_lock("JNI global reference table lock"), globals(gGlobalsInitial, gGlobalsMax, kGlobal), @@ -2889,9 +2910,10 @@ void* JavaVMExt::FindCodeForNativeMethod(AbstractMethod* m) { MutexLock mu(self, libraries_lock); native_method = libraries->FindNativeMethod(m, detail); } - // throwing can cause libraries_lock to be reacquired + // Throwing can cause libraries_lock to be reacquired. if (native_method == NULL) { - self->ThrowNewException("Ljava/lang/UnsatisfiedLinkError;", detail.c_str()); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str()); } return native_method; } diff --git a/src/jni_internal.h b/src/jni_internal.h index 9c067deb59..131032a29d 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -49,7 +49,8 @@ class ScopedObjectAccess; class Thread; void SetJniGlobalsMax(size_t max); -void JniAbortF(const char* jni_function_name, const char* fmt, ...); +void JniAbortF(const char* jni_function_name, const char* fmt, ...) 
+ __attribute__((__format__(__printf__, 2, 3))); void* FindNativeMethod(Thread* thread); void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, size_t method_count); diff --git a/src/jvalue.h b/src/jvalue.h index fa85937dcd..66cd93e2c0 100644 --- a/src/jvalue.h +++ b/src/jvalue.h @@ -19,6 +19,8 @@ #include "base/macros.h" +#include + namespace art { namespace mirror { class Object; diff --git a/src/locks.cc b/src/locks.cc index 27b9d4bf96..eb0620c0c3 100644 --- a/src/locks.cc +++ b/src/locks.cc @@ -29,6 +29,7 @@ ReaderWriterMutex* Locks::mutator_lock_ = NULL; Mutex* Locks::runtime_shutdown_lock_ = NULL; Mutex* Locks::thread_list_lock_ = NULL; Mutex* Locks::thread_suspend_count_lock_ = NULL; +Mutex* Locks::trace_lock_ = NULL; Mutex* Locks::unexpected_signal_lock_ = NULL; void Locks::Init() { @@ -42,6 +43,7 @@ void Locks::Init() { DCHECK(mutator_lock_ != NULL); DCHECK(thread_list_lock_ != NULL); DCHECK(thread_suspend_count_lock_ != NULL); + DCHECK(trace_lock_ != NULL); DCHECK(unexpected_signal_lock_ != NULL); } else { logging_lock_ = new Mutex("logging lock", kLoggingLock, true); @@ -61,6 +63,8 @@ void Locks::Init() { thread_list_lock_ = new Mutex("thread list lock", kThreadListLock); DCHECK(thread_suspend_count_lock_ == NULL); thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock); + DCHECK(trace_lock_ == NULL); + trace_lock_ = new Mutex("trace lock", kTraceLock); DCHECK(unexpected_signal_lock_ == NULL); unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true); } diff --git a/src/locks.h b/src/locks.h index c0f6ae55c4..568f950873 100644 --- a/src/locks.h +++ b/src/locks.h @@ -37,17 +37,20 @@ enum LockLevel { kThreadSuspendCountLock, kAbortLock, kDefaultMutexLevel, + kJdwpSerialLock, kAllocSpaceLock, + kMarkSweepLargeObjectLock, + kPinTableLock, kLoadLibraryLock, kClassLinkerClassesLock, kBreakpointLock, + kJdwpObjectRegistryLock, kThreadListLock, 
kBreakpointInvokeLock, - kJdwpObjectRegistryLock, + kTraceLock, kJdwpEventListLock, kJdwpAttachLock, kJdwpStartLock, - kJdwpSerialLock, kRuntimeShutdownLock, kHeapBitmapLock, kMonitorLock, @@ -136,8 +139,11 @@ class Locks { // Guards breakpoints and single-stepping. static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_); + // Guards trace requests. + static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_); + // Guards lists of classes within the class linker. - static Mutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_); + static Mutex* classlinker_classes_lock_ ACQUIRED_AFTER(trace_lock_); // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code // doesn't try to hold a higher level Mutex. diff --git a/src/mirror/abstract_method-inl.h b/src/mirror/abstract_method-inl.h index 2049748f30..fd02474b8f 100644 --- a/src/mirror/abstract_method-inl.h +++ b/src/mirror/abstract_method-inl.h @@ -20,6 +20,7 @@ #include "abstract_method.h" #include "dex_file.h" +#include "oat/runtime/oat_support_entrypoints.h" #include "object_array.h" #include "runtime.h" @@ -113,6 +114,9 @@ inline void AbstractMethod::AssertPcIsWithinCode(uintptr_t pc) const { if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) { return; } + if (GetCode() == GetInterpreterEntryPoint()) { + return; + } Runtime* runtime = Runtime::Current(); if (GetCode() == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { return; diff --git a/src/mirror/abstract_method.cc b/src/mirror/abstract_method.cc index f74814c406..3ab3a939a8 100644 --- a/src/mirror/abstract_method.cc +++ b/src/mirror/abstract_method.cc @@ -151,23 +151,9 @@ AbstractMethod* AbstractMethod::FindOverriddenMethod() const { return result; } -static const void* GetOatCode(const AbstractMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Runtime* runtime = Runtime::Current(); - const void* code = m->GetCode(); - // Peel off any method tracing trampoline. 
- if (runtime->IsMethodTracingActive() && runtime->GetInstrumentation()->GetSavedCodeFromMap(m) != NULL) { - code = runtime->GetInstrumentation()->GetSavedCodeFromMap(m); - } - // Peel off any resolution stub. - if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { - code = runtime->GetClassLinker()->GetOatCodeFor(m); - } - return code; -} - uintptr_t AbstractMethod::NativePcOffset(const uintptr_t pc) const { - return pc - reinterpret_cast(GetOatCode(this)); + const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this); + return pc - reinterpret_cast(code); } // Find the lowest-address native safepoint pc for a given dex pc @@ -181,7 +167,8 @@ uintptr_t AbstractMethod::ToFirstNativeSafepointPc(const uint32_t dex_pc) const size_t mapping_table_length = GetPcToDexMappingTableLength(); for (size_t i = 0; i < mapping_table_length; i += 2) { if (mapping_table[i + 1] == dex_pc) { - return mapping_table[i] + reinterpret_cast(GetOatCode(this)); + const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this); + return mapping_table[i] + reinterpret_cast(code); } } LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc @@ -201,14 +188,16 @@ uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const { return DexFile::kDexNoIndex; // Special no mapping case } size_t mapping_table_length = GetPcToDexMappingTableLength(); - uint32_t sought_offset = pc - reinterpret_cast(GetOatCode(this)); + const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this); + uint32_t sought_offset = pc - reinterpret_cast(code); for (size_t i = 0; i < mapping_table_length; i += 2) { if (mapping_table[i] == sought_offset) { return mapping_table[i + 1]; } } - LOG(ERROR) << "Failed to find Dex offset for PC offset " << reinterpret_cast(sought_offset) - << "(PC " << reinterpret_cast(pc) << ") in " << PrettyMethod(this); + LOG(FATAL) << "Failed to find Dex offset for PC offset " << 
reinterpret_cast(sought_offset) + << "(PC " << reinterpret_cast(pc) << ", code=" << code + << ") in " << PrettyMethod(this); return DexFile::kDexNoIndex; #else // Compiler LLVM doesn't use the machine pc, we just use dex pc instead. @@ -227,7 +216,8 @@ uintptr_t AbstractMethod::ToNativePc(const uint32_t dex_pc) const { uint32_t map_offset = mapping_table[i]; uint32_t map_dex_offset = mapping_table[i + 1]; if (map_dex_offset == dex_pc) { - return reinterpret_cast(GetOatCode(this)) + map_offset; + const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this); + return reinterpret_cast(code) + map_offset; } } LOG(FATAL) << "Looking up Dex PC not contained in method, 0x" << std::hex << dex_pc @@ -270,14 +260,16 @@ void AbstractMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JV ManagedStack fragment; self->PushManagedStackFragment(&fragment); + Runtime* runtime = Runtime::Current(); // Call the invoke stub, passing everything as arguments. - if (UNLIKELY(!Runtime::Current()->IsStarted())){ + if (UNLIKELY(!runtime->IsStarted())){ LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started"; if (result != NULL) { result->SetJ(0); } } else { - bool interpret = self->ReadFlag(kEnterInterpreter) && !IsNative() && !IsProxyMethod(); + bool interpret = runtime->GetInstrumentation()->InterpretOnly() && !IsNative() && + !IsProxyMethod(); const bool kLogInvocationStartAndReturn = false; if (GetCode() != NULL) { if (!interpret) { @@ -289,15 +281,15 @@ void AbstractMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JV #else (*art_quick_invoke_stub)(this, args, args_size, self, result, result_type); #endif - if (UNLIKELY(reinterpret_cast(self->GetException()) == -1)) { + if (UNLIKELY(reinterpret_cast(self->GetException(NULL)) == -1)) { // Unusual case where we were running LLVM generated code and an // exception was thrown to force the activations to be removed from the // stack. 
Continue execution in the interpreter. - JValue value; self->ClearException(); - ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(&value); + ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result); + self->SetTopOfStack(NULL, 0); self->SetTopOfShadowStack(shadow_frame); - interpreter::EnterInterpreterFromLLVM(self, shadow_frame, result); + interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result); } if (kLogInvocationStartAndReturn) { LOG(INFO) << StringPrintf("Returned '%s' code=%p", PrettyMethod(this).c_str(), GetCode()); diff --git a/src/mirror/abstract_method.h b/src/mirror/abstract_method.h index d10031a91b..9440915731 100644 --- a/src/mirror/abstract_method.h +++ b/src/mirror/abstract_method.h @@ -321,6 +321,11 @@ class MANAGED AbstractMethod : public Object { return GetFrameSizeInBytes() - kPointerSize; } + size_t GetSirtOffsetInBytes() const { + CHECK(IsNative()); + return kPointerSize; + } + bool IsRegistered() const; void RegisterNative(Thread* self, const void* native_method) diff --git a/src/mirror/array.cc b/src/mirror/array.cc index d0b3838525..84c2dc651a 100644 --- a/src/mirror/array.cc +++ b/src/mirror/array.cc @@ -18,6 +18,7 @@ #include "class.h" #include "class-inl.h" +#include "common_throws.h" #include "dex_file-inl.h" #include "gc/card_table-inl.h" #include "object-inl.h" @@ -43,10 +44,10 @@ Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, // Check for overflow and throw OutOfMemoryError if this was an unreasonable request. 
size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size); - if (data_size >> component_shift != size_t(component_count) || size < data_size) { - self->ThrowNewExceptionF("Ljava/lang/OutOfMemoryError;", - "%s of length %d would overflow", - PrettyDescriptor(array_class).c_str(), component_count); + if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) { + self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow", + PrettyDescriptor(array_class).c_str(), + component_count).c_str()); return NULL; } @@ -108,8 +109,7 @@ Array* Array::CreateMultiArray(Thread* self, Class* element_class, IntArray* dim for (int i = 0; i < num_dimensions; i++) { int dimension = dimensions->Get(i); if (UNLIKELY(dimension < 0)) { - self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", - "Dimension %d: %d", i, dimension); + ThrowNegativeArraySizeException(StringPrintf("Dimension %d: %d", i, dimension).c_str()); return NULL; } } @@ -135,15 +135,12 @@ Array* Array::CreateMultiArray(Thread* self, Class* element_class, IntArray* dim } bool Array::ThrowArrayIndexOutOfBoundsException(int32_t index) const { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "length=%i; index=%i", length_, index); + art::ThrowArrayIndexOutOfBoundsException(index, GetLength()); return false; } bool Array::ThrowArrayStoreException(Object* object) const { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "%s cannot be stored in an array of type %s", - PrettyTypeOf(object).c_str(), PrettyTypeOf(this).c_str()); + art::ThrowArrayStoreException(object->GetClass(), this->GetClass()); return false; } diff --git a/src/mirror/class-inl.h b/src/mirror/class-inl.h index ec92c199ee..d7afed6cd5 100644 --- a/src/mirror/class-inl.h +++ b/src/mirror/class-inl.h @@ -24,7 +24,7 @@ #include "dex_cache.h" #include "field.h" #include "iftable.h" -#include "object_array.h" +#include 
"object_array-inl.h" #include "runtime.h" #include "string.h" diff --git a/src/mirror/class.cc b/src/mirror/class.cc index ba3556ef84..15129ab6dc 100644 --- a/src/mirror/class.cc +++ b/src/mirror/class.cc @@ -60,10 +60,22 @@ void Class::SetStatus(Status new_status) { if (new_status == kStatusError) { CHECK_NE(GetStatus(), kStatusError) << PrettyClass(this); - // stash current exception + // Stash current exception. Thread* self = Thread::Current(); - SirtRef exception(self, self->GetException()); - CHECK(exception.get() != NULL); + SirtRef old_throw_this_object(self, NULL); + SirtRef old_throw_method(self, NULL); + SirtRef old_exception(self, NULL); + uint32_t old_throw_dex_pc; + { + ThrowLocation old_throw_location; + mirror::Throwable* old_exception_obj = self->GetException(&old_throw_location); + old_throw_this_object.reset(old_throw_location.GetThis()); + old_throw_method.reset(old_throw_location.GetMethod()); + old_exception.reset(old_exception_obj); + old_throw_dex_pc = old_throw_location.GetDexPc(); + self->ClearException(); + } + CHECK(old_exception.get() != NULL); // clear exception to call FindSystemClass self->ClearException(); @@ -71,15 +83,18 @@ void Class::SetStatus(Status new_status) { Class* eiie_class = class_linker->FindSystemClass("Ljava/lang/ExceptionInInitializerError;"); CHECK(!self->IsExceptionPending()); - // only verification errors, not initialization problems, should set a verify error. - // this is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that case. - Class* exception_class = exception->GetClass(); + // Only verification errors, not initialization problems, should set a verify error. + // This is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that case. 
+ Class* exception_class = old_exception->GetClass(); if (!eiie_class->IsAssignableFrom(exception_class)) { SetVerifyErrorClass(exception_class); } - // restore exception - self->SetException(exception.get()); + // Restore exception. + ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(), + old_throw_dex_pc); + + self->SetException(gc_safe_throw_location, old_exception.get()); } return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status, false); } diff --git a/src/mirror/object_test.cc b/src/mirror/object_test.cc index eed96bd9b7..5c7ec11222 100644 --- a/src/mirror/object_test.cc +++ b/src/mirror/object_test.cc @@ -116,12 +116,12 @@ TEST_F(ObjectTest, AllocObjectArray) { EXPECT_TRUE(oa->Get(-1) == NULL); EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass()); soa.Self()->ClearException(); EXPECT_TRUE(oa->Get(2) == NULL); EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass()); soa.Self()->ClearException(); ASSERT_TRUE(oa->GetClass() != NULL); @@ -166,12 +166,12 @@ void TestPrimitiveArray(ClassLinker* cl) { EXPECT_EQ(0, a->Get(-1)); EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass()); soa.Self()->ClearException(); EXPECT_EQ(0, a->Get(2)); EXPECT_TRUE(soa.Self()->IsExceptionPending()); - EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass()); + EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass()); soa.Self()->ClearException(); } @@ -231,7 +231,7 @@ TEST_F(ObjectTest, CreateMultiArray) { dims->Set(0, -1); multi = Array::CreateMultiArray(soa.Self(), c.get(), dims.get()); EXPECT_TRUE(soa.Self()->IsExceptionPending()); - 
EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException()->GetClass()), + EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException(NULL)->GetClass()), "java.lang.NegativeArraySizeException"); soa.Self()->ClearException(); diff --git a/src/mirror/string.cc b/src/mirror/string.cc index f571fb828a..45a6779c45 100644 --- a/src/mirror/string.cc +++ b/src/mirror/string.cc @@ -103,8 +103,9 @@ uint16_t String::CharAt(int32_t index) const { // bounds check itself. if (index < 0 || index >= count_) { Thread* self = Thread::Current(); - self->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", - "length=%i; index=%i", count_, index); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;", + "length=%i; index=%i", count_, index); return 0; } return GetCharArray()->Get(index + GetOffset()); diff --git a/src/mirror/throwable.cc b/src/mirror/throwable.cc index d1192b0728..bbff9c2f82 100644 --- a/src/mirror/throwable.cc +++ b/src/mirror/throwable.cc @@ -35,7 +35,9 @@ Class* Throwable::java_lang_Throwable_ = NULL; void Throwable::SetCause(Throwable* cause) { CHECK(cause != NULL); CHECK(cause != this); - CHECK(GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false) == NULL); + Throwable* current_cause = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), + false); + CHECK(current_cause == NULL || current_cause == this); SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false); } diff --git a/src/monitor.cc b/src/monitor.cc index 2377734a2d..11790e5c9e 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -254,10 +254,12 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list args; va_start(args, fmt); - Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionV(throw_location, "Ljava/lang/IllegalMonitorStateException;", fmt, args); if (!Runtime::Current()->IsStarted()) { std::ostringstream ss; - Thread::Current()->Dump(ss); + self->Dump(ss); std::string str(ss.str()); LOG(ERROR) << "IllegalMonitorStateException: " << str; } @@ -411,8 +413,9 @@ void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow, ThreadState why) { // Enforce the timeout range. if (ms < 0 || ns < 0 || ns > 999999) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "timeout arguments out of range: ms=%lld ns=%d", ms, ns); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;", + "timeout arguments out of range: ms=%lld ns=%d", ms, ns); return; } @@ -517,7 +520,8 @@ void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, self->interrupted_ = false; } if (interruptShouldThrow) { - Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/InterruptedException;", NULL); } } } diff --git a/src/monitor_android.cc b/src/monitor_android.cc index d3ac14355a..9265cd649d 100644 --- a/src/monitor_android.cc +++ b/src/monitor_android.cc @@ -77,7 +77,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample cp = EventLogWriteInt(cp, wait_ms); // Emit the source code file name, <= 37 bytes. 
- uintptr_t pc; + uint32_t pc; mirror::AbstractMethod* m = self->GetCurrentMethod(&pc); const char* filename; uint32_t line_number; diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 7c6fbd9234..d703f83513 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -18,6 +18,7 @@ #include "base/logging.h" #include "class_linker.h" +#include "common_throws.h" #include "dex_file-inl.h" #include "gc/space.h" #include "image.h" @@ -103,18 +104,18 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst } if (dex_file == NULL) { LOG(WARNING) << "Failed to open dex file: " << source; - Thread::Current()->ThrowNewExceptionF("Ljava/io/IOException;", "Unable to open dex file: %s", - source.c_str()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/io/IOException;", + "Unable to open dex file: %s", source.c_str()); return 0; } return static_cast(reinterpret_cast(dex_file)); } -static const DexFile* toDexFile(int dex_file_address) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static const DexFile* toDexFile(int dex_file_address) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* dex_file = reinterpret_cast(static_cast(dex_file_address)); if (dex_file == NULL) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", "dex_file == null"); + ThrowNullPointerException(NULL, "dex_file == null"); } return dex_file; } @@ -188,7 +189,9 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename if (!OS::FileExists(filename.c_str())) { LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename.c_str() << "' does not exist"; ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewExceptionF("Ljava/io/FileNotFoundException;", "%s", filename.c_str()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + 
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/io/FileNotFoundException;", + "%s", filename.c_str()); return JNI_TRUE; } diff --git a/src/native/dalvik_system_VMDebug.cc b/src/native/dalvik_system_VMDebug.cc index dc07a31ee3..992998eb66 100644 --- a/src/native/dalvik_system_VMDebug.cc +++ b/src/native/dalvik_system_VMDebug.cc @@ -18,6 +18,7 @@ #include #include "class_linker.h" +#include "common_throws.h" #include "debugger.h" #include "hprof/hprof.h" #include "jni_internal.h" @@ -68,8 +69,9 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF int fd = dup(originalFd); if (fd < 0) { ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "dup(%d) failed: %s", originalFd, strerror(errno)); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/RuntimeException;", + "dup(%d) failed: %s", originalFd, strerror(errno)); return; } @@ -90,7 +92,7 @@ static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring java } static jboolean VMDebug_isMethodTracingActive(JNIEnv*, jclass) { - return Runtime::Current()->IsMethodTracingActive(); + return Trace::IsMethodTracingActive(); } static void VMDebug_stopMethodTracing(JNIEnv*, jclass) { @@ -119,24 +121,26 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) { return Dbg::LastDebuggerActivity(); } -static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) { +static void ThrowUnsupportedOperationException(JNIEnv* env) { ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewException(throw_location, "Ljava/lang/UnsupportedOperationException;", NULL); +} + +static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) { + ThrowUnsupportedOperationException(env); } static void 
VMDebug_stopInstructionCounting(JNIEnv* env, jclass) { - ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); + ThrowUnsupportedOperationException(env); } static void VMDebug_getInstructionCount(JNIEnv* env, jclass, jintArray /*javaCounts*/) { - ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); + ThrowUnsupportedOperationException(env); } static void VMDebug_resetInstructionCount(JNIEnv* env, jclass) { - ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", ""); + ThrowUnsupportedOperationException(env); } static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) { @@ -166,8 +170,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job // Only one of these may be NULL. if (javaFilename == NULL && javaFd == NULL) { ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", - "fileName == null && fd == null"); + ThrowNullPointerException(NULL, "fileName == null && fd == null"); return; } @@ -187,8 +190,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job fd = jniGetFDFromFileDescriptor(env, javaFd); if (fd < 0) { ScopedObjectAccess soa(env); - Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", - "Invalid file descriptor"); + ThrowRuntimeException("Invalid file descriptor"); return; } } diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index a13d07a0d3..d2ef43c7e2 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -17,6 +17,7 @@ #include #include "class_linker.h" +#include "common_throws.h" #include "debugger.h" #include "dex_file-inl.h" #include "jni_internal.h" @@ -57,11 +58,11 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass 
javaEle mirror::Class* element_class = soa.Decode(javaElementClass); if (element_class == NULL) { - soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "element class == null"); + ThrowNullPointerException(NULL, "element class == null"); return NULL; } if (length < 0) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); + ThrowNegativeArraySizeException(length); return NULL; } @@ -84,7 +85,7 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) { ScopedObjectAccess soa(env); mirror::Array* array = soa.Decode(javaArray); if (!array->IsArrayInstance()) { - soa.Self()->ThrowNewException("Ljava/lang/IllegalArgumentException;", "not an array"); + ThrowIllegalArgumentException(NULL, "not an array"); return 0; } // TODO: we should also check that this is a non-movable array. diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index 72f4c18244..a7296996da 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -53,8 +53,9 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean // is especially handy for array types, since we want to avoid // auto-generating bogus array classes. if (!IsValidBinaryClassName(name.c_str())) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassNotFoundException;", - "Invalid name: %s", name.c_str()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ClassNotFoundException;", + "Invalid name: %s", name.c_str()); return NULL; } diff --git a/src/native/java_lang_String.cc b/src/native/java_lang_String.cc index 44ab1ca8be..3e9c3f36fa 100644 --- a/src/native/java_lang_String.cc +++ b/src/native/java_lang_String.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "common_throws.h" #include "jni_internal.h" #include "mirror/string.h" #include "scoped_thread_state_change.h" @@ -22,12 +23,11 @@ namespace art { static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) { + ScopedObjectAccess soa(env); if (UNLIKELY(javaRhs == NULL)) { - ScopedLocalRef npe(env, env->FindClass("java/lang/NullPointerException")); - env->ThrowNew(npe.get(), "rhs == null"); + ThrowNullPointerException(NULL, "rhs == null"); return -1; } else { - ScopedObjectAccess soa(env); return soa.Decode(javaThis)->CompareTo(soa.Decode(javaRhs)); } } diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index 5572623a0c..d8df9d9dae 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -14,6 +14,7 @@ * limitations under the License. */ +#include "common_throws.h" #include "gc/card_table-inl.h" #include "jni_internal.h" #include "mirror/array.h" @@ -171,31 +172,33 @@ namespace art { static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "%s of type %s is not an array", identifier, actualType.c_str()); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "%s of type %s is not an array", identifier, actualType.c_str()); } static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) { ScopedObjectAccess soa(env); // Null pointer checks. 
- if (javaSrc == NULL) { - soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "src == null"); + if (UNLIKELY(javaSrc == NULL)) { + ThrowNullPointerException(NULL, "src == null"); return; } - if (javaDst == NULL) { - soa.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "dst == null"); + if (UNLIKELY(javaDst == NULL)) { + ThrowNullPointerException(NULL, "dst == null"); return; } // Make sure source and destination are both arrays. mirror::Object* srcObject = soa.Decode(javaSrc); mirror::Object* dstObject = soa.Decode(javaDst); - if (!srcObject->IsArrayInstance()) { + if (UNLIKELY(!srcObject->IsArrayInstance())) { ThrowArrayStoreException_NotAnArray("source", srcObject); return; } - if (!dstObject->IsArrayInstance()) { + if (UNLIKELY(!dstObject->IsArrayInstance())) { ThrowArrayStoreException_NotAnArray("destination", dstObject); return; } @@ -205,21 +208,24 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType(); // Bounds checking. - if (srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", - srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); + if (UNLIKELY(srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length)) { + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", + srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); return; } // Handle primitive arrays. 
if (srcComponentType->IsPrimitive() || dstComponentType->IsPrimitive()) { // If one of the arrays holds a primitive type the other array must hold the exact same type. - if (srcComponentType->IsPrimitive() != dstComponentType->IsPrimitive() || srcComponentType != dstComponentType) { + if (UNLIKELY(srcComponentType != dstComponentType)) { std::string srcType(PrettyTypeOf(srcArray)); std::string dstType(PrettyTypeOf(dstArray)); - soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "Incompatible types: src=%s, dst=%s", srcType.c_str(), dstType.c_str()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "Incompatible types: src=%s, dst=%s", + srcType.c_str(), dstType.c_str()); return; } @@ -299,12 +305,13 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, } Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length); - if (i != length) { + if (UNLIKELY(i != length)) { std::string actualSrcType(PrettyTypeOf(o)); std::string dstType(PrettyTypeOf(dstArray)); - soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "source[%d] of type %s cannot be stored in destination array of type %s", - srcPos + i, actualSrcType.c_str(), dstType.c_str()); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "source[%d] of type %s cannot be stored in destination array of type %s", + srcPos + i, actualSrcType.c_str(), dstType.c_str()); return; } } diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index ca4be9de9b..7ccfaaa350 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "common_throws.h" #include "debugger.h" #include "jni_internal.h" #include "monitor.h" @@ -90,7 +91,7 @@ static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject ScopedObjectAccess soa(env); mirror::Object* object = soa.Decode(java_object); if (object == NULL) { - Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); + ThrowNullPointerException(NULL, "object == null"); return JNI_FALSE; } MutexLock mu(soa.Self(), *Locks::thread_list_lock_); diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index af7a77a0d3..45ec0ad5a2 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -15,6 +15,7 @@ */ #include "class_linker.h" +#include "common_throws.h" #include "dex_file-inl.h" #include "jni_internal.h" #include "mirror/class-inl.h" @@ -44,7 +45,7 @@ static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl DCHECK(javaElementClass != NULL); mirror::Class* element_class = soa.Decode(javaElementClass); if (UNLIKELY(length < 0)) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length); + ThrowNegativeArraySizeException(length); return NULL; } std::string descriptor("["); diff --git a/src/native/java_lang_reflect_Constructor.cc b/src/native/java_lang_reflect_Constructor.cc index fb84dfd2bd..918021748b 100644 --- a/src/native/java_lang_reflect_Constructor.cc +++ b/src/native/java_lang_reflect_Constructor.cc @@ -37,9 +37,12 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA ScopedObjectAccess soa(env); mirror::AbstractMethod* m = soa.Decode(javaMethod)->AsMethod(); mirror::Class* c = m->GetDeclaringClass(); - if (c->IsAbstract()) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", - "Can't instantiate abstract class %s", PrettyDescriptor(c).c_str()); + if (UNLIKELY(c->IsAbstract())) { + ThrowLocation throw_location 
= soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/InstantiationException;", + "Can't instantiate %s %s", + c->IsInterface() ? "interface" : "abstract class", + PrettyDescriptor(c).c_str()); return NULL; } diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index 922fe00b29..b0daa916c6 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -16,6 +16,7 @@ #include "class_linker.h" #include "class_linker-inl.h" +#include "common_throws.h" #include "dex_file-inl.h" #include "jni_internal.h" #include "mirror/class-inl.h" @@ -71,22 +72,23 @@ static bool GetFieldValue(const ScopedObjectAccess& soa, mirror::Object* o, mirr // Never okay. break; } - soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "Not a primitive field: %s", PrettyField(f).c_str()); + ThrowIllegalArgumentException(NULL, + StringPrintf("Not a primitive field: %s", + PrettyField(f).c_str()).c_str()); return false; } -static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, mirror::Field* f, - mirror::Object*& o) +static bool CheckReceiver(const ScopedObjectAccess& soa, jobject j_rcvr, mirror::Field* f, + mirror::Object*& class_or_rcvr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (f->IsStatic()) { - o = f->GetDeclaringClass(); + class_or_rcvr = f->GetDeclaringClass(); return true; } - o = soa.Decode(javaObj); + class_or_rcvr = soa.Decode(j_rcvr); mirror::Class* declaringClass = f->GetDeclaringClass(); - if (!VerifyObjectInClass(o, declaringClass)) { + if (!VerifyObjectInClass(class_or_rcvr, declaringClass)) { return false; } return true; @@ -126,8 +128,8 @@ static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, // Widen it if necessary (and possible). 
JValue wide_value; mirror::Class* dst_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(dst_descriptor); - if (!ConvertPrimitiveValue(FieldHelper(f).GetTypeAsPrimitiveType(), dst_type->GetPrimitiveType(), - field_value, wide_value)) { + if (!ConvertPrimitiveValue(NULL, false, FieldHelper(f).GetTypeAsPrimitiveType(), + dst_type->GetPrimitiveType(), field_value, wide_value)) { return JValue(); } return wide_value; @@ -205,8 +207,8 @@ static void SetFieldValue(mirror::Object* o, mirror::Field* f, const JValue& new // Else fall through to report an error. case Primitive::kPrimVoid: // Never okay. - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "Not a primitive field: %s", PrettyField(f).c_str()); + ThrowIllegalArgumentException(NULL, StringPrintf("Not a primitive field: %s", + PrettyField(f).c_str()).c_str()); return; } @@ -247,15 +249,15 @@ static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, c } FieldHelper fh(f); if (!fh.IsPrimitiveType()) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "Not a primitive field: %s", PrettyField(f).c_str()); + ThrowIllegalArgumentException(NULL, StringPrintf("Not a primitive field: %s", + PrettyField(f).c_str()).c_str()); return; } // Widen the value if necessary (and possible). 
JValue wide_value; mirror::Class* src_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(src_descriptor); - if (!ConvertPrimitiveValue(src_type->GetPrimitiveType(), fh.GetTypeAsPrimitiveType(), + if (!ConvertPrimitiveValue(NULL, false, src_type->GetPrimitiveType(), fh.GetTypeAsPrimitiveType(), new_value, wide_value)) { return; } diff --git a/src/nth_caller_visitor.h b/src/nth_caller_visitor.h index 7d9feb622e..c32a46aa02 100644 --- a/src/nth_caller_visitor.h +++ b/src/nth_caller_visitor.h @@ -18,6 +18,7 @@ #define ART_SRC_NTH_CALLER_VISITOR_H_ #include "mirror/abstract_method.h" +#include "locks.h" #include "stack.h" namespace art { @@ -25,19 +26,32 @@ class Thread; // Walks up the stack 'n' callers, when used with Thread::WalkStack. struct NthCallerVisitor : public StackVisitor { - NthCallerVisitor(Thread* thread, size_t n) - : StackVisitor(thread, NULL), n(n), count(0), caller(NULL) {} - - bool VisitFrame() { - DCHECK(caller == NULL); - if (count++ == n) { - caller = GetMethod(); - return false; + NthCallerVisitor(Thread* thread, size_t n, bool include_runtime_and_upcalls = false) + : StackVisitor(thread, NULL), n(n), include_runtime_and_upcalls_(include_runtime_and_upcalls), + count(0), caller(NULL) {} + + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* m = GetMethod(); + bool do_count = false; + if (m == NULL || m->IsRuntimeMethod()) { + // Upcall. 
+ do_count = include_runtime_and_upcalls_; + } else { + do_count = true; + } + if (do_count) { + DCHECK(caller == NULL); + if (count == n) { + caller = m; + return false; + } + count++; } return true; } - size_t n; + const size_t n; + const bool include_runtime_and_upcalls_; size_t count; mirror::AbstractMethod* caller; }; diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc index 58341bf7ad..1276f78c08 100644 --- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc +++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc @@ -33,10 +33,6 @@ extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, extern "C" void art_quick_can_put_array_element_from_code(void*, void*); extern "C" void art_quick_check_cast_from_code(void*, void*); -// Debug entrypoints. -extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); -extern "C" void art_quick_update_debugger(void*, void*, int32_t, void*); - // DexCache entrypoints. extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); @@ -145,10 +141,6 @@ void InitEntryPoints(EntryPoints* points) { points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; points->pCheckCastFromCode = art_quick_check_cast_from_code; - // Debug - points->pDebugMe = DebugMe; - points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled. - // DexCache points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; @@ -236,10 +228,6 @@ void InitEntryPoints(EntryPoints* points) { points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; }; -void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled) { - points->pUpdateDebuggerFromCode = (enabled ? 
art_quick_update_debugger : NULL); -} - uintptr_t GetInstrumentationExitPc() { return reinterpret_cast(art_quick_instrumentation_exit_from_code); } diff --git a/src/oat/runtime/arm/runtime_support_arm.S b/src/oat/runtime/arm/runtime_support_arm.S index fe7d69fc1e..96b39806cd 100644 --- a/src/oat/runtime/arm/runtime_support_arm.S +++ b/src/oat/runtime/arm/runtime_support_arm.S @@ -339,22 +339,6 @@ ENTRY art_quick_invoke_stub bx lr END art_quick_invoke_stub - /* - * On entry, r0 and r1 must be preserved, r2 is dex PC - */ - .extern artUpdateDebuggerFromCode -ENTRY art_quick_update_debugger - mov r3, r0 @ stash away r0 so that it's saved as if it were an argument - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - mov r0, r2 @ arg0 is dex PC - mov r1, rSELF @ arg1 is Thread* - mov r2, sp @ arg2 is sp - bl artUpdateDebuggerFromCode @ artUpdateDebuggerFromCode(int32_t, Thread*, Method**) - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - mov r0, r3 @ restore original r0 - bx lr -END art_quick_update_debugger - /* * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_ */ @@ -979,21 +963,21 @@ END art_quick_interpreter_entry .extern artInstrumentationMethodEntryFromCode .extern artInstrumentationMethodExitFromCode ENTRY art_quick_instrumentation_entry_from_code - mov r12, sp @ remember bottom of caller's frame - push {r0-r3} @ save arguments (4 words) - .save {r0-r3} + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + str r0, [sp, #4] @ preserve r0 + mov r12, sp @ remember sp + str lr, [sp, #-16]! 
@ expand the frame and pass LR + .pad #16 .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r1, 4 - .cfi_rel_offset r2, 8 - .cfi_rel_offset r3, 12 - mov r1, r9 @ pass Thread::Current - mov r2, r12 @ pass SP - mov r3, lr @ pass LR - blx artInstrumentationMethodEntryFromCode @ (Method*, Thread*, SP, LR) - mov r12, r0 @ r12 holds reference to code - pop {r0-r3} @ restore arguments + .cfi_rel_offset lr, 0 + mov r2, r9 @ pass Thread::Current + mov r3, r12 @ pass SP + blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP, LR) + add sp, #16 @ remove out argument and padding from stack .cfi_adjust_cfa_offset -16 + mov r12, r0 @ r12 holds reference to code + ldr r0, [sp, #4] @ restore r0 + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME blx r12 @ call method with lr set to art_quick_instrumentation_exit_from_code END art_quick_instrumentation_entry_from_code .type art_quick_instrumentation_exit_from_code, #function @@ -1001,51 +985,44 @@ END art_quick_instrumentation_entry_from_code art_quick_instrumentation_exit_from_code: .cfi_startproc .fnstart + mov lr, #0 @ link register is to here, so clobber with 0 for later checks + SETUP_REF_ONLY_CALLEE_SAVE_FRAME mov r12, sp @ remember bottom of caller's frame push {r0-r1} @ save return value .save {r0-r1} .cfi_adjust_cfa_offset 8 .cfi_rel_offset r0, 0 .cfi_rel_offset r1, 4 - sub sp, #8 @ align stack + sub sp, #8 @ space for return value argument .pad #8 .cfi_adjust_cfa_offset 8 + strd r0, [sp] @ r0/r1 -> [sp] for fpr_res + mov r2, r0 @ pass return value as gpr_res + mov r3, r1 mov r0, r9 @ pass Thread::Current mov r1, r12 @ pass SP - blx artInstrumentationMethodExitFromCode @ (Thread*, SP) + blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res) add sp, #8 .cfi_adjust_cfa_offset -8 + mov r2, r0 @ link register saved by instrumentation mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize pop {r0, r1} @ restore return value - .cfi_adjust_cfa_offset -8 + add sp, 
#32 @ remove callee save frame + .cfi_adjust_cfa_offset -32 bx r2 @ return END art_quick_instrumentation_exit_from_code /* - * The thread's enter interpreter flag is set and so we should transition to the interpreter - * rather than allow execution to continue in the frame below. There may be live results in - * registers depending on how complete the operation is when we safepoint - for example, a - * set operation may have completed while a get operation needs writing back into the vregs. + * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. */ .extern artDeoptimize - .extern artEnterInterpreterFromDeoptimize ENTRY art_quick_deoptimize SETUP_REF_ONLY_CALLEE_SAVE_FRAME - mov r2, r9 @ Set up args. - mov r3, sp - blx artDeoptimize @ artDeoptimize(return value, Thread*, SP) - @ Returns caller method's frame size. - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ Was the caller an upcall? - bxeq lr @ Return if caller was upcall. - add r12, sp, r0 @ r12 == bottom of caller's frame. - ldr lr, [r12, #-4] @ Restore lr. - mov sp, r12 @ Remove frame. - SETUP_REF_ONLY_CALLEE_SAVE_FRAME - blx artEnterInterpreterFromDeoptimize @ Enter interpreter, callee-save ends stack fragment. - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - bx lr @ Return to caller. + mov r0, r9 @ Set up args. 
+ mov r1, sp + blx artDeoptimize @ artDeoptimize(Thread*, SP) END art_quick_deoptimize /* diff --git a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc index ea861e838e..599b14c4b3 100644 --- a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc +++ b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc @@ -33,10 +33,6 @@ extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, extern "C" void art_quick_can_put_array_element_from_code(void*, void*); extern "C" void art_quick_check_cast_from_code(void*, void*); -// Debug entrypoints. -extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); -extern "C" void art_quick_update_debugger(void*, void*, int32_t, void*); - // DexCache entrypoints. extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); @@ -147,10 +143,6 @@ void InitEntryPoints(EntryPoints* points) { points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; points->pCheckCastFromCode = art_quick_check_cast_from_code; - // Debug - points->pDebugMe = DebugMe; - points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled. - // DexCache points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; @@ -237,10 +229,6 @@ void InitEntryPoints(EntryPoints* points) { points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; }; -void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled) { - points->pUpdateDebuggerFromCode = (enabled ? 
art_quick_update_debugger : NULL); -} - uintptr_t GetInstrumentationExitPc() { return reinterpret_cast(art_quick_instrumentation_exit_from_code); } diff --git a/src/oat/runtime/mips/runtime_support_mips.S b/src/oat/runtime/mips/runtime_support_mips.S index 0fc2437f1f..529fd0d7a6 100644 --- a/src/oat/runtime/mips/runtime_support_mips.S +++ b/src/oat/runtime/mips/runtime_support_mips.S @@ -203,24 +203,6 @@ DELIVER_PENDING_EXCEPTION .endm - /* - * On entry, $a0 and $a1 must be preserved, $a2 is dex PC - */ - .extern artUpdateDebuggerFromCode -ENTRY art_quick_update_debugger - GENERATE_GLOBAL_POINTER - move $a3, $a0 # stash away $a0 so that it's saved as if it were an argument - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - move $a0, $a2 # arg0 is dex PC - move $a1, rSELF # arg1 is Thread* - move $a2, $sp # arg2 is $sp - jal artUpdateDebuggerFromCode # artUpdateDebuggerFromCode(int32_t, Thread*, Method**) - nop - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - jr $ra - move $a0, $a3 # restore original $a0 -END art_quick_update_debugger - /* * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_ * FIXME: just guessing about the shape of the jmpbuf. Where will pc be? 
@@ -989,29 +971,22 @@ END art_quick_interpreter_entry .extern artInstrumentationMethodExitFromCode ENTRY art_quick_instrumentation_entry_from_code GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME move $t0, $sp # remember bottom of caller's frame - addiu $sp, $sp, -16 # save arguments (4 words) - .cfi_adjust_cfa_offset 16 - sw $a0, 0($sp) - .cfi_rel_offset 4, 0 - sw $a1, 4($sp) - .cfi_rel_offset 5, 4 - sw $a2, 8($sp) - .cfi_rel_offset 6, 8 - sw $a3, 12($sp) - .cfi_rel_offset 7, 12 - move $a3, $ra # pass $ra - move $a2, $t0 # pass $sp - jal artInstrumentationMethodEntryFromCode # (Method*, Thread*, SP, LR) - move $a1, rSELF # pass Thread::Current + addiu $sp, $sp, -32 # space for args, pad (3 words), arguments (5 words) + .cfi_adjust_cfa_offset 32 + sw $a0, 28($sp) # save arg0 + sw $ra, 16($sp) # pass $ra + move $a3, $t0 # pass $sp + jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP, LR) + move $a2, rSELF # pass Thread::Current move $t9, $v0 # $t9 holds reference to code - lw $a0, 0($sp) - lw $a1, 4($sp) - lw $a2, 8($sp) - lw $a3, 12($sp) + lw $a0, 28($sp) # restore arg0 + addiu $sp, $sp, 32 # remove args + .cfi_adjust_cfa_offset -32 + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME jalr $t9 # call method - addiu $sp, $sp, 16 - .cfi_adjust_cfa_offset -16 + nop END art_quick_instrumentation_entry_from_code /* intentional fallthrough */ .global art_quick_instrumentation_exit_from_code @@ -1020,53 +995,46 @@ art_quick_instrumentation_exit_from_code: addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp GENERATE_GLOBAL_POINTER move $t0, $sp # remember bottom of caller's frame - addiu $sp, $sp, -16 # save return values - .cfi_adjust_cfa_offset 16 - sw $v0, 0($sp) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME + addiu $sp, $sp, -48 # save return values and set up args + .cfi_adjust_cfa_offset 48 + sw $v0, 32($sp) .cfi_rel_offset 2, 0 - sw $v1, 4($sp) + sw $v1, 36($sp) .cfi_rel_offset 3, 4 + s.s $f0, 40($sp) + s.s $f1, 44($sp) + s.s $f0, 16($sp) 
# pass fpr result + s.s $f1, 20($sp) + move $a2, $v0 # pass gpr result + move $a3, $v1 move $a1, $t0 # pass $sp - jal artInstrumentationMethodExitFromCode # (Thread*, SP) + jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res) move $a0, rSELF # pass Thread::Current move $t0, $v0 # set aside returned link register move $ra, $v1 # set link register for deoptimization - lw $v0, 0($sp) - lw $v1, 4($sp) + lw $v0, 32($sp) # restore return values + lw $v1, 36($sp) + l.s $f0, 40($sp) + l.s $f1, 44($sp) jr $t0 # return - addiu $sp, $sp, 16 - .cfi_adjust_cfa_offset -16 + addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame + .cfi_adjust_cfa_offset -112 END art_quick_instrumentation_exit_from_code /* - * The thread's enter interpreter flag is set and so we should transition to the interpreter - * rather than allow execution to continue in the frame below. There may be live results in - * registers depending on how complete the operation is when we safepoint - for example, a - * set operation may have completed while a get operation needs writing back into the vregs. + * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. */ .extern artDeoptimize .extern artEnterInterpreterFromDeoptimize ENTRY art_quick_deoptimize GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME - move $a0, $v0 # pass first half of return value - move $a1, $v1 # pass second half of return value - move $a2, rSELF # pass Thread::current - jal artDeoptimize # artDeoptimize(return value, Thread*, SP) + move $a0, rSELF # pass Thread::current + jal artDeoptimize # artDeoptimize(Thread*, SP) # Returns caller method's frame size. - move $a3, $sp # pass $sp - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - beqz $v0, 1f # Return if caller was upcall. - add $t9, $sp, $v0 # $t9 == bottom of caller's frame. - lw $ra, -4($t9) # Restore $ra. - move $sp, $t9 # Remove frame. 
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME - jal artEnterInterpreterFromDeoptimize # Enter interpreter, callee-save ends stack fragment. - nop - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME -1: - jr $ra # Return to caller. - nop + move $a1, $sp # pass $sp END art_quick_deoptimize /* diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h index ee59df495c..9cd9cc7e98 100644 --- a/src/oat/runtime/oat_support_entrypoints.h +++ b/src/oat/runtime/oat_support_entrypoints.h @@ -46,10 +46,6 @@ struct PACKED(4) EntryPoints { void (*pCanPutArrayElementFromCode)(void*, void*); void (*pCheckCastFromCode)(void*, void*); - // Debug - void (*pDebugMe)(mirror::AbstractMethod*, uint32_t); - void (*pUpdateDebuggerFromCode)(void*, void*, int32_t, void*); - // DexCache void* (*pInitializeStaticStorage)(uint32_t, void*); void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*); diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index 0b1fb742ef..fe91e617bb 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -32,19 +32,16 @@ extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, } // Check whether it is safe to cast one class to the other, throw exception and return -1 on failure -extern "C" int artCheckCastFromCode(const mirror::Class* a, const mirror::Class* b, Thread* self, - mirror::AbstractMethod** sp) +extern "C" int artCheckCastFromCode(mirror::Class* src_type, mirror::Class* dest_type, + Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(a->IsClass()) << PrettyClass(a); - DCHECK(b->IsClass()) << PrettyClass(b); - if (LIKELY(b->IsAssignableFrom(a))) { + DCHECK(src_type->IsClass()) << PrettyClass(src_type); + DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); + if (LIKELY(dest_type->IsAssignableFrom(src_type))) { return 0; // Success } else { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - 
self->ThrowNewExceptionF("Ljava/lang/ClassCastException;", - "%s cannot be cast to %s", - PrettyDescriptor(a).c_str(), - PrettyDescriptor(b).c_str()); + ThrowClassCastException(dest_type, src_type); return -1; // Failure } } @@ -63,10 +60,7 @@ extern "C" int artCanPutArrayElementFromCode(const mirror::Object* element, return 0; // Success } else { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", - "%s cannot be stored in an array of type %s", - PrettyDescriptor(element_class).c_str(), - PrettyDescriptor(array_class).c_str()); + ThrowArrayStoreException(element_class, array_class); return -1; // Failure } } diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc deleted file mode 100644 index 0d67dd92c3..0000000000 --- a/src/oat/runtime/support_debug.cc +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "debugger.h" - -namespace art { - -/* - * Report location to debugger. Note: dex_pc is the current offset within - * the method. However, because the offset alone cannot distinguish between - * method entry and offset 0 within the method, we'll use an offset of -1 - * to denote method entry. 
- */ -extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - Dbg::UpdateDebugger(dex_pc, self); -} - -// Temporary debugging hook for compiler. -extern void DebugMe(mirror::AbstractMethod* method, uint32_t info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - LOG(INFO) << "DebugMe"; - if (method != NULL) { - LOG(INFO) << PrettyMethod(method); - } - LOG(INFO) << "Info: " << info; -} - -} // namespace art diff --git a/src/oat/runtime/support_deoptimize.cc b/src/oat/runtime/support_deoptimize.cc index 2cc5dd36d7..0b0a7c3809 100644 --- a/src/oat/runtime/support_deoptimize.cc +++ b/src/oat/runtime/support_deoptimize.cc @@ -28,88 +28,11 @@ namespace art { -extern "C" uint64_t artDeoptimize(JValue ret_val, Thread* self, mirror::AbstractMethod** sp) +extern "C" void artDeoptimize(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - // Return value may hold Object* so avoid suspension. - const char* old_cause = self->StartAssertNoThreadSuspension("Deoptimizing stack frame"); - CHECK(old_cause == NULL); - class DeoptimizationVisitor : public StackVisitor { - public: - DeoptimizationVisitor(Thread* thread, Context* context) - : StackVisitor(thread, context), shadow_frame_(NULL), runtime_frames_(0) { } - - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* m = GetMethod(); - if (m->IsRuntimeMethod()) { - if (runtime_frames_ == 0) { - runtime_frames_++; - return true; // Skip the callee save frame. - } else { - return false; // Caller was an upcall. 
- } - } - MethodHelper mh(m); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - CHECK(code_item != NULL); - uint16_t num_regs = code_item->registers_size_; - shadow_frame_ = ShadowFrame::Create(num_regs, NULL, m, GetDexPc()); - std::vector kinds = DescribeVRegs(m->GetDexMethodIndex(), &mh.GetDexFile(), - mh.GetDexCache(), mh.GetClassLoader(), - mh.GetClassDefIndex(), code_item, m, - m->GetAccessFlags(), GetDexPc()); - for(uint16_t reg = 0; reg < num_regs; reg++) { - VRegKind kind = static_cast(kinds.at(reg * 2)); - switch (kind) { - case kUndefined: - shadow_frame_->SetVReg(reg, 0xEBADDE09); - break; - case kConstant: - shadow_frame_->SetVReg(reg, kinds.at((reg * 2) + 1)); - break; - default: - shadow_frame_->SetVReg(reg, GetVReg(m, reg, kind)); - break; - } - } - return false; // Stop now we have built the shadow frame. - } - - std::vector DescribeVRegs(uint32_t dex_method_idx, - const DexFile* dex_file, - mirror::DexCache* dex_cache, - mirror::ClassLoader* class_loader, - uint32_t class_def_idx, - const DexFile::CodeItem* code_item, - mirror::AbstractMethod* method, - uint32_t method_access_flags, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - verifier::MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item, - dex_method_idx, method, method_access_flags, true); - verifier.Verify(); - return verifier.DescribeVRegs(dex_pc); - } - - ShadowFrame* shadow_frame_; - uint32_t runtime_frames_; - } visitor(self, self->GetLongJumpContext()); - visitor.WalkStack(false); - if (visitor.shadow_frame_ != NULL) { - self->SetDeoptimizationShadowFrame(visitor.shadow_frame_, ret_val); - return (*sp)->GetFrameSizeInBytes(); - } else { - return 0; // Caller was an upcall. 
- } -} - - -extern "C" JValue artEnterInterpreterFromDeoptimize(Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - JValue return_value; - UniquePtr shadow_frame(self->GetAndClearDeoptimizationShadowFrame(&return_value)); - self->EndAssertNoThreadSuspension(NULL); - return interpreter::EnterInterpreterFromDeoptimize(self, *shadow_frame.get(), return_value); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); + self->QuickDeliverException(); } } // namespace art diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc index 43d5c9bcce..5821063cf6 100644 --- a/src/oat/runtime/support_field.cc +++ b/src/oat/runtime/support_field.cc @@ -86,7 +86,8 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(field, true); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); } else { return field->Get32(obj); } @@ -106,7 +107,8 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(field, true); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); } else { return field->Get64(obj); } @@ -127,7 +129,8 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror: field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) 
{ - ThrowNullPointerExceptionForFieldAccess(field, true); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); } else { return field->GetObj(obj); } @@ -204,7 +207,8 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(field, false); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); } else { field->Set32(obj, new_value); return 0; // success @@ -230,7 +234,8 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(field, false); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); } else { field->Set64(obj, new_value); return 0; // success @@ -255,7 +260,8 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj sizeof(mirror::Object*)); if (LIKELY(field != NULL)) { if (UNLIKELY(obj == NULL)) { - ThrowNullPointerExceptionForFieldAccess(field, false); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); } else { field->SetObj(obj, new_value); return 0; // success diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc index 73f832aa35..a0b06fb521 100644 --- a/src/oat/runtime/support_fillarray.cc +++ b/src/oat/runtime/support_fillarray.cc @@ -15,6 +15,7 @@ */ #include "callee_save_frame.h" +#include "common_throws.h" 
#include "dex_instruction.h" #include "mirror/array.h" #include "mirror/object-inl.h" @@ -43,15 +44,15 @@ extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); if (UNLIKELY(array == NULL)) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", - "null array in FILL_ARRAY_DATA"); + ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); return -1; // Error } DCHECK(array->IsArrayInstance() && !array->IsObjectArray()); if (UNLIKELY(static_cast(payload->element_count) > array->GetLength())) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "failed FILL_ARRAY_DATA; length=%d, index=%d", - array->GetLength(), payload->element_count); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "failed FILL_ARRAY_DATA; length=%d, index=%d", + array->GetLength(), payload->element_count); return -1; // Error } uint32_t size_in_bytes = payload->element_count * payload->element_width; diff --git a/src/oat/runtime/support_instrumentation.cc b/src/oat/runtime/support_instrumentation.cc index 6598f196cd..8f56ce3a21 100644 --- a/src/oat/runtime/support_instrumentation.cc +++ b/src/oat/runtime/support_instrumentation.cc @@ -14,58 +14,51 @@ * limitations under the License. 
*/ -#include "base/logging.h" +#include "callee_save_frame.h" #include "instrumentation.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" #include "runtime.h" #include "thread-inl.h" -#include "trace.h" namespace art { extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMethod* method, + mirror::Object* this_object, Thread* self, mirror::AbstractMethod** sp, uintptr_t lr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - self->SetTopOfStack(sp, lr); - self->VerifyStack(); - Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - // +1 as frame id's start at 1, +1 as we haven't yet built this method's frame. - size_t frame_id = StackVisitor::ComputeNumFrames(self) + 2; - InstrumentationStackFrame instrumentation_frame(method, lr, frame_id); - self->PushInstrumentationStackFrame(instrumentation_frame); - - Trace* trace = instrumentation->GetTrace(); - if (trace != NULL) { - trace->LogMethodTraceEvent(self, method, Trace::kMethodTraceEnter); - } - - return instrumentation->GetSavedCodeFromMap(method); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); + instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object, + method, lr); + const void* result = instrumentation->GetQuickCodeFor(method); + CHECK(result != NULL) << PrettyMethod(method); + return result; } -extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp) +extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp, + uint64_t gpr_result, uint64_t fpr_result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below. + // We use the hand inline version to ensure the return_pc is assigned before verifying the + // stack. 
+ // Be aware the store below may well stomp on an incoming argument. + Locks::mutator_lock_->AssertSharedHeld(self); + mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); + *sp = callee_save; + uintptr_t* return_pc = reinterpret_cast(reinterpret_cast(sp) + + callee_save->GetReturnPcOffsetInBytes()); + CHECK(*return_pc == 0); self->SetTopOfStack(sp, 0); self->VerifyStack(); - // +1 as frame id's start at 1, +1 as we want the called frame not the frame being returned into. - size_t frame_id = StackVisitor::ComputeNumFrames(self) + 2; - InstrumentationStackFrame instrumentation_frame; - instrumentation_frame = self->PopInstrumentationStackFrame(); - if (frame_id != instrumentation_frame.frame_id_) { - LOG(ERROR) << "Expected frame_id=" << frame_id << " but found " << instrumentation_frame.frame_id_; - StackVisitor::DescribeStack(self); - } - Runtime* runtime = Runtime::Current(); - if (runtime->IsMethodTracingActive()) { - Trace* trace = runtime->GetInstrumentation()->GetTrace(); - trace->LogMethodTraceEvent(self, instrumentation_frame.method_, Trace::kMethodTraceExit); - } - if (self->ReadFlag(kEnterInterpreter)) { - return static_cast(GetDeoptimizationEntryPoint()) | - (static_cast(instrumentation_frame.return_pc_) << 32); - } else { - return instrumentation_frame.return_pc_; - } + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); + uint64_t return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(self, return_pc, + gpr_result, + fpr_result); + self->VerifyStack(); + return return_or_deoptimize_pc; } } // namespace art diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index ee19d4e969..bc9cc45c04 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -33,7 +33,7 @@ extern void* FindNativeMethod(Thread* self) { DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); - mirror::AbstractMethod* method 
= self->GetCurrentMethod(); + mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); DCHECK(method != NULL); // Lookup symbol address for method, on failure we'll return NULL with an @@ -139,7 +139,7 @@ extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) // | unused | // | unused | // | unused | <- sp - mirror::AbstractMethod* jni_method = self->GetCurrentMethod(); + mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL); DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack // Fix up this/jclass argument diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index 0cb3fe450f..0e00dfd48a 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -242,7 +242,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod* cal // go into deliver exception with the pending exception in r0 CHECK(thread->IsExceptionPending()); code = reinterpret_cast(art_quick_deliver_exception_from_code); - regs[0] = reinterpret_cast(thread->GetException()); + regs[0] = reinterpret_cast(thread->GetException(NULL)); thread->ClearException(); } else { // Expect class to at least be initializing. @@ -350,13 +350,14 @@ const void* UnresolvedDirectMethodTrampolineFromCode(mirror::AbstractMethod* cal #if !defined(ART_USE_PORTABLE_COMPILER) // Called by the AbstractMethodError. Called by stub code. 
-extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, +extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); - thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", - "abstract method \"%s\"", PrettyMethod(method).c_str()); - thread->QuickDeliverException(); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;", + "abstract method \"%s\"", PrettyMethod(method).c_str()); + self->QuickDeliverException(); } #else // ART_USE_PORTABLE_COMPILER extern void ThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* thread, diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc index 80ba118d96..b8c68a565c 100644 --- a/src/oat/runtime/support_throw.cc +++ b/src/oat/runtime/support_throw.cc @@ -23,14 +23,6 @@ namespace art { -// Used to implement MOVE_EXCEPTION. -extern "C" void* GetAndClearException(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(self->IsExceptionPending()); - mirror::Throwable* exception = self->GetException(); - self->ClearException(); - return exception; -} - // Deliver an exception that's pending on thread helping set up a callee save frame on the way. extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -39,7 +31,7 @@ extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::Abstr } // Called by generated call to throw an exception. 
-extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* thread, +extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* @@ -49,9 +41,15 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread * and threw a NPE if NULL. This routine responsible for setting * exception_ in thread and delivering the exception. */ - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); - thread->DeliverException(exception); - thread->QuickDeliverException(); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;", + "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } + self->QuickDeliverException(); } // Called by generated call to throw a NPE exception. @@ -59,29 +57,27 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - uint32_t dex_pc; - mirror::AbstractMethod* throw_method = self->GetCurrentMethod(&dex_pc); - ThrowNullPointerExceptionFromDexPC(throw_method, dex_pc); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); self->QuickDeliverException(); } // Called by generated call to throw an arithmetic divide by zero exception. 
-extern "C" void artThrowDivZeroFromCode(Thread* thread, +extern "C" void artThrowDivZeroFromCode(Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); - thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); - thread->QuickDeliverException(); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowArithmeticExceptionDivideByZero(self); + self->QuickDeliverException(); } // Called by generated call to throw an array index out of bounds exception. -extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, +extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); - thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", - "length=%d; index=%d", limit, index); - thread->QuickDeliverException(); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowArrayIndexOutOfBoundsException(index, length); + self->QuickDeliverException(); } extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::AbstractMethod** sp) @@ -95,8 +91,7 @@ extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, mirror::AbstractMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - mirror::AbstractMethod* method = self->GetCurrentMethod(); - ThrowNoSuchMethodError(method_idx, method); + ThrowNoSuchMethodError(method_idx); self->QuickDeliverException(); } diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc index a7c518a905..708e04e23b 100644 --- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc +++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc @@ -33,10 +33,6 @@ extern "C" uint32_t 
art_quick_is_assignable_from_code(const mirror::Class* klass extern "C" void art_quick_can_put_array_element_from_code(void*, void*); extern "C" void art_quick_check_cast_from_code(void*, void*); -// Debug entrypoints. -extern void DebugMe(mirror::AbstractMethod* method, uint32_t info); -extern "C" void art_quick_update_debugger(void*, void*, int32_t, void*); - // DexCache entrypoints. extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); @@ -130,10 +126,6 @@ void InitEntryPoints(EntryPoints* points) { points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; points->pCheckCastFromCode = art_quick_check_cast_from_code; - // Debug - points->pDebugMe = DebugMe; - points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled. - // DexCache points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; @@ -220,10 +212,6 @@ void InitEntryPoints(EntryPoints* points) { points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; }; -void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled) { - points->pUpdateDebuggerFromCode = (enabled ? 
art_quick_update_debugger : NULL); -} - uintptr_t GetInstrumentationExitPc() { return reinterpret_cast(art_quick_instrumentation_exit_from_code); } diff --git a/src/oat/runtime/x86/runtime_support_x86.S b/src/oat/runtime/x86/runtime_support_x86.S index 4b4689fdcb..4900b84830 100644 --- a/src/oat/runtime/x86/runtime_support_x86.S +++ b/src/oat/runtime/x86/runtime_support_x86.S @@ -512,26 +512,6 @@ TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeSta TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO - /* - * On entry, eax and ecx must be preserved, edx is dex PC - */ -DEFINE_FUNCTION art_quick_update_debugger - mov %eax, %ebx // stash away eax so that it's saved as if it were an argument - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - subl LITERAL(4), %esp // alignment padding - .cfi_adjust_cfa_offset 4 - PUSH esp // pass arg2 (sp) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass arg0 (dex pc) - call SYMBOL(artUpdateDebuggerFromCode) // artUpdateDebuggerFromCode(int32_t, Thread*, Method**) - addl LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - mov %ebx, %eax // restore original eax - ret -END_FUNCTION art_quick_update_debugger - ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO @@ -879,6 +859,7 @@ DEFINE_FUNCTION art_quick_set_obj_static_from_code mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 PUSH ebx // pass SP pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() .cfi_adjust_cfa_offset 4 @@ -972,7 +953,7 @@ DEFINE_FUNCTION 
art_quick_proxy_invoke_handler END_FUNCTION art_quick_proxy_invoke_handler DEFINE_FUNCTION art_quick_interpreter_entry - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame mov %esp, %edx // remember SP PUSH eax // alignment padding PUSH edx // pass SP @@ -992,71 +973,84 @@ END_FUNCTION art_quick_interpreter_entry * Routine that intercepts method calls and returns. */ DEFINE_FUNCTION art_quick_instrumentation_entry_from_code - xchgl %eax, (%esp) // place LR in eax, save eax - PUSH ecx // save ecx - PUSH edx // save edx - PUSH ebx // save ebx - lea 16(%esp), %edx // remember bottom of caller's frame - PUSH eax // pass LR - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + movl %esp, %edx // Save SP. + PUSH eax // Save eax which will be clobbered by the callee-save method. + subl LITERAL(8), %esp // Align stack. + .cfi_adjust_cfa_offset 8 + pushl 40(%esp) // Pass LR. .cfi_adjust_cfa_offset 4 - pushl 24(%esp) // pass Method* + PUSH edx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). .cfi_adjust_cfa_offset 4 - call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Thread*, SP, LR) - addl LITERAL(16), %esp // pop arguments - POP ebx // restore ebx - POP edx // restore edx - movl (%esp), %ecx // restore ecx (without popping) - movl %eax, (%esp) // place method's code pointer on stack - movl 4(%esp), %eax // restore eax (without popping) - movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 4(%esp) - // place instrumentation exit as return pc - ret // call method (and pop) + PUSH ecx // Pass receiver. + PUSH eax // Pass Method*. + call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR) + addl LITERAL(28), %esp // Pop arguments upto saved Method*. + movl 28(%esp), %edi // Restore edi. + movl %eax, 28(%esp) // Place code* over edi, just under return pc. 
+ movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp) + // Place instrumentation exit as return pc. + movl (%esp), %eax // Restore eax. + movl 8(%esp), %ecx // Restore ecx. + movl 12(%esp), %edx // Restore edx. + movl 16(%esp), %ebx // Restore ebx. + movl 20(%esp), %ebp // Restore ebp. + movl 24(%esp), %esi // Restore esi. + addl LITERAL(28), %esp // Wind stack back upto code*. + ret // Call method (and pop). END_FUNCTION art_quick_instrumentation_entry_from_code + DEFINE_FUNCTION art_quick_instrumentation_exit_from_code - mov %esp, %ecx // remember bottom of caller's frame - PUSH edx // save return value - PUSH eax // save other half of return value - PUSH ecx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current + pushl LITERAL(0) // Push a fake return PC as there will be none on the stack. + SETUP_REF_ONLY_CALLEE_SAVE_FRAME + mov %esp, %ecx // Remember SP + subl LITERAL(8), %esp // Save float return value. + .cfi_adjust_cfa_offset 8 + movd %xmm0, (%esp) + PUSH edx // Save gpr return value. + PUSH eax + subl LITERAL(8), %esp // Align stack + movd %xmm0, (%esp) + subl LITERAL(8), %esp // Pass float return value. + .cfi_adjust_cfa_offset 8 + movd %xmm0, (%esp) + PUSH edx // Pass gpr return value. + PUSH eax + PUSH ecx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current. .cfi_adjust_cfa_offset 4 - call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP) - mov %eax, %ecx // move returned link register - // TODO: Set link register for deopt - addl LITERAL(8), %esp // pop arguments + call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result) + mov %eax, %ecx // Move returned link register. + addl LITERAL(32), %esp // Pop arguments. + .cfi_adjust_cfa_offset -32 + movl %edx, %ebx // Move returned link register for deopt + // (ebx is pretending to be our LR). + POP eax // Restore gpr return value. + POP edx + movd (%esp), %xmm0 // Restore fpr return value. 
+ addl LITERAL(8), %esp .cfi_adjust_cfa_offset -8 - POP eax // restore return value - POP edx // restore other half of return value - jmp *%ecx // return + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + addl LITERAL(4), %esp // Remove fake return pc. + jmp *%ecx // Return. END_FUNCTION art_quick_instrumentation_exit_from_code /* - * The thread's enter interpreter flag is set and so we should transition to the interpreter - * rather than allow execution to continue in the frame below. There may be live results in - * registers depending on how complete the operation is when we safepoint - for example, a - * set operation may have completed while a get operation needs writing back into the vregs. + * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. */ DEFINE_FUNCTION art_quick_deoptimize + pushl %ebx // Fake that we were called. SETUP_REF_ONLY_CALLEE_SAVE_FRAME - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + mov %esp, %ecx // Remember SP. + subl LITERAL(8), %esp // Align stack. + .cfi_adjust_cfa_offset 8 + PUSH ecx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). .cfi_adjust_cfa_offset 4 - PUSH edx // push half of return value - PUSH eax // push other half of return value - call SYMBOL(artDeoptimize) // artDeoptimize(return value, Thread*, SP) - // Returns caller method's frame size. - addl LITERAL(16), %esp // pop arguments - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - testl %eax, %eax // Was the caller an upcall? - jz 1f // Return if caller was upcall. - lea (%esp, %eax), %edx // edx == bottom of caller's frame. - mov %edx, %esp // Remove frame. - SETUP_REF_ONLY_CALLEE_SAVE_FRAME - call SYMBOL(artEnterInterpreterFromDeoptimize) // Enter interpreter, callee-save ends stack fragment. - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME -1: - ret // Return to caller. 
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP) + int3 // Unreachable. END_FUNCTION art_quick_deoptimize /* diff --git a/src/reflection.cc b/src/reflection.cc index 73a8a53324..467575cdf5 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -17,6 +17,7 @@ #include "reflection.h" #include "class_linker.h" +#include "common_throws.h" #include "dex_file-inl.h" #include "invoke_arg_array_builder.h" #include "jni_internal.h" @@ -64,9 +65,9 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject uint32_t classes_size = classes == NULL ? 0 : classes->Size(); uint32_t arg_count = (objects != NULL) ? objects->GetLength() : 0; if (arg_count != classes_size) { - soa.Self()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "wrong number of arguments; expected %d, got %d", - classes_size, arg_count); + ThrowIllegalArgumentException(NULL, + StringPrintf("Wrong number of arguments; expected %d, got %d", + classes_size, arg_count).c_str()); return NULL; } @@ -103,23 +104,23 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject } bool VerifyObjectInClass(mirror::Object* o, mirror::Class* c) { - const char* exception = NULL; if (o == NULL) { - exception = "Ljava/lang/NullPointerException;"; + ThrowNullPointerException(NULL, "null receiver"); + return false; } else if (!o->InstanceOf(c)) { - exception = "Ljava/lang/IllegalArgumentException;"; - } - if (exception != NULL) { std::string expected_class_name(PrettyDescriptor(c)); std::string actual_class_name(PrettyTypeOf(o)); - Thread::Current()->ThrowNewExceptionF(exception, "expected receiver of type %s, but got %s", - expected_class_name.c_str(), actual_class_name.c_str()); + ThrowIllegalArgumentException(NULL, + StringPrintf("Expected receiver of type %s, but got %s", + expected_class_name.c_str(), + actual_class_name.c_str()).c_str()); return false; } return true; } -bool ConvertPrimitiveValue(Primitive::Type srcType, Primitive::Type dstType, +bool 
ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result, + Primitive::Type srcType, Primitive::Type dstType, const JValue& src, JValue& dst) { CHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot); switch (dstType) { @@ -196,10 +197,18 @@ bool ConvertPrimitiveValue(Primitive::Type srcType, Primitive::Type dstType, default: break; } - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "invalid primitive conversion from %s to %s", - PrettyDescriptor(srcType).c_str(), - PrettyDescriptor(dstType).c_str()); + if (!unbox_for_result) { + ThrowIllegalArgumentException(throw_location, + StringPrintf("Invalid primitive conversion from %s to %s", + PrettyDescriptor(srcType).c_str(), + PrettyDescriptor(dstType).c_str()).c_str()); + } else { + ThrowClassCastException(throw_location, + StringPrintf("Couldn't convert result of type %s to %s", + PrettyDescriptor(srcType).c_str(), + PrettyDescriptor(dstType).c_str() + ).c_str()); + } return false; } @@ -271,32 +280,48 @@ static std::string UnboxingFailureKind(mirror::AbstractMethod* m, int index, mir return "result"; } -static bool UnboxPrimitive(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, +static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o, + mirror::Class* dst_class, JValue& unboxed_value, mirror::AbstractMethod* m, int index, mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool unbox_for_result = (f == NULL) && (index == -1); if (!dst_class->IsPrimitive()) { - if (o != NULL && !o->InstanceOf(dst_class)) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "%s has type %s, got %s", - UnboxingFailureKind(m, index, f).c_str(), - PrettyDescriptor(dst_class).c_str(), - PrettyTypeOf(o).c_str()); + if (UNLIKELY(o != NULL && !o->InstanceOf(dst_class))) { + if (!unbox_for_result) { + ThrowIllegalArgumentException(throw_location, + StringPrintf("%s has type %s, got 
%s", + UnboxingFailureKind(m, index, f).c_str(), + PrettyDescriptor(dst_class).c_str(), + PrettyTypeOf(o).c_str()).c_str()); + } else { + ThrowClassCastException(throw_location, + StringPrintf("Couldn't convert result of type %s to %s", + PrettyTypeOf(o).c_str(), + PrettyDescriptor(dst_class).c_str() + ).c_str()); + } return false; } unboxed_value.SetL(o); return true; - } else if (dst_class->GetPrimitiveType() == Primitive::kPrimVoid) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "can't unbox %s to void", - UnboxingFailureKind(m, index, f).c_str()); + } + if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) { + ThrowIllegalArgumentException(throw_location, + StringPrintf("Can't unbox %s to void", + UnboxingFailureKind(m, index, f).c_str()).c_str()); return false; } - - if (o == NULL) { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "%s has type %s, got null", - UnboxingFailureKind(m, index, f).c_str(), - PrettyDescriptor(dst_class).c_str()); + if (UNLIKELY(o == NULL)) { + if (!unbox_for_result) { + ThrowIllegalArgumentException(throw_location, + StringPrintf("%s has type %s, got null", + UnboxingFailureKind(m, index, f).c_str(), + PrettyDescriptor(dst_class).c_str()).c_str()); + } else { + ThrowNullPointerException(throw_location, + StringPrintf("Expected to unbox a '%s' primitive type but was returned null", + PrettyDescriptor(dst_class).c_str()).c_str()); + } return false; } @@ -330,32 +355,35 @@ static bool UnboxPrimitive(mirror::Object* o, mirror::Class* dst_class, JValue& src_class = class_linker->FindPrimitiveClass('S'); boxed_value.SetS(primitive_field->GetShort(o)); } else { - Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", - "%s has type %s, got %s", - UnboxingFailureKind(m, index, f).c_str(), - PrettyDescriptor(dst_class).c_str(), - PrettyDescriptor(src_descriptor.c_str()).c_str()); + ThrowIllegalArgumentException(throw_location, + 
StringPrintf("%s has type %s, got %s", + UnboxingFailureKind(m, index, f).c_str(), + PrettyDescriptor(dst_class).c_str(), + PrettyDescriptor(src_descriptor.c_str()).c_str() + ).c_str()); return false; } - return ConvertPrimitiveValue(src_class->GetPrimitiveType(), dst_class->GetPrimitiveType(), + return ConvertPrimitiveValue(throw_location, unbox_for_result, + src_class->GetPrimitiveType(), dst_class->GetPrimitiveType(), boxed_value, unboxed_value); } bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, mirror::AbstractMethod* m, size_t index) { CHECK(m != NULL); - return UnboxPrimitive(o, dst_class, unboxed_value, m, index, NULL); + return UnboxPrimitive(NULL, o, dst_class, unboxed_value, m, index, NULL); } bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value, mirror::Field* f) { CHECK(f != NULL); - return UnboxPrimitive(o, dst_class, unboxed_value, NULL, -1, f); + return UnboxPrimitive(NULL, o, dst_class, unboxed_value, NULL, -1, f); } -bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value) { - return UnboxPrimitive(o, dst_class, unboxed_value, NULL, -1, NULL); +bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o, + mirror::Class* dst_class, JValue& unboxed_value) { + return UnboxPrimitive(&throw_location, o, dst_class, unboxed_value, NULL, -1, NULL); } } // namespace art diff --git a/src/reflection.h b/src/reflection.h index 8f3224380c..e9f4e0893e 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -29,6 +29,7 @@ class Object; } // namespace mirror union JValue; class ScopedObjectAccess; +class ThrowLocation; mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -38,11 +39,13 @@ bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JVal bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, 
JValue& unboxed_value, mirror::Field* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value) +bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o, + mirror::Class* dst_class, JValue& unboxed_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, - JValue& dst) +bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result, + Primitive::Type src_class, Primitive::Type dst_class, + const JValue& src, JValue& dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args) diff --git a/src/runtime.cc b/src/runtime.cc index 3e9cd8eb53..23a7309bd8 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -43,6 +43,7 @@ #include "jni_internal.h" #include "mirror/abstract_method-inl.h" #include "mirror/array.h" +#include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/field.h" #include "mirror/field-inl.h" @@ -97,7 +98,7 @@ Runtime::Runtime() stats_enabled_(false), method_trace_(0), method_trace_file_size_(0), - instrumentation_(NULL), + instrumentation_(), use_compile_time_class_path_(false), main_thread_group_(NULL), system_thread_group_(NULL) { @@ -119,11 +120,7 @@ Runtime::~Runtime() { } shutting_down_ = true; } - - if (IsMethodTracingActive()) { - Trace::Shutdown(); - } - delete instrumentation_; + Trace::Shutdown(); // Make sure to let the GC complete if it is running. 
heap_->WaitForConcurrentGcToComplete(self); @@ -171,8 +168,11 @@ struct AbortState { os << "Aborting thread:\n"; self->Dump(os); if (self->IsExceptionPending()) { - os << "Pending " << PrettyTypeOf(self->GetException()) << " on thread:\n" - << self->GetException()->Dump(); + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + os << "Pending exception " << PrettyTypeOf(exception) + << " thrown by '" << throw_location.Dump() << "\n" + << exception->Dump(); } } DumpAllThreads(os, self); @@ -652,9 +652,9 @@ bool Runtime::Start() { // Pre-allocate an OutOfMemoryError for the double-OOME case. Thread* self = Thread::Current(); - self->ThrowNewException("Ljava/lang/OutOfMemoryError;", + self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;", "OutOfMemoryError thrown while trying to throw OutOfMemoryError; no stack available"); - pre_allocated_OutOfMemoryError_ = self->GetException(); + pre_allocated_OutOfMemoryError_ = self->GetException(NULL); self->ClearException(); // Restore main thread state to kNative as expected by native code. 
@@ -794,7 +794,6 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) { is_compiler_ = options->is_compiler_; is_zygote_ = options->is_zygote_; - interpreter_only_ = options->interpreter_only_; is_concurrent_gc_enabled_ = options->is_concurrent_gc_enabled_; vfprintf_ = options->hook_vfprintf_; @@ -809,6 +808,10 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) { intern_table_ = new InternTable; + if (options->interpreter_only_) { + GetInstrumentation()->ForceInterpretOnly(); + } + heap_ = new Heap(options->heap_initial_size_, options->heap_growth_limit_, options->heap_min_free_, @@ -1196,28 +1199,6 @@ void Runtime::SetCalleeSaveMethod(mirror::AbstractMethod* method, CalleeSaveType callee_save_methods_[type] = method; } -void Runtime::EnableMethodTracing(Trace* trace) { - CHECK(!IsMethodTracingActive()); - if (instrumentation_ == NULL) { - instrumentation_ = new Instrumentation(); - } - instrumentation_->SetTrace(trace); -} - -void Runtime::DisableMethodTracing() { - CHECK(IsMethodTracingActive()); - instrumentation_->RemoveTrace(); -} - -bool Runtime::IsMethodTracingActive() const { - return instrumentation_ != NULL && instrumentation_->GetTrace() != NULL; -} - -Instrumentation* Runtime::GetInstrumentation() const { - CHECK(IsMethodTracingActive()); - return instrumentation_; -} - const std::vector& Runtime::GetCompileTimeClassPath(jobject class_loader) { if (class_loader == NULL) { return GetClassLinker()->GetBootClassPath(); diff --git a/src/runtime.h b/src/runtime.h index f8788ad934..67c8e3662c 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -30,6 +30,7 @@ #include "globals.h" #include "heap.h" #include "instruction_set.h" +#include "instrumentation.h" #include "jobject_comparator.h" #include "locks.h" #include "root_visitor.h" @@ -49,7 +50,6 @@ class Throwable; class ClassLinker; class DexFile; class Heap; -class Instrumentation; class InternTable; struct JavaVMExt; class MonitorList; @@ -112,10 +112,6 @@ class 
Runtime { return is_zygote_; } - bool InterpreterOnly() const { - return interpreter_only_; - } - bool IsConcurrentGcEnabled() const { return is_concurrent_gc_enabled_; } @@ -234,8 +230,7 @@ class Runtime { void DirtyRoots(); // Visit all the roots. - void VisitRoots(RootVisitor* visitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Visit all of the roots we can do safely do concurrently. void VisitConcurrentRoots(RootVisitor* visitor, void* arg); @@ -350,10 +345,9 @@ class Runtime { bool InitZygote(); void DidForkFromZygote(); - void EnableMethodTracing(Trace* trace); - void DisableMethodTracing(); - bool IsMethodTracingActive() const; - Instrumentation* GetInstrumentation() const; + instrumentation::Instrumentation* GetInstrumentation() { + return &instrumentation_; + } bool UseCompileTimeClassPath() const { return use_compile_time_class_path_; @@ -383,7 +377,6 @@ class Runtime { bool is_compiler_; bool is_zygote_; - bool interpreter_only_; bool is_concurrent_gc_enabled_; // The host prefix is used during cross compilation. It is removed @@ -466,7 +459,7 @@ class Runtime { bool method_trace_; std::string method_trace_file_; size_t method_trace_file_size_; - Instrumentation* instrumentation_; + instrumentation::Instrumentation instrumentation_; typedef SafeMap, JobjectComparator> CompileTimeClassPaths; CompileTimeClassPaths compile_time_class_paths_; diff --git a/src/runtime_support.cc b/src/runtime_support.cc index 5b2c58c55c..b601f8c5d9 100644 --- a/src/runtime_support.cc +++ b/src/runtime_support.cc @@ -101,16 +101,16 @@ int32_t art_f2i(float f) { namespace art { // Helper function to allocate array for FILLED_NEW_ARRAY. 
-mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, +mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, int32_t component_count, Thread* self, bool access_check) { if (UNLIKELY(component_count < 0)) { - self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); + ThrowNegativeArraySizeException(component_count); return NULL; // Failure } - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); if (klass == NULL) { // Error DCHECK(self->IsExceptionPending()); return NULL; // Failure @@ -118,20 +118,21 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMet } if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { - self->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "Bad filled array request for type %s", - PrettyDescriptor(klass).c_str()); + ThrowRuntimeException("Bad filled array request for type %s", + PrettyDescriptor(klass).c_str()); } else { - self->ThrowNewExceptionF("Ljava/lang/InternalError;", + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", "Found type %s; filled-new-array not implemented for anything but \'int\'", PrettyDescriptor(klass).c_str()); } return NULL; // Failure } else { if (access_check) { - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); + mirror::Class* 
referrer_klass = referrer->GetDeclaringClass(); + if (UNLIKELY(!referrer_klass->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer_klass, klass); return NULL; // Failure } } @@ -194,7 +195,9 @@ mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMetho FieldHelper fh(resolved_field); if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || fh.FieldSize() != expected_size)) { - self->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;", + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", "Attempted read of %zd-bit %s on field '%s'", expected_size * (32 / sizeof(int32_t)), is_primitive ? "primitive" : "non-primitive", @@ -232,7 +235,9 @@ mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* } else if (UNLIKELY(this_object == NULL && type != kStatic)) { // Maintain interpreter-like semantics where NullPointerException is thrown // after potential NoSuchMethodError from class linker. - ThrowNullPointerExceptionForMethodAccess(referrer, method_idx, type); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(referrer == throw_location.GetMethod()); + ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); return NULL; // Failure. } else { if (!access_check) { @@ -320,7 +325,7 @@ mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* // Behavior to agree with that of the verifier. MethodHelper mh(resolved_method); ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), - mh.GetSignature(), referrer); + mh.GetSignature()); return NULL; // Failure. 
} } @@ -363,10 +368,12 @@ mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, const mirror::AbstractM void ThrowStackOverflowError(Thread* self) { CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; - // Remove extra entry pushed onto second stack during method tracing. - if (Runtime::Current()->IsMethodTracingActive()) { - InstrumentationMethodUnwindFromCode(self); + + if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { + // Remove extra entry pushed onto second stack during method tracing. + Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); } + self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. JNIEnvExt* env = self->GetJniEnv(); std::string msg("stack size "); @@ -430,33 +437,36 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char invocation_args); // Unbox result and handle error conditions. - if (!soa.Self()->IsExceptionPending()) { - if (shorty[0] == 'V' || result == NULL) { + if (LIKELY(!soa.Self()->IsExceptionPending())) { + if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { // Do nothing. return zero; } else { - JValue result_unboxed; - MethodHelper mh(soa.Decode(interface_method_jobj)); - mirror::Class* result_type = mh.GetReturnType(); mirror::Object* result_ref = soa.Decode(result); - bool unboxed_okay = UnboxPrimitiveForResult(result_ref, result_type, result_unboxed); - if (!unboxed_okay) { - // UnboxPrimitiveForResult creates an IllegalArgumentException. Discard and create a - // meaningful ClassCastException. 
+ mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); + mirror::AbstractMethod* proxy_method; + if (interface_method->GetDeclaringClass()->IsInterface()) { + proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); + } else { + // Proxy dispatch to a method defined in Object. + DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); + proxy_method = interface_method; + } + ThrowLocation throw_location(rcvr, proxy_method, -1); + JValue result_unboxed; + if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { DCHECK(soa.Self()->IsExceptionPending()); - soa.Self()->ClearException(); - soa.Self()->ThrowNewException("Ljava/lang/ClassCastException;", - StringPrintf("Couldn't convert result of type %s to %s", - PrettyTypeOf(result_ref).c_str(), - PrettyDescriptor(result_type).c_str() - ).c_str()); + return zero; } return result_unboxed; } } else { // In the case of checked exceptions that aren't declared, the exception must be wrapped by // a UndeclaredThrowableException. 
- mirror::Throwable* exception = soa.Self()->GetException(); + mirror::Throwable* exception = soa.Self()->GetException(NULL); if (exception->IsCheckedException()) { mirror::Object* rcvr = soa.Decode(rcvr_jobj); mirror::SynthesizedProxyClass* proxy_class = @@ -482,7 +492,9 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char declares_exception = declared_exception->IsAssignableFrom(exception_class); } if (!declares_exception) { - soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;", + ThrowLocation throw_location(rcvr, proxy_method, -1); + soa.Self()->ThrowNewWrappedException(throw_location, + "Ljava/lang/reflect/UndeclaredThrowableException;", NULL); } } diff --git a/src/runtime_support.h b/src/runtime_support.h index 89026c1bf6..c7eb9576df 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -68,7 +68,8 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::Abs } if (access_check) { if (UNLIKELY(!klass->IsInstantiable())) { - self->ThrowNewException("Ljava/lang/InstantiationError;", + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", PrettyDescriptor(klass).c_str()); return NULL; // Failure } @@ -95,7 +96,7 @@ static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::Abstr Thread* self, bool access_check) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { - self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); + ThrowNegativeArraySizeException(component_count); return NULL; // Failure } mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); @@ -257,8 +258,9 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) UNLOCK_FUNCTION(monitor_lock_) { // Save any pending exception over monitor exit call. 
mirror::Throwable* saved_exception = NULL; + ThrowLocation saved_throw_location; if (UNLIKELY(self->IsExceptionPending())) { - saved_exception = self->GetException(); + saved_exception = self->GetException(&saved_throw_location); self->ClearException(); } // Decode locked object and unlock, before popping local references. @@ -267,11 +269,11 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" << saved_exception->Dump() << "\nEncountered second exception during implicit MonitorExit:\n" - << self->GetException()->Dump(); + << self->GetException(NULL)->Dump(); } // Restore pending exception. if (saved_exception != NULL) { - self->SetException(saved_exception); + self->SetException(saved_throw_location, saved_exception); } } @@ -280,14 +282,12 @@ static inline void CheckReferenceResult(mirror::Object* o, Thread* self) if (o == NULL) { return; } + mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); if (o == kInvalidIndirectRefObject) { - JniAbortF(NULL, "invalid reference returned from %s", - PrettyMethod(self->GetCurrentMethod()).c_str()); + JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); } // Make sure that the result is an instance of the type this method was expected to return. 
- mirror::AbstractMethod* m = self->GetCurrentMethod(); - MethodHelper mh(m); - mirror::Class* return_type = mh.GetReturnType(); + mirror::Class* return_type = MethodHelper(m).GetReturnType(); if (!o->InstanceOf(return_type)) { JniAbortF(NULL, "attempt to return an instance of %s from %s", diff --git a/src/stack.cc b/src/stack.cc index 66051f2287..8690a36387 100644 --- a/src/stack.cc +++ b/src/stack.cc @@ -24,9 +24,28 @@ #include "mirror/object_array-inl.h" #include "object_utils.h" #include "thread_list.h" +#include "throw_location.h" namespace art { +mirror::Object* ShadowFrame::GetThisObject() const { + mirror::AbstractMethod* m = GetMethod(); + if (m->IsStatic()) { + return NULL; + } else if (m->IsNative()) { + return GetVRegReference(0); + } else { + const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem(); + CHECK(code_item != NULL) << PrettyMethod(m); + uint16_t reg = code_item->registers_size_ - code_item->ins_size_; + return GetVRegReference(reg); + } +} + +ThrowLocation ShadowFrame::GetCurrentLocationForThrow() const { + return ThrowLocation(GetThisObject(), GetMethod(), GetDexPC()); +} + size_t ManagedStack::NumJniShadowFrameReferences() const { size_t count = 0; for (const ManagedStack* current_fragment = this; current_fragment != NULL; @@ -59,7 +78,7 @@ StackVisitor::StackVisitor(Thread* thread, Context* context) : thread_(thread), cur_shadow_frame_(NULL), cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0), context_(context) { - DCHECK(thread == Thread::Current() || thread->IsSuspended()); + DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread; } uint32_t StackVisitor::GetDexPc() const { @@ -72,6 +91,33 @@ uint32_t StackVisitor::GetDexPc() const { } } +mirror::Object* StackVisitor::GetThisObject() const { + mirror::AbstractMethod* m = GetMethod(); + if (m->IsStatic()) { + return NULL; + } else if (m->IsNative()) { + if (cur_quick_frame_ != NULL) { + StackIndirectReferenceTable* sirt = + 
reinterpret_cast( + reinterpret_cast(cur_quick_frame_) + + m->GetSirtOffsetInBytes()); + return sirt->GetReference(0); + } else { + return cur_shadow_frame_->GetVRegReference(0); + } + } else { + const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem(); + if (code_item == NULL) { + UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method" + << PrettyMethod(m); + return NULL; + } else { + uint16_t reg = code_item->registers_size_ - code_item->ins_size_; + return reinterpret_cast(GetVReg(m, reg, kReferenceVReg)); + } + } +} + size_t StackVisitor::GetNativePcOffset() const { DCHECK(!IsShadowFrame()); return GetMethod()->NativePcOffset(cur_quick_frame_pc_); @@ -198,7 +244,7 @@ std::string StackVisitor::DescribeLocation() const { return result; } -InstrumentationStackFrame StackVisitor::GetInstrumentationStackFrame(uint32_t depth) const { +instrumentation::InstrumentationStackFrame StackVisitor::GetInstrumentationStackFrame(uint32_t depth) const { return thread_->GetInstrumentationStack()->at(depth); } @@ -221,9 +267,8 @@ void StackVisitor::SanityCheckFrame() const { void StackVisitor::WalkStack(bool include_transitions) { DCHECK(thread_ == Thread::Current() || thread_->IsSuspended()); - const std::deque* instrumentation_stack = - thread_->GetInstrumentationStack(); - bool method_tracing_active = instrumentation_stack != NULL; + CHECK_EQ(cur_depth_, 0U); + bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled(); uint32_t instrumentation_stack_depth = 0; for (const ManagedStack* current_fragment = thread_->GetManagedStack(); current_fragment != NULL; current_fragment = current_fragment->GetLink()) { @@ -235,6 +280,7 @@ void StackVisitor::WalkStack(bool include_transitions) { DCHECK(current_fragment->GetTopShadowFrame() == NULL); mirror::AbstractMethod* method = *cur_quick_frame_; while (method != NULL) { + DCHECK(cur_quick_frame_pc_ != GetInstrumentationExitPc()); SanityCheckFrame(); bool 
should_continue = VisitFrame(); if (UNLIKELY(!should_continue)) { @@ -248,16 +294,24 @@ void StackVisitor::WalkStack(bool include_transitions) { size_t return_pc_offset = method->GetReturnPcOffsetInBytes(); byte* return_pc_addr = reinterpret_cast(cur_quick_frame_) + return_pc_offset; uintptr_t return_pc = *reinterpret_cast(return_pc_addr); - if (UNLIKELY(method_tracing_active)) { + if (UNLIKELY(exit_stubs_installed)) { // While profiling, the return pc is restored from the side stack, except when walking // the stack for an exception where the side stack will be unwound in VisitFrame. - // TODO: stop using include_transitions as a proxy for is this the catch block visitor. - if (GetInstrumentationExitPc() == return_pc && !include_transitions) { - // TODO: unify trace and managed stack. - InstrumentationStackFrame instrumentation_frame = GetInstrumentationStackFrame(instrumentation_stack_depth); + if (GetInstrumentationExitPc() == return_pc) { + instrumentation::InstrumentationStackFrame instrumentation_frame = + GetInstrumentationStackFrame(instrumentation_stack_depth); instrumentation_stack_depth++; - CHECK(instrumentation_frame.method_ == GetMethod()) << "Excepted: " << PrettyMethod(method) + if (instrumentation_frame.method_ != GetMethod()) { + LOG(FATAL) << "Expected: " << PrettyMethod(instrumentation_frame.method_) << " Found: " << PrettyMethod(GetMethod()); + } + if (num_frames_ != 0) { + // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite + // recursion. 
+ CHECK(instrumentation_frame.frame_id_ == GetFrameId()) + << "Expected: " << instrumentation_frame.frame_id_ + << " Found: " << GetFrameId(); + } return_pc = instrumentation_frame.return_pc_; } } @@ -278,13 +332,16 @@ void StackVisitor::WalkStack(bool include_transitions) { cur_shadow_frame_ = cur_shadow_frame_->GetLink(); } while(cur_shadow_frame_ != NULL); } - cur_depth_++; if (include_transitions) { bool should_continue = VisitFrame(); if (!should_continue) { return; } } + cur_depth_++; + } + if (num_frames_ != 0) { + CHECK_EQ(cur_depth_, num_frames_); } } diff --git a/src/stack.h b/src/stack.h index e0cb28e95a..eb187b2b84 100644 --- a/src/stack.h +++ b/src/stack.h @@ -168,6 +168,10 @@ class ShadowFrame { return method_; } + mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetMethod(mirror::AbstractMethod* method) { DCHECK_NE(method, static_cast(NULL)); method_ = method; @@ -368,6 +372,8 @@ class StackVisitor { uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const { @@ -383,7 +389,7 @@ class StackVisitor { // Returns the height of the stack in the managed stack frames, including transitions. size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetNumFrames() - cur_depth_; + return GetNumFrames() - cur_depth_ - 1; } // Returns a frame ID for JDWP use, starting from 1. 
@@ -503,7 +509,7 @@ class StackVisitor { private: - InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const; + instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const; void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/src/thread.cc b/src/thread.cc index 9c58b6d0ef..2c955b173b 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -61,6 +61,7 @@ #include "thread_list.h" #include "utils.h" #include "verifier/dex_gc_map.h" +#include "verifier/method_verifier.h" #include "well_known_classes.h" namespace art { @@ -92,24 +93,16 @@ void Thread::InitFunctionPointers() { InitEntryPoints(&entrypoints_); } -void Thread::SetDebuggerUpdatesEnabled(bool enabled) { -#if !defined(ART_USE_PORTABLE_COMPILER) - ChangeDebuggerEntryPoint(&entrypoints_, enabled); -#else - UNIMPLEMENTED(FATAL); -#endif +void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { + deoptimization_shadow_frame_ = sf; } - -void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf, const JValue& ret_val) { - CHECK(sf != NULL); - deoptimization_shadow_frame_ = sf; +void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) { deoptimization_return_value_.SetJ(ret_val.GetJ()); } ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) { ShadowFrame* sf = deoptimization_shadow_frame_; - DCHECK(sf != NULL); deoptimization_shadow_frame_ = NULL; ret_val->SetJ(deoptimization_return_value_.GetJ()); return sf; @@ -327,9 +320,6 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) { InitFunctionPointers(); InitCardTable(); InitTid(); - if (Runtime::Current()->InterpreterOnly()) { - AtomicSetFlag(kEnterInterpreter); - } // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this // avoids pthread_self_ ever being invalid when discovered from Thread::Current(). 
pthread_self_ = pthread_self(); @@ -864,7 +854,7 @@ static bool ShouldShowNativeStack(const Thread* thread) { // We don't just check kNative because native methods will be in state kSuspended if they're // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the // thread-startup states if it's early enough in their life cycle (http://b/7432159). - mirror::AbstractMethod* current_method = thread->GetCurrentMethod(); + mirror::AbstractMethod* current_method = thread->GetCurrentMethod(NULL); return current_method != NULL && current_method->IsNative(); } @@ -948,7 +938,8 @@ Thread::Thread(bool daemon) throwing_OutOfMemoryError_(false), debug_suspend_count_(0), debug_invoke_req_(new DebugInvokeReq), - instrumentation_stack_(new std::deque), + deoptimization_shadow_frame_(NULL), + instrumentation_stack_(new std::deque), name_(new std::string(kThreadNameDuringStartup)), daemon_(daemon), pthread_self_(0), @@ -975,7 +966,7 @@ bool Thread::IsStillStarting() const { void Thread::AssertNoPendingException() const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); - mirror::Throwable* exception = GetException(); + mirror::Throwable* exception = GetException(NULL); LOG(FATAL) << "No pending exception expected: " << exception->Dump(); } } @@ -1427,82 +1418,131 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job return result; } -void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) { +void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location, + const char* exception_class_descriptor, const char* fmt, ...) 
{ va_list args; va_start(args, fmt); - ThrowNewExceptionV(exception_class_descriptor, fmt, args); + ThrowNewExceptionV(throw_location, exception_class_descriptor, + fmt, args); va_end(args); } -void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) { +void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location, + const char* exception_class_descriptor, + const char* fmt, va_list ap) { std::string msg; StringAppendV(&msg, fmt, ap); - ThrowNewException(exception_class_descriptor, msg.c_str()); + ThrowNewException(throw_location, exception_class_descriptor, msg.c_str()); } -void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) { +void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor, + const char* msg) { AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException. - ThrowNewWrappedException(exception_class_descriptor, msg); -} - -void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) { - // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception". - CHECK_EQ('L', exception_class_descriptor[0]); - std::string descriptor(exception_class_descriptor + 1); - CHECK_EQ(';', descriptor[descriptor.length() - 1]); - descriptor.erase(descriptor.length() - 1); - - JNIEnv* env = GetJniEnv(); - jobject cause = env->ExceptionOccurred(); - env->ExceptionClear(); + ThrowNewWrappedException(throw_location, exception_class_descriptor, msg); +} + +void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location, + const char* exception_class_descriptor, + const char* msg) { + DCHECK_EQ(this, Thread::Current()); + // Ensure we don't forget arguments over object allocation. + SirtRef saved_throw_this(this, throw_location.GetThis()); + SirtRef saved_throw_method(this, throw_location.GetMethod()); + // Ignore the cause throw location. 
TODO: should we report this as a re-throw? + SirtRef cause(this, GetException(NULL)); + ClearException(); + Runtime* runtime = Runtime::Current(); - ScopedLocalRef exception_class(env, env->FindClass(descriptor.c_str())); - if (exception_class.get() == NULL) { - LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI FindClass failed: " - << PrettyTypeOf(GetException()); + mirror::ClassLoader* cl = NULL; + if (throw_location.GetMethod() != NULL) { + cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader(); + } + SirtRef + exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl)); + if (UNLIKELY(exception_class.get() == NULL)) { CHECK(IsExceptionPending()); + LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); return; } - if (!Runtime::Current()->IsStarted()) { - // Something is trying to throw an exception without a started - // runtime, which is the common case in the compiler. We won't be - // able to invoke the constructor of the exception, so use - // AllocObject which will not invoke a constructor. - ScopedLocalRef exception( - env, reinterpret_cast(env->AllocObject(exception_class.get()))); - if (exception.get() != NULL) { - ScopedObjectAccessUnchecked soa(env); - mirror::Throwable* t = - reinterpret_cast(soa.Self()->DecodeJObject(exception.get())); - t->SetDetailMessage(mirror::String::AllocFromModifiedUtf8(soa.Self(), msg)); - if (cause != NULL) { - t->SetCause(soa.Decode(cause)); - } - soa.Self()->SetException(t); + + if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) { + DCHECK(IsExceptionPending()); + return; + } + DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); + SirtRef exception(this, + down_cast(exception_class->AllocObject(this))); + + // Choose an appropriate constructor and set up the arguments. 
+ const char* signature; + SirtRef msg_string(this, NULL); + if (msg != NULL) { + // Ensure we remember this and the method over the String allocation. + msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg)); + if (UNLIKELY(msg_string.get() == NULL)) { + CHECK(IsExceptionPending()); // OOME. + return; + } + if (cause.get() == NULL) { + signature = "(Ljava/lang/String;)V"; } else { - LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: " - << PrettyTypeOf(GetException()); - CHECK(IsExceptionPending()); + signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; + } + } else { + if (cause.get() == NULL) { + signature = "()V"; + } else { + signature = "(Ljava/lang/Throwable;)V"; } - return; } - int rc = ::art::ThrowNewException(env, exception_class.get(), msg, cause); - if (rc != JNI_OK) { - LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI ThrowNew failed: " - << PrettyTypeOf(GetException()); - CHECK(IsExceptionPending()); + mirror::AbstractMethod* exception_init_method = + exception_class->FindDeclaredDirectMethod("", signature); + + CHECK(exception_init_method != NULL) << "No " << signature << " in " + << PrettyDescriptor(exception_class_descriptor); + + if (UNLIKELY(!runtime->IsStarted())) { + // Something is trying to throw an exception without a started runtime, which is the common + // case in the compiler. We won't be able to invoke the constructor of the exception, so set + // the exception fields directly. 
+ if (msg != NULL) { + exception->SetDetailMessage(msg_string.get()); + } + if (cause.get() != NULL) { + exception->SetCause(cause.get()); + } + ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(), + throw_location.GetDexPc()); + SetException(gc_safe_throw_location, exception.get()); + } else { + ArgArray args("VLL", 3); + args.Append(reinterpret_cast(exception.get())); + if (msg != NULL) { + args.Append(reinterpret_cast(msg_string.get())); + } + if (cause.get() != NULL) { + args.Append(reinterpret_cast(cause.get())); + } + JValue result; + exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V'); + if (LIKELY(!IsExceptionPending())) { + ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(), + throw_location.GetDexPc()); + SetException(gc_safe_throw_location, exception.get()); + } } } void Thread::ThrowOutOfMemoryError(const char* msg) { LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : "")); + ThrowLocation throw_location = GetCurrentLocationForThrow(); if (!throwing_OutOfMemoryError_) { throwing_OutOfMemoryError_ = true; - ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); + ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg); } else { Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one. 
- SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); + SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError()); } throwing_OutOfMemoryError_ = false; } @@ -1538,8 +1578,6 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), ENTRY_POINT_INFO(pCanPutArrayElementFromCode), ENTRY_POINT_INFO(pCheckCastFromCode), - ENTRY_POINT_INFO(pDebugMe), - ENTRY_POINT_INFO(pUpdateDebuggerFromCode), ENTRY_POINT_INFO(pInitializeStaticStorage), ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), ENTRY_POINT_INFO(pInitializeTypeFromCode), @@ -1644,13 +1682,17 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ static const bool kDebugExceptionDelivery = false; class CatchBlockStackVisitor : public StackVisitor { public: - CatchBlockStackVisitor(Thread* self, mirror::Throwable* exception) + CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location, + mirror::Throwable* exception, bool is_deoptimization) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(self, self->GetLongJumpContext()), - self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL), - throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL), - handler_quick_frame_pc_(0), handler_dex_pc_(0), native_method_count_(0), - method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) { + self_(self), exception_(exception), is_deoptimization_(is_deoptimization), + to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location), + handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0), + native_method_count_(0), + method_tracing_active_(is_deoptimization || + Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()), + instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) { // Exception not in root sets, can't allow GC. 
last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block"); } @@ -1659,38 +1701,38 @@ class CatchBlockStackVisitor : public StackVisitor { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mirror::AbstractMethod* method = GetMethod(); if (method == NULL) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. handler_quick_frame_pc_ = GetCurrentQuickFramePc(); handler_quick_frame_ = GetCurrentQuickFrame(); return false; // End stack walk. - } - uint32_t dex_pc = DexFile::kDexNoIndex; - if (method->IsRuntimeMethod()) { - // ignore callee save method - DCHECK(method->IsCalleeSaveMethod()); } else { - if (throw_method_ == NULL) { - throw_method_ = method; - throw_frame_id_ = GetFrameId(); - throw_dex_pc_ = GetDexPc(); + if (UNLIKELY(method_tracing_active_ && + GetInstrumentationExitPc() == GetReturnPc())) { + // Keep count of the number of unwinds during instrumentation. + instrumentation_frames_to_pop_++; } - if (method->IsNative()) { - native_method_count_++; + if (method->IsRuntimeMethod()) { + // Ignore callee save method. 
+ DCHECK(method->IsCalleeSaveMethod()); + return true; + } else if (is_deoptimization_) { + return HandleDeoptimization(method); } else { - // Unwind stack when an exception occurs during instrumentation - if (UNLIKELY(method_tracing_active_ && - GetInstrumentationExitPc() == GetCurrentQuickFramePc())) { - uintptr_t pc = InstrumentationMethodUnwindFromCode(Thread::Current()); - dex_pc = method->ToDexPc(pc); - } else { - dex_pc = GetDexPc(); - } + return HandleTryItems(method); } } + } + + bool HandleTryItems(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t dex_pc = DexFile::kDexNoIndex; + if (method->IsNative()) { + native_method_count_++; + } else { + dex_pc = GetDexPc(); + } if (dex_pc != DexFile::kDexNoIndex) { uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc); if (found_dex_pc != DexFile::kDexNoIndex) { @@ -1703,22 +1745,81 @@ class CatchBlockStackVisitor : public StackVisitor { return true; // Continue stack walk. } + bool HandleDeoptimization(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MethodHelper mh(m); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + CHECK(code_item != NULL); + uint16_t num_regs = code_item->registers_size_; + uint32_t dex_pc = GetDexPc(); + const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc); + uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits(); + ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc); + verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(), + mh.GetClassDefIndex(), code_item, + m->GetDexMethodIndex(), m, m->GetAccessFlags(), false); + verifier.Verify(); + std::vector kinds = verifier.DescribeVRegs(dex_pc); + for(uint16_t reg = 0; reg < num_regs; reg++) { + VRegKind kind = static_cast(kinds.at(reg * 2)); + switch (kind) { + case kUndefined: + new_frame->SetVReg(reg, 0xEBADDE09); + break; + case kConstant: + new_frame->SetVReg(reg, kinds.at((reg * 2) 
+ 1)); + break; + case kReferenceVReg: + new_frame->SetVRegReference(reg, + reinterpret_cast(GetVReg(m, reg, kind))); + break; + default: + new_frame->SetVReg(reg, GetVReg(m, reg, kind)); + break; + } + } + if (prev_shadow_frame_ != NULL) { + prev_shadow_frame_->SetLink(new_frame); + } else { + top_shadow_frame_ = new_frame; + } + prev_shadow_frame_ = new_frame; + return true; + } + void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mirror::AbstractMethod* catch_method = *handler_quick_frame_; - if (kDebugExceptionDelivery) { - if (catch_method == NULL) { + if (catch_method == NULL) { + if (kDebugExceptionDelivery) { LOG(INFO) << "Handler is upcall"; - } else { + } + } else { + CHECK(!is_deoptimization_); + if (instrumentation_frames_to_pop_ > 0) { + // Don't pop the instrumentation frame of the catch handler. + instrumentation_frames_to_pop_--; + } + if (kDebugExceptionDelivery) { const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_); LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")"; } } - self_->SetException(exception_); // Exception back in root set. + // Put exception back in root set and clear throw location. + self_->SetException(ThrowLocation(), exception_); self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_); - // Do debugger PostException after allowing thread suspension again. - Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_, - catch_method, handler_dex_pc_, exception_); + // Do instrumentation events after allowing thread suspension again. + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); + for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) { + // We pop the instrumentation stack here so as not to corrupt it during the stack walk. 
+ instrumentation->PopMethodForUnwind(self_, is_deoptimization_); + } + if (!is_deoptimization_) { + instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_, + exception_); + } else { + // TODO: proper return value. + self_->SetDeoptimizationShadowFrame(top_shadow_frame_); + } // Place context back on thread so it will be available when we continue. self_->ReleaseLongJumpContext(context_); context_->SetSP(reinterpret_cast(handler_quick_frame_)); @@ -1729,13 +1830,13 @@ class CatchBlockStackVisitor : public StackVisitor { } private: - Thread* self_; - mirror::Throwable* exception_; + Thread* const self_; + mirror::Throwable* const exception_; + const bool is_deoptimization_; // The type of the exception catch block to find. - mirror::Class* to_find_; - mirror::AbstractMethod* throw_method_; - JDWP::FrameId throw_frame_id_; - uint32_t throw_dex_pc_; + mirror::Class* const to_find_; + // Location of the throw. + const ThrowLocation& throw_location_; // Quick frame with found handler or last frame if no handler found. mirror::AbstractMethod** handler_quick_frame_; // PC to branch to for the handler. @@ -1748,21 +1849,32 @@ class CatchBlockStackVisitor : public StackVisitor { const bool method_tracing_active_; // Support for nesting no thread suspension checks. const char* last_no_assert_suspension_cause_; + // Number of frames to pop in long jump. + size_t instrumentation_frames_to_pop_; + ShadowFrame* top_shadow_frame_; + ShadowFrame* prev_shadow_frame_; }; void Thread::QuickDeliverException() { - mirror::Throwable* exception = GetException(); // Get exception from thread + // Get exception from thread. + ThrowLocation throw_location; + mirror::Throwable* exception = GetException(&throw_location); CHECK(exception != NULL); // Don't leave exception visible while we try to find the handler, which may cause class // resolution. 
ClearException(); + bool is_deoptimization = (exception == reinterpret_cast(-1)); if (kDebugExceptionDelivery) { - mirror::String* msg = exception->GetDetailMessage(); - std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : ""); - DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception) - << ": " << str_msg << "\n"); + if (!is_deoptimization) { + mirror::String* msg = exception->GetDetailMessage(); + std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : ""); + DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception) + << ": " << str_msg << "\n"); + } else { + DumpStack(LOG(INFO) << "Deoptimizing: "); + } } - CatchBlockStackVisitor catch_finder(this, exception); + CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization); catch_finder.WalkStack(true); catch_finder.DoLongJump(); LOG(FATAL) << "UNREACHABLE"; @@ -1779,39 +1891,45 @@ Context* Thread::GetLongJumpContext() { return result; } -mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { - struct CurrentMethodVisitor : public StackVisitor { - CurrentMethodVisitor(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : StackVisitor(thread, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {} - - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* m = GetMethod(); - if (m->IsRuntimeMethod()) { - // Continue if this is a runtime method. 
- return true; - } - method_ = m; - dex_pc_ = GetDexPc(); - frame_id_ = GetFrameId(); - return false; +struct CurrentMethodVisitor : public StackVisitor { + CurrentMethodVisitor(Thread* thread, Context* context) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {} + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* m = GetMethod(); + if (m->IsRuntimeMethod()) { + // Continue if this is a runtime method. + return true; + } + if (context_ != NULL) { + this_object_ = GetThisObject(); } - mirror::AbstractMethod* method_; - uint32_t dex_pc_; - size_t frame_id_; - }; + method_ = m; + dex_pc_ = GetDexPc(); + return false; + } + mirror::Object* this_object_; + mirror::AbstractMethod* method_; + uint32_t dex_pc_; +}; - CurrentMethodVisitor visitor(const_cast(this)); +mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const { + CurrentMethodVisitor visitor(const_cast(this), NULL); visitor.WalkStack(false); if (dex_pc != NULL) { *dex_pc = visitor.dex_pc_; } - if (frame_id != NULL) { - *frame_id = visitor.frame_id_; - } return visitor.method_; } +ThrowLocation Thread::GetCurrentLocationForThrow() { + Context* context = GetLongJumpContext(); + CurrentMethodVisitor visitor(this, context); + visitor.WalkStack(false); + ReleaseLongJumpContext(context); + return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_); +} + bool Thread::HoldsLock(mirror::Object* object) { if (object == NULL) { return false; @@ -1981,6 +2099,7 @@ void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) { if (exception_ != NULL) { VerifyRootWrapperCallback(exception_, &wrapperArg); } + throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg); if (class_loader_override_ != NULL) { VerifyRootWrapperCallback(class_loader_override_, &wrapperArg); } @@ -1995,6 +2114,17 @@ void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) { 
ReferenceMapVisitor mapper(this, context, visitorToCallback); mapper.WalkStack(); ReleaseLongJumpContext(context); + + std::deque* instrumentation_stack = GetInstrumentationStack(); + typedef std::deque::const_iterator It; + for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) { + mirror::Object* this_object = (*it).this_object_; + if (this_object != NULL) { + VerifyRootWrapperCallback(this_object, &wrapperArg); + } + mirror::AbstractMethod* method = (*it).method_; + VerifyRootWrapperCallback(method, &wrapperArg); + } } void Thread::VisitRoots(RootVisitor* visitor, void* arg) { @@ -2004,6 +2134,7 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) { if (exception_ != NULL) { visitor(exception_, arg); } + throw_location_.VisitRoots(visitor, arg); if (class_loader_override_ != NULL) { visitor(class_loader_override_, arg); } @@ -2018,6 +2149,17 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) { ReferenceMapVisitor mapper(this, context, visitorToCallback); mapper.WalkStack(); ReleaseLongJumpContext(context); + + std::deque* instrumentation_stack = GetInstrumentationStack(); + typedef std::deque::const_iterator It; + for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) { + mirror::Object* this_object = (*it).this_object_; + if (this_object != NULL) { + visitor(this_object, arg); + } + mirror::AbstractMethod* method = (*it).method_; + visitor(method, arg); + } } static void VerifyObject(const mirror::Object* root, void* arg) { diff --git a/src/thread.h b/src/thread.h index dd67a21edd..37f272196f 100644 --- a/src/thread.h +++ b/src/thread.h @@ -36,6 +36,7 @@ #include "stack.h" #include "stack_indirect_reference_table.h" #include "thread_state.h" +#include "throw_location.h" #include "UniquePtr.h" namespace art { @@ -80,14 +81,13 @@ enum ThreadPriority { enum ThreadFlag { kSuspendRequest = 1, // If set implies that suspend_count_ > 0 and the Thread should enter the 
// safepoint handler. - kCheckpointRequest = 2, // Request that the thread do some checkpoint work and then continue. - kEnterInterpreter = 4, // Instruct managed code it should enter the interpreter. + kCheckpointRequest = 2 // Request that the thread do some checkpoint work and then continue. }; class PACKED(4) Thread { public: // Space to throw a StackOverflowError in. - static const size_t kStackOverflowReservedBytes = 10 * KB; + static const size_t kStackOverflowReservedBytes = 16 * KB; // Creates a new native thread corresponding to the given managed peer. // Used to implement Thread.start. @@ -279,28 +279,27 @@ class PACKED(4) Thread { return exception_ != NULL; } - mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Throwable* GetException(ThrowLocation* throw_location) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (throw_location != NULL) { + *throw_location = throw_location_; + } return exception_; } void AssertNoPendingException() const; - void SetException(mirror::Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(new_exception != NULL); // TODO: DCHECK(!IsExceptionPending()); exception_ = new_exception; + throw_location_ = throw_location; } void ClearException() { exception_ = NULL; - } - - void DeliverException(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (exception == NULL) { - ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception"); - } else { - SetException(exception); - } + throw_location_.Clear(); } // Find catch block and perform long jump to appropriate exception handle @@ -312,9 +311,11 @@ class PACKED(4) Thread { long_jump_context_ = context; } - mirror::AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const + 
mirror::AbstractMethod* GetCurrentMethod(uint32_t* dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetTopOfStack(void* stack, uintptr_t pc) { mirror::AbstractMethod** top_method = reinterpret_cast(stack); managed_stack_.SetTopQuickFrame(top_method); @@ -330,32 +331,30 @@ class PACKED(4) Thread { } // If 'msg' is NULL, no detail message is set. - void ThrowNewException(const char* exception_class_descriptor, const char* msg) + void ThrowNewException(const ThrowLocation& throw_location, + const char* exception_class_descriptor, const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be // used as the new exception's cause. - void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) + void ThrowNewWrappedException(const ThrowLocation& throw_location, + const char* exception_class_descriptor, + const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) - __attribute__((format(printf, 3, 4))) + void ThrowNewExceptionF(const ThrowLocation& throw_location, + const char* exception_class_descriptor, const char* fmt, ...) + __attribute__((format(printf, 4, 5))) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) + void ThrowNewExceptionV(const ThrowLocation& throw_location, + const char* exception_class_descriptor, const char* fmt, va_list ap) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // OutOfMemoryError is special, because we need to pre-allocate an instance. // Only the GC should call this. 
void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc); - - void* FindExceptionHandlerInMethod(const mirror::AbstractMethod* method, - void* throw_pc, - const DexFile& dex_file, - ClassLinker* class_linker); - static void Startup(); static void FinishStartup(); static void Shutdown(); @@ -395,8 +394,7 @@ class PACKED(4) Thread { static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, jobjectArray output_array = NULL, int* stack_depth = NULL); - void VisitRoots(RootVisitor* visitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void VerifyRoots(VerifyRootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -535,34 +533,15 @@ class PACKED(4) Thread { return debug_invoke_req_; } - void SetDebuggerUpdatesEnabled(bool enabled); - - void SetDeoptimizationShadowFrame(ShadowFrame* sf, const JValue& ret_val); + void SetDeoptimizationShadowFrame(ShadowFrame* sf); + void SetDeoptimizationReturnValue(const JValue& ret_val); ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val); - const std::deque* GetInstrumentationStack() const { + std::deque* GetInstrumentationStack() { return instrumentation_stack_; } - bool IsInstrumentationStackEmpty() const { - return instrumentation_stack_->empty(); - } - - void PushInstrumentationStackFrame(const InstrumentationStackFrame& frame) { - instrumentation_stack_->push_front(frame); - } - - void PushBackInstrumentationStackFrame(const InstrumentationStackFrame& frame) { - instrumentation_stack_->push_back(frame); - } - - InstrumentationStackFrame PopInstrumentationStackFrame() { - InstrumentationStackFrame frame = instrumentation_stack_->front(); - instrumentation_stack_->pop_front(); - return frame; - } - BaseMutex* GetHeldMutex(LockLevel level) const 
{ return held_mutexes_[level]; } @@ -598,13 +577,15 @@ class PACKED(4) Thread { void CreatePeer(const char* name, bool as_daemon, jobject thread_group); friend class Runtime; // For CreatePeer. - // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread. + // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and + // Dbg::Disconnected. ThreadState SetStateUnsafe(ThreadState new_state) { ThreadState old_state = GetState(); state_and_flags_.as_struct.state = new_state; return old_state; } friend class SignalCatcher; // For SetStateUnsafe. + friend class Dbg; // For SetStateUnsafe. void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -710,6 +691,8 @@ class PACKED(4) Thread { // System thread id. pid_t tid_; + ThrowLocation throw_location_; + // Guards the 'interrupted_' and 'wait_monitor_' members. mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER; ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_); @@ -755,7 +738,7 @@ class PACKED(4) Thread { // Additional stack used by method instrumentation to store method and return pc values. // Stored as a pointer since std::deque is not PACKED. - std::deque* instrumentation_stack_; + std::deque* instrumentation_stack_; // A cached copy of the java.lang.Thread's name. std::string* name_; diff --git a/src/throw_location.cc b/src/throw_location.cc new file mode 100644 index 0000000000..84d2c9b446 --- /dev/null +++ b/src/throw_location.cc @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "throw_location.h" + +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" +#include "utils.h" + +namespace art { + +std::string ThrowLocation::Dump() const { + return StringPrintf("%s:%d", PrettyMethod(method_).c_str(), + MethodHelper(method_).GetLineNumFromDexPC(dex_pc_)); +} + +void ThrowLocation::VisitRoots(RootVisitor* visitor, void* arg) { + if (this_object_ != NULL) { + visitor(this_object_, arg); + } + if (method_ != NULL) { + visitor(method_, arg); + } +} + +} // namespace art diff --git a/src/throw_location.h b/src/throw_location.h new file mode 100644 index 0000000000..8c1b9410af --- /dev/null +++ b/src/throw_location.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_SRC_THROW_LOCATION_H_ +#define ART_SRC_THROW_LOCATION_H_ + +#include "base/macros.h" +#include "root_visitor.h" + +#include +#include + +namespace art { + +namespace mirror { +class AbstractMethod; +class Object; +} // mirror + +class PACKED(4) ThrowLocation { + public: + ThrowLocation() { + Clear(); + } + + ThrowLocation(mirror::Object* throw_this_object, mirror::AbstractMethod* throw_method, + uint32_t throw_dex_pc) : + this_object_(throw_this_object), + method_(throw_method), + dex_pc_(throw_dex_pc) {} + + mirror::Object* GetThis() const { + return this_object_; + } + + mirror::AbstractMethod* GetMethod() const { + return method_; + } + + uint32_t GetDexPc() const { + return dex_pc_; + } + + void Clear() { + this_object_ = NULL; + method_ = NULL; + dex_pc_ = -1; + } + + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void VisitRoots(RootVisitor* visitor, void* arg); + + private: + // The 'this' reference of the throwing method. + mirror::Object* this_object_; + // The throwing method. + mirror::AbstractMethod* method_; + // The instruction within the throwing method. + uint32_t dex_pc_; +}; + +} // namespace art + +#endif // ART_SRC_THROW_LOCATION_H_ diff --git a/src/trace.cc b/src/trace.cc index 859f523ef0..32932907e5 100644 --- a/src/trace.cc +++ b/src/trace.cc @@ -20,6 +20,7 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" +#include "common_throws.h" #include "debugger.h" #include "dex_file-inl.h" #include "instrumentation.h" @@ -73,7 +74,14 @@ namespace art { // // All values are stored in little-endian order. 
-static const uint32_t kTraceMethodActionMask = 0x03; // two bits +enum TraceAction { + kTraceMethodEnter = 0x00, // method entry + kTraceMethodExit = 0x01, // method exit + kTraceUnroll = 0x02, // method exited by exception unrolling + // 0x03 currently unused + kTraceMethodActionMask = 0x03, // two bits +}; + static const char kTraceTokenChar = '*'; static const uint16_t kTraceHeaderLength = 32; static const uint32_t kTraceMagicValue = 0x574f4c53; @@ -82,34 +90,57 @@ static const uint16_t kTraceVersionDualClock = 3; static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2 static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps -static ProfilerClockSource gDefaultTraceClockSource = kProfilerClockSourceDual; +#if defined(HAVE_POSIX_CLOCKS) +ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceDual; +#else +ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceWall; +#endif + +Trace* Trace::the_trace_ = NULL; -static inline uint32_t TraceMethodId(uint32_t methodValue) { - return (methodValue & ~kTraceMethodActionMask); +static mirror::AbstractMethod* DecodeTraceMethodId(uint32_t tmid) { + return reinterpret_cast(tmid & ~kTraceMethodActionMask); } -static inline uint32_t TraceMethodCombine(uint32_t method, uint8_t traceEvent) { - return (method | traceEvent); +static TraceAction DecodeTraceAction(uint32_t tmid) { + return static_cast(tmid & kTraceMethodActionMask); } -void Trace::SetDefaultClockSource(ProfilerClockSource clock_source) { - gDefaultTraceClockSource = clock_source; +static uint32_t EncodeTraceMethodAndAction(const mirror::AbstractMethod* method, + TraceAction action) { + uint32_t tmid = reinterpret_cast(method) | action; + DCHECK_EQ(method, DecodeTraceMethodId(tmid)); + return tmid; } -bool Trace::UseThreadCpuClock() { +void Trace::SetDefaultClockSource(ProfilerClockSource clock_source) { #if defined(HAVE_POSIX_CLOCKS) - return clock_source_ != kProfilerClockSourceWall; + 
default_clock_source_ = clock_source; #else - return false; + if (clock_source != kProfilerClockSourceWall) { + LOG(WARNING) << "Ignoring tracing request to use "; + } #endif } +static uint16_t GetTraceVersion(ProfilerClockSource clock_source) { + return (clock_source == kProfilerClockSourceDual) ? kTraceVersionDualClock + : kTraceVersionSingleClock; +} + +static uint16_t GetRecordSize(ProfilerClockSource clock_source) { + return (clock_source == kProfilerClockSourceDual) ? kTraceRecordSizeDualClock + : kTraceRecordSizeSingleClock; +} + +bool Trace::UseThreadCpuClock() { + return (clock_source_ == kProfilerClockSourceThreadCpu) || + (clock_source_ == kProfilerClockSourceDual); +} + bool Trace::UseWallClock() { -#if defined(HAVE_POSIX_CLOCKS) - return clock_source_ != kProfilerClockSourceThreadCpu; -#else - return true; -#endif + return (clock_source_ == kProfilerClockSourceWall) || + (clock_source_ == kProfilerClockSourceDual); } static void MeasureClockOverhead(Trace* trace) { @@ -165,109 +196,129 @@ static void Append8LE(uint8_t* buf, uint64_t val) { *buf++ = (uint8_t) (val >> 56); } -Trace::Trace(File* trace_file, int buffer_size, int flags) - : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags), - clock_source_(gDefaultTraceClockSource), overflow_(false), - buffer_size_(buffer_size), start_time_(0), trace_version_(0), record_size_(0), cur_offset_(0) { -} - -void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, bool direct_to_ddms) { - if (Runtime::Current()->IsMethodTracingActive()) { - LOG(INFO) << "Trace already in progress, ignoring this request"; - return; +void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, + bool direct_to_ddms) { + Thread* self = Thread::Current(); + { + MutexLock mu(self, *Locks::trace_lock_); + if (the_trace_ != NULL) { + LOG(ERROR) << "Trace already in progress, ignoring this request"; + return; + } } - - 
Runtime::Current()->GetThreadList()->SuspendAll(); + Runtime* runtime = Runtime::Current(); + runtime->GetThreadList()->SuspendAll(); // Open trace file if not going directly to ddms. - File* trace_file = NULL; + UniquePtr trace_file; if (!direct_to_ddms) { if (trace_fd < 0) { - trace_file = OS::OpenFile(trace_filename, true); + trace_file.reset(OS::OpenFile(trace_filename, true)); } else { - trace_file = new File(trace_fd, "tracefile"); + trace_file.reset(new File(trace_fd, "tracefile")); trace_file->DisableAutoClose(); } - if (trace_file == NULL) { + if (trace_file.get() == NULL) { PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; - Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", - StringPrintf("Unable to open trace file '%s'", trace_filename).c_str()); - Runtime::Current()->GetThreadList()->ResumeAll(); + runtime->GetThreadList()->ResumeAll(); + ScopedObjectAccess soa(self); + ThrowRuntimeException("Unable to open trace file '%s'", trace_filename); return; } } // Create Trace object. - Trace* tracer(new Trace(trace_file, buffer_size, flags)); - - // Enable count of allocs if specified in the flags. - if ((flags && kTraceCountAllocs) != 0) { - Runtime::Current()->SetStatsEnabled(true); - } + { + MutexLock mu(self, *Locks::trace_lock_); + if(the_trace_ != NULL) { + LOG(ERROR) << "Trace already in progress, ignoring this request"; + } else { + the_trace_ = new Trace(trace_file.release(), buffer_size, flags); - Runtime::Current()->EnableMethodTracing(tracer); - tracer->BeginTracing(); + // Enable count of allocs if specified in the flags. 
+ if ((flags && kTraceCountAllocs) != 0) { + runtime->SetStatsEnabled(true); + } - Runtime::Current()->GetThreadList()->ResumeAll(); + runtime->GetInstrumentation()->AddListener(the_trace_, + instrumentation::Instrumentation::kMethodEntered | + instrumentation::Instrumentation::kMethodExited | + instrumentation::Instrumentation::kMethodUnwind); + } + } + runtime->GetThreadList()->ResumeAll(); } void Trace::Stop() { - if (!Runtime::Current()->IsMethodTracingActive()) { - LOG(INFO) << "Trace stop requested, but no trace currently running"; - return; + Runtime* runtime = Runtime::Current(); + runtime->GetThreadList()->SuspendAll(); + Trace* the_trace = NULL; + { + MutexLock mu(Thread::Current(), *Locks::trace_lock_); + if (the_trace_ == NULL) { + LOG(ERROR) << "Trace stop requested, but no trace currently running"; + } else { + the_trace = the_trace_; + the_trace_ = NULL; + } } - - Runtime::Current()->GetThreadList()->SuspendAll(); - - Runtime::Current()->GetInstrumentation()->GetTrace()->FinishTracing(); - Runtime::Current()->DisableMethodTracing(); - - Runtime::Current()->GetThreadList()->ResumeAll(); + if (the_trace != NULL) { + the_trace->FinishTracing(); + runtime->GetInstrumentation()->RemoveListener(the_trace, + instrumentation::Instrumentation::kMethodEntered | + instrumentation::Instrumentation::kMethodExited | + instrumentation::Instrumentation::kMethodUnwind); + delete the_trace; + } + runtime->GetThreadList()->ResumeAll(); } void Trace::Shutdown() { - if (!Runtime::Current()->IsMethodTracingActive()) { - LOG(INFO) << "Trace shutdown requested, but no trace currently running"; - return; + if (IsMethodTracingActive()) { + Stop(); } - Runtime::Current()->GetInstrumentation()->GetTrace()->FinishTracing(); - Runtime::Current()->DisableMethodTracing(); } -void Trace::BeginTracing() { - // Set the start time of tracing. - start_time_ = MicroTime(); - - // Set trace version and record size. 
- if (UseThreadCpuClock() && UseWallClock()) { - trace_version_ = kTraceVersionDualClock; - record_size_ = kTraceRecordSizeDualClock; - } else { - trace_version_ = kTraceVersionSingleClock; - record_size_ = kTraceRecordSizeSingleClock; - } +bool Trace::IsMethodTracingActive() { + MutexLock mu(Thread::Current(), *Locks::trace_lock_); + return the_trace_ != NULL; +} +Trace::Trace(File* trace_file, int buffer_size, int flags) + : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags), + clock_source_(default_clock_source_), buffer_size_(buffer_size), start_time_(MicroTime()), + cur_offset_(0), overflow_(false) { // Set up the beginning of the trace. + uint16_t trace_version = GetTraceVersion(clock_source_); memset(buf_.get(), 0, kTraceHeaderLength); Append4LE(buf_.get(), kTraceMagicValue); - Append2LE(buf_.get() + 4, trace_version_); + Append2LE(buf_.get() + 4, trace_version); Append2LE(buf_.get() + 6, kTraceHeaderLength); Append8LE(buf_.get() + 8, start_time_); - if (trace_version_ >= kTraceVersionDualClock) { - Append2LE(buf_.get() + 16, record_size_); + if (trace_version >= kTraceVersionDualClock) { + uint16_t record_size = GetRecordSize(clock_source_); + Append2LE(buf_.get() + 16, record_size); } // Update current offset. cur_offset_ = kTraceHeaderLength; +} + +static void DumpBuf(uint8_t* buf, size_t buf_size, ProfilerClockSource clock_source) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint8_t* ptr = buf + kTraceHeaderLength; + uint8_t* end = buf + buf_size; - // Install all method tracing stubs. - Runtime::Current()->GetInstrumentation()->InstallStubs(); + while (ptr < end) { + uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); + mirror::AbstractMethod* method = DecodeTraceMethodId(tmid); + TraceAction action = DecodeTraceAction(tmid); + LOG(INFO) << PrettyMethod(method) << " " << static_cast(action); + ptr += GetRecordSize(clock_source); + } } void Trace::FinishTracing() { - // Uninstall all method tracing stubs. 
- Runtime::Current()->GetInstrumentation()->UninstallStubs(); - // Compute elapsed time. uint64_t elapsed = MicroTime() - start_time_; @@ -278,12 +329,13 @@ void Trace::FinishTracing() { Runtime::Current()->SetStatsEnabled(false); } - GetVisitedMethods(final_offset); + std::set visited_methods; + GetVisitedMethods(final_offset, &visited_methods); std::ostringstream os; os << StringPrintf("%cversion\n", kTraceTokenChar); - os << StringPrintf("%d\n", trace_version_); + os << StringPrintf("%d\n", GetTraceVersion(clock_source_)); os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false"); if (UseThreadCpuClock()) { if (UseWallClock()) { @@ -295,7 +347,8 @@ void Trace::FinishTracing() { os << StringPrintf("clock=wall\n"); } os << StringPrintf("elapsed-time-usec=%llu\n", elapsed); - os << StringPrintf("num-method-calls=%zd\n", (final_offset - kTraceHeaderLength) / record_size_); + size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_); + os << StringPrintf("num-method-calls=%zd\n", num_records); os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead); os << StringPrintf("vm=art\n"); if ((flags_ & kTraceCountAllocs) != 0) { @@ -306,7 +359,7 @@ void Trace::FinishTracing() { os << StringPrintf("%cthreads\n", kTraceTokenChar); DumpThreadList(os); os << StringPrintf("%cmethods\n", kTraceTokenChar); - DumpMethodList(os); + DumpMethodList(os, visited_methods); os << StringPrintf("%cend\n", kTraceTokenChar); std::string header(os.str()); @@ -317,74 +370,128 @@ void Trace::FinishTracing() { iov[1].iov_base = buf_.get(); iov[1].iov_len = final_offset; Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2); + const bool kDumpTraceInfo = false; + if (kDumpTraceInfo) { + LOG(INFO) << "Trace sent:\n" << header; + DumpBuf(buf_.get(), final_offset, clock_source_); + } } else { if (!trace_file_->WriteFully(header.c_str(), header.length()) || !trace_file_->WriteFully(buf_.get(), final_offset)) { std::string detail(StringPrintf("Trace 
data write failed: %s", strerror(errno))); PLOG(ERROR) << detail; - Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", detail.c_str()); + ThrowRuntimeException("%s", detail.c_str()); } } } -void Trace::LogMethodTraceEvent(Thread* self, const mirror::AbstractMethod* method, - Trace::TraceEvent event) { - if (thread_clock_base_map_.find(self) == thread_clock_base_map_.end()) { - uint64_t time = ThreadCpuMicroTime(); - thread_clock_base_map_.Put(self, time); - } +void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t new_dex_pc) { + // We're not recorded to listen to this kind of event, so complain. + LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; +}; + +void Trace::MethodEntered(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) { + LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered); +} + +void Trace::MethodExited(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc, + const JValue& return_value) { + UNUSED(return_value); + LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited); +} + +void Trace::MethodUnwind(Thread* thread, const mirror::AbstractMethod* method, uint32_t dex_pc) { + LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind); +} + +void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, + mirror::Throwable* exception_object) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + LOG(ERROR) << "Unexpected exception caught event in tracing"; +} +void Trace::LogMethodTraceEvent(Thread* thread, const mirror::AbstractMethod* method, + instrumentation::Instrumentation::InstrumentationEvent event) { // Advance cur_offset_ atomically. 
int32_t new_offset; int32_t old_offset; do { old_offset = cur_offset_; - new_offset = old_offset + record_size_; + new_offset = old_offset + GetRecordSize(clock_source_); if (new_offset > buffer_size_) { overflow_ = true; return; } } while (android_atomic_release_cas(old_offset, new_offset, &cur_offset_) != 0); - uint32_t method_value = TraceMethodCombine(reinterpret_cast(method), event); + TraceAction action = kTraceMethodEnter; + switch (event) { + case instrumentation::Instrumentation::kMethodEntered: + action = kTraceMethodEnter; + break; + case instrumentation::Instrumentation::kMethodExited: + action = kTraceMethodExit; + break; + case instrumentation::Instrumentation::kMethodUnwind: + action = kTraceUnroll; + break; + default: + UNIMPLEMENTED(FATAL) << "Unexpected event: " << event; + } + + uint32_t method_value = EncodeTraceMethodAndAction(method, action); // Write data uint8_t* ptr = buf_.get() + old_offset; - Append2LE(ptr, self->GetTid()); + Append2LE(ptr, thread->GetTid()); Append4LE(ptr + 2, method_value); ptr += 6; if (UseThreadCpuClock()) { - uint64_t thread_clock_base = thread_clock_base_map_.find(self)->second; - uint32_t thread_clock_diff = ThreadCpuMicroTime() - thread_clock_base; + // TODO: this isn't vaguely thread safe. + SafeMap::iterator it = thread_clock_base_map_.find(thread); + uint32_t thread_clock_diff = 0; + if (UNLIKELY(it == thread_clock_base_map_.end())) { + // First event, the diff is 0, record the base time in the map. 
+ uint64_t time = ThreadCpuMicroTime(); + thread_clock_base_map_.Put(thread, time); + } else { + uint64_t thread_clock_base = it->second; + thread_clock_diff = ThreadCpuMicroTime() - thread_clock_base; + } Append4LE(ptr, thread_clock_diff); ptr += 4; } - if (UseWallClock()) { uint32_t wall_clock_diff = MicroTime() - start_time_; Append4LE(ptr, wall_clock_diff); } } -void Trace::GetVisitedMethods(size_t end_offset) { +void Trace::GetVisitedMethods(size_t buf_size, + std::set* visited_methods) { uint8_t* ptr = buf_.get() + kTraceHeaderLength; - uint8_t* end = buf_.get() + end_offset; + uint8_t* end = buf_.get() + buf_size; while (ptr < end) { - uint32_t method_value = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); - mirror::AbstractMethod* method = - reinterpret_cast(TraceMethodId(method_value)); - visited_methods_.insert(method); - ptr += record_size_; + uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); + mirror::AbstractMethod* method = DecodeTraceMethodId(tmid); + visited_methods->insert(method); + ptr += GetRecordSize(clock_source_); } } -void Trace::DumpMethodList(std::ostream& os) { - typedef std::set::const_iterator It; // TODO: C++0x auto - for (It it = visited_methods_.begin(); it != visited_methods_.end(); ++it) { - const mirror::AbstractMethod* method = *it; - MethodHelper mh(method); +void Trace::DumpMethodList(std::ostream& os, + const std::set& visited_methods) { + typedef std::set::const_iterator It; // TODO: C++0x auto + MethodHelper mh; + for (It it = visited_methods.begin(); it != visited_methods.end(); ++it) { + mirror::AbstractMethod* method = *it; + mh.ChangeMethod(method); os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(), mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile()); diff --git a/src/trace.h b/src/trace.h index 1be1cc4142..9432e718ff 100644 --- a/src/trace.h +++ b/src/trace.h @@ -23,6 +23,7 @@ #include "base/macros.h" 
#include "globals.h" +#include "instrumentation.h" #include "os.h" #include "safe_map.h" #include "UniquePtr.h" @@ -37,45 +38,65 @@ class Thread; enum ProfilerClockSource { kProfilerClockSourceThreadCpu, kProfilerClockSourceWall, - kProfilerClockSourceDual, + kProfilerClockSourceDual, // Both wall and thread CPU clocks. }; -class Trace { +class Trace : public instrumentation::InstrumentationListener { public: - enum TraceEvent { - kMethodTraceEnter = 0, - kMethodTraceExit = 1, - kMethodTraceUnwind = 2, - }; - enum TraceFlag { kTraceCountAllocs = 1, }; static void SetDefaultClockSource(ProfilerClockSource clock_source); - static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, bool direct_to_ddms); - static void Stop(); - static void Shutdown() NO_THREAD_SAFETY_ANALYSIS; // TODO: implement appropriate locking. + static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, + bool direct_to_ddms) + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_, + Locks::trace_lock_); + static void Stop() LOCKS_EXCLUDED(Locks::trace_lock_); + static void Shutdown() LOCKS_EXCLUDED(Locks::trace_lock_); + static bool IsMethodTracingActive() LOCKS_EXCLUDED(Locks::trace_lock_); bool UseWallClock(); bool UseThreadCpuClock(); - void LogMethodTraceEvent(Thread* self, const mirror::AbstractMethod* method, TraceEvent event); - + virtual void MethodEntered(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void MethodExited(Thread* thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t dex_pc, + const JValue& return_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void MethodUnwind(Thread* thread, const mirror::AbstractMethod* method, uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void DexPcMoved(Thread* 
thread, mirror::Object* this_object, + const mirror::AbstractMethod* method, uint32_t new_dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, + mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, + mirror::Throwable* exception_object) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: explicit Trace(File* trace_file, int buffer_size, int flags); - void BeginTracing(); void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void LogMethodTraceEvent(Thread* thread, const mirror::AbstractMethod* method, + instrumentation::Instrumentation::InstrumentationEvent event); + // Methods to output traced methods and threads. - void GetVisitedMethods(size_t end_offset); - void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void GetVisitedMethods(size_t end_offset, std::set* visited_methods); + void DumpMethodList(std::ostream& os, const std::set& visited_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); - // Set of methods visited by the profiler. - std::set visited_methods_; + // Singleton instance of the Trace or NULL when no method tracing is active. + static Trace* the_trace_ GUARDED_BY(Locks::trace_lock_); + + // The default profiler clock source. + static ProfilerClockSource default_clock_source_; // Maps a thread to its clock base. SafeMap thread_clock_base_map_; @@ -87,18 +108,22 @@ class Trace { UniquePtr buf_; // Flags enabling extra tracing of things such as alloc counts. - int flags_; + const int flags_; - ProfilerClockSource clock_source_; + const ProfilerClockSource clock_source_; - bool overflow_; - int buffer_size_; - uint64_t start_time_; - uint16_t trace_version_; - uint16_t record_size_; + // Size of buf_. + const int buffer_size_; + + // Time trace was created. + const uint64_t start_time_; + // Offset into buf_. 
volatile int32_t cur_offset_; + // Did we overflow the buffer recording traces? + bool overflow_; + DISALLOW_COPY_AND_ASSIGN(Trace); }; diff --git a/src/verifier/reg_type_cache.cc b/src/verifier/reg_type_cache.cc index c7a1b4f047..6228ba51e0 100644 --- a/src/verifier/reg_type_cache.cc +++ b/src/verifier/reg_type_cache.cc @@ -148,6 +148,10 @@ mirror::Class* RegTypeCache::ResolveClass(std::string descriptor, mirror::ClassL klass = class_linker->FindClass(descriptor.c_str(), loader); } else { klass = class_linker->LookupClass(descriptor.c_str(), loader); + if (klass != NULL && !klass->IsLoaded()) { + // We found the class but without it being loaded its not safe for use. + klass = NULL; + } } return klass; } @@ -244,12 +248,12 @@ const RegType& RegTypeCache::FromClass(mirror::Class* klass, bool precise) { RegTypeCache::~RegTypeCache() { CHECK_LE(primitive_count_, entries_.size()); // Delete only the non primitive types. - if (primitive_count_ == static_cast(entries_.size())) { + if (entries_.size() == kNumPrimitives) { // All entries are primitive, nothing to delete. 
return; } std::vector::iterator non_primitive_begin = entries_.begin(); - std::advance(non_primitive_begin, primitive_count_); + std::advance(non_primitive_begin, kNumPrimitives); STLDeleteContainerPointers(non_primitive_begin, entries_.end()); } @@ -267,8 +271,8 @@ void RegTypeCache::ShutDown() { FloatType::GetInstance(); DoubleLoType::Destroy(); DoubleHiType::Destroy(); - RegTypeCache::primitive_initialized_ = false; - RegTypeCache::primitive_count_ = 0; + RegTypeCache::primitive_initialized_ = false; + RegTypeCache::primitive_count_ = 0; } } diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h index cacd517028..41d3c6967d 100644 --- a/src/verifier/reg_type_cache.h +++ b/src/verifier/reg_type_cache.h @@ -35,6 +35,7 @@ namespace verifier { class RegType; +const size_t kNumPrimitives = 12; class RegTypeCache { public: explicit RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) { @@ -45,7 +46,7 @@ class RegTypeCache { if(!RegTypeCache::primitive_initialized_) { CHECK_EQ(RegTypeCache::primitive_count_, 0); CreatePrimitiveTypes(); - CHECK_EQ(RegTypeCache::primitive_count_, 12); + CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives); RegTypeCache::primitive_initialized_ = true; } } diff --git a/test/003-omnibus-opcodes/expected.txt b/test/003-omnibus-opcodes/expected.txt index c5e67e5078..a62c4986b9 100644 --- a/test/003-omnibus-opcodes/expected.txt +++ b/test/003-omnibus-opcodes/expected.txt @@ -70,14 +70,6 @@ Throw.rethrow UnresTest1... UnresTest1... UnresTest2... -java.lang.NoClassDefFoundError: Failed resolution of: LUnresClass; - at UnresTest2.run(UnresTest2.java:33) - at Main.run(Main.java:64) - at Main.main(Main.java:26) -java.lang.NoClassDefFoundError: Failed resolution of: LUnresClassSubclass; - at UnresTest2.run(UnresTest2.java:41) - at Main.run(Main.java:64) - at Main.main(Main.java:26) UnresTest2 done InternedString.run Done! 
diff --git a/test/003-omnibus-opcodes/src/UnresTest2.java b/test/003-omnibus-opcodes/src/UnresTest2.java index c94f2265c9..4135d73f7c 100644 --- a/test/003-omnibus-opcodes/src/UnresTest2.java +++ b/test/003-omnibus-opcodes/src/UnresTest2.java @@ -33,22 +33,23 @@ class UnresTest2 { un = new UnresClass(); Main.assertTrue(false); } catch (NoClassDefFoundError ncdfe) { - ncdfe.printStackTrace(); + Main.assertTrue(ncdfe.getCause() instanceof ClassNotFoundException); // good } try { - new UnresClassSubclass(); - Main.assertTrue(false); + new UnresClassSubclass(); + Main.assertTrue(false); } catch (NoClassDefFoundError ncdfe) { - ncdfe.printStackTrace(); - // good + Main.assertTrue(ncdfe.getCause() instanceof ClassNotFoundException); + // good } try { UnresClass[] uar = new UnresClass[3]; Main.assertTrue(false); } catch (NoClassDefFoundError ncdfe) { + Main.assertTrue(ncdfe.getCause() instanceof ClassNotFoundException); // good } diff --git a/test/044-proxy/src/ReturnsAndArgPassing.java b/test/044-proxy/src/ReturnsAndArgPassing.java index 50eff77066..a1734100bc 100644 --- a/test/044-proxy/src/ReturnsAndArgPassing.java +++ b/test/044-proxy/src/ReturnsAndArgPassing.java @@ -51,6 +51,8 @@ public class ReturnsAndArgPassing { static int barInvocations = 0; static class MyInvocationHandler implements InvocationHandler { + boolean causeNpeOnReturn = false; + Class returnType = null; public Object invoke(Object proxy, Method method, Object[] args) { check(proxy instanceof Proxy); check(method.getDeclaringClass() == MyInterface.class); @@ -62,30 +64,29 @@ public class ReturnsAndArgPassing { check(args == null); barInvocations++; } - if (name.equals("voidFoo")) { return null; } - else if (name.equals("voidBar")) { return null; } - else if (name.equals("booleanFoo")) { return true; } - else if (name.equals("booleanBar")) { return false; } - else if (name.equals("byteFoo")) { return Byte.MAX_VALUE; } - else if (name.equals("byteBar")) { return Byte.MIN_VALUE; } - else if 
(name.equals("charFoo")) { return Character.MAX_VALUE; } - else if (name.equals("charBar")) { return Character.MIN_VALUE; } - else if (name.equals("shortFoo")) { return Short.MAX_VALUE; } - else if (name.equals("shortBar")) { return Short.MIN_VALUE; } - else if (name.equals("intFoo")) { return Integer.MAX_VALUE; } - else if (name.equals("intBar")) { return Integer.MIN_VALUE; } - else if (name.equals("longFoo")) { return Long.MAX_VALUE; } - else if (name.equals("longBar")) { return Long.MIN_VALUE; } - else if (name.equals("floatFoo")) { return Float.MAX_VALUE; } - else if (name.equals("floatBar")) { return Float.MIN_VALUE; } - else if (name.equals("doubleFoo")) { return Double.MAX_VALUE; } - else if (name.equals("doubleBar")) { return Double.MIN_VALUE; } - else if (name.equals("selectArg")) { + if (causeNpeOnReturn) { + return null; + } else if (name.equals("voidFoo") || name.equals("voidBar")) { + return null; + } else if (name.equals("booleanFoo")) { + return true; + } else if (name.equals("booleanBar")) { + return false; + } else if (name.equals("selectArg")) { check(args.length == 6); int select = (Integer)args[0]; return args[select]; } else { - throw new AssertionError("Unexpect method " + method); + try { + if (name.endsWith("Foo")) { + return returnType.getField("MAX_VALUE").get(null); + } else { + check(name.endsWith("Bar")); + return returnType.getField("MIN_VALUE").get(null); + } + } catch (Exception e) { + throw new Error("return type = " + returnType, e); + } } } } @@ -106,6 +107,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 1); check(fooInvocations == 1); + myHandler.returnType = Boolean.class; check(proxyMyInterface.booleanFoo() == true); check(fooInvocations == 2); @@ -114,6 +116,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 2); check(fooInvocations == 2); + myHandler.returnType = Byte.class; check(proxyMyInterface.byteFoo() == Byte.MAX_VALUE); check(fooInvocations == 3); @@ -122,6 +125,7 @@ public class 
ReturnsAndArgPassing { check(barInvocations == 3); check(fooInvocations == 3); + myHandler.returnType = Character.class; check(proxyMyInterface.charFoo() == Character.MAX_VALUE); check(fooInvocations == 4); @@ -130,6 +134,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 4); check(fooInvocations == 4); + myHandler.returnType = Short.class; check(proxyMyInterface.shortFoo() == Short.MAX_VALUE); check(fooInvocations == 5); @@ -138,6 +143,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 5); check(fooInvocations == 5); + myHandler.returnType = Integer.class; check(proxyMyInterface.intFoo() == Integer.MAX_VALUE); check(fooInvocations == 6); @@ -146,6 +152,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 6); check(fooInvocations == 6); + myHandler.returnType = Long.class; check(proxyMyInterface.longFoo() == Long.MAX_VALUE); check(fooInvocations == 7); @@ -154,6 +161,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 7); check(fooInvocations == 7); + myHandler.returnType = Float.class; check(proxyMyInterface.floatFoo() == Float.MAX_VALUE); check(fooInvocations == 8); @@ -162,6 +170,7 @@ public class ReturnsAndArgPassing { check(barInvocations == 8); check(fooInvocations == 8); + myHandler.returnType = Double.class; check(proxyMyInterface.doubleFoo() == Double.MAX_VALUE); check(fooInvocations == 9); @@ -169,6 +178,259 @@ public class ReturnsAndArgPassing { check(proxyMyInterface.doubleBar() == Double.MIN_VALUE); check(barInvocations == 9); + // Toggle flag to get return values to cause NPEs + myHandler.causeNpeOnReturn = true; + + check(fooInvocations == 9); + try { + proxyMyInterface.booleanFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 10); + + check(barInvocations == 9); + try { + proxyMyInterface.booleanBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 10); + + 
check(fooInvocations == 10); + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 11); + + check(barInvocations == 10); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 11); + + check(fooInvocations == 11); + try { + proxyMyInterface.charFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 12); + + check(barInvocations == 11); + try { + proxyMyInterface.charBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 12); + + check(fooInvocations == 12); + try { + proxyMyInterface.shortFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 13); + + check(barInvocations == 12); + try { + proxyMyInterface.shortBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 13); + + check(fooInvocations == 13); + try { + proxyMyInterface.intFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 14); + + check(barInvocations == 13); + try { + proxyMyInterface.intBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 14); + + check(fooInvocations == 14); + try { + proxyMyInterface.longFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 15); + + check(barInvocations == 14); + try { + proxyMyInterface.longBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 15); + + check(fooInvocations == 15); + try { + proxyMyInterface.floatFoo(); + throw new AssertionError("Expected NPE"); + } catch 
(NullPointerException e) { + } + check(fooInvocations == 16); + + check(barInvocations == 15); + try { + proxyMyInterface.floatBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 16); + + check(fooInvocations == 16); + try { + proxyMyInterface.doubleFoo(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(fooInvocations == 17); + + check(barInvocations == 16); + try { + proxyMyInterface.doubleBar(); + throw new AssertionError("Expected NPE"); + } catch (NullPointerException e) { + } + check(barInvocations == 17); + + // Toggle flag to stop NPEs + myHandler.causeNpeOnReturn = false; + + check(fooInvocations == 17); + myHandler.returnType = Double.class; // Double -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 18); + + check(barInvocations == 17); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 18); + + check(fooInvocations == 18); + myHandler.returnType = Float.class; // Float -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 19); + + check(barInvocations == 18); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 19); + + check(fooInvocations == 19); + myHandler.returnType = Long.class; // Long -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 20); + + check(barInvocations == 19); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch 
(ClassCastException e) { + } + check(barInvocations == 20); + + check(fooInvocations == 20); + myHandler.returnType = Integer.class; // Int -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 21); + + check(barInvocations == 20); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 21); + + check(fooInvocations == 21); + myHandler.returnType = Short.class; // Short -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 22); + + check(barInvocations == 21); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 22); + + check(fooInvocations == 22); + myHandler.returnType = Character.class; // Char -> byte == fail + try { + proxyMyInterface.byteFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 23); + + check(barInvocations == 22); + try { + proxyMyInterface.byteBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 23); + + check(fooInvocations == 23); + myHandler.returnType = Character.class; // Char -> short == fail + try { + proxyMyInterface.shortFoo(); + throw new AssertionError("Expected ClassCastException"); + } catch (ClassCastException e) { + } + check(fooInvocations == 24); + + check(barInvocations == 23); + try { + proxyMyInterface.shortBar(); + throw new AssertionError("Expected NPE"); + } catch (ClassCastException e) { + } + check(barInvocations == 24); + System.out.println(testName + ".testProxyReturns PASSED"); } diff --git a/test/100-reflect2/expected.txt 
b/test/100-reflect2/expected.txt index 9417174f18..0c567d49c5 100644 --- a/test/100-reflect2/expected.txt +++ b/test/100-reflect2/expected.txt @@ -22,7 +22,7 @@ z 30 62 14 -java.lang.IllegalArgumentException: invalid primitive conversion from int to short +java.lang.IllegalArgumentException: Invalid primitive conversion from int to short at java.lang.reflect.Field.set(Native Method) at Main.testFieldReflection(Main.java:121) at Main.main(Main.java:269) diff --git a/test/201-built-in-exception-detail-messages/src/Main.java b/test/201-built-in-exception-detail-messages/src/Main.java index 9b67db60fb..f8da6446cf 100644 --- a/test/201-built-in-exception-detail-messages/src/Main.java +++ b/test/201-built-in-exception-detail-messages/src/Main.java @@ -393,7 +393,7 @@ public class Main { m.invoke("hello"); // Wrong number of arguments. fail(); } catch (IllegalArgumentException iae) { - assertEquals("wrong number of arguments; expected 1, got 0", iae.getMessage()); + assertEquals("Wrong number of arguments; expected 1, got 0", iae.getMessage()); } try { Method m = String.class.getMethod("charAt", int.class); @@ -414,14 +414,14 @@ public class Main { m.invoke(new Integer(5)); // Wrong type for 'this'. fail(); } catch (IllegalArgumentException iae) { - assertEquals("expected receiver of type java.lang.String, but got java.lang.Integer", iae.getMessage()); + assertEquals("Expected receiver of type java.lang.String, but got java.lang.Integer", iae.getMessage()); } try { Method m = String.class.getMethod("charAt", int.class); m.invoke(null); // Null for 'this'. 
fail(); } catch (NullPointerException npe) { - assertEquals("expected receiver of type java.lang.String, but got null", npe.getMessage()); + assertEquals("null receiver", npe.getMessage()); } } diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index 92cfa99b85..4b472daa5e 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -21,6 +21,7 @@ #include "gc_map.h" #include "mirror/abstract_method.h" #include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "object_utils.h" -- cgit v1.2.3-59-g8ed1b