-rw-r--r--  compiler/optimizing/intrinsics.cc          |  31
-rw-r--r--  compiler/optimizing/register_allocator.cc  |  18
-rw-r--r--  runtime/asm_support.h                      |   6
-rw-r--r--  runtime/check_jni.cc                       | 186
-rw-r--r--  runtime/class_linker.cc                    |  38
-rw-r--r--  runtime/class_linker_test.cc               |   1
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h      |  13
-rw-r--r--  runtime/gc/collector/mark_sweep.cc         |  23
-rw-r--r--  runtime/gc/collector/mark_sweep.h          |  13
-rw-r--r--  runtime/gc/heap.cc                         |   2
-rw-r--r--  runtime/image.cc                           |   2
-rw-r--r--  runtime/mirror/class-inl.h                 |  13
-rw-r--r--  runtime/mirror/class.cc                    |   1
-rw-r--r--  runtime/mirror/class.h                     |  33
-rw-r--r--  runtime/mirror/class_flags.h               |  56
-rw-r--r--  runtime/mirror/object-inl.h                |  48
-rw-r--r--  runtime/mirror/string.cc                   |   1
-rw-r--r--  runtime/mirror/string.h                    |   7
-rw-r--r--  runtime/modifiers.h                        |  24
-rw-r--r--  runtime/verifier/method_verifier.cc        |  26
-rw-r--r--  runtime/verifier/register_line.h           |   8
-rw-r--r--  test/529-long-split/expected.txt           |   0
-rw-r--r--  test/529-long-split/info.txt               |   2
-rw-r--r--  test/529-long-split/src/Main.java          | 185
-rw-r--r--  test/800-smali/expected.txt                |   3
-rw-r--r--  test/800-smali/smali/b_17978759.smali      |  28
-rw-r--r--  test/800-smali/smali/b_23300986.smali      |  23
-rw-r--r--  test/800-smali/src/Main.java               |   6
-rwxr-xr-x  tools/setup-buildbot-device.sh             |   3
29 files changed, 665 insertions, 135 deletions
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 075ec1ee2e..feaaaf45ec 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -16,12 +16,16 @@
#include "intrinsics.h"
+#include "art_method.h"
+#include "class_linker.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "invoke_type.h"
+#include "mirror/dex_cache-inl.h"
#include "nodes.h"
#include "quick/inline_method_analyser.h"
+#include "scoped_thread_state_change.h"
#include "utils.h"
namespace art {
@@ -364,17 +368,34 @@ void IntrinsicsRecognizer::Run() {
if (inst->IsInvoke()) {
HInvoke* invoke = inst->AsInvoke();
InlineMethod method;
- DexFileMethodInliner* inliner =
- driver_->GetMethodInlinerMap()->GetMethodInliner(&invoke->GetDexFile());
+ const DexFile& dex_file = invoke->GetDexFile();
+ DexFileMethodInliner* inliner = driver_->GetMethodInlinerMap()->GetMethodInliner(&dex_file);
DCHECK(inliner != nullptr);
if (inliner->IsIntrinsic(invoke->GetDexMethodIndex(), &method)) {
Intrinsics intrinsic = GetIntrinsic(method, graph_->GetInstructionSet());
if (intrinsic != Intrinsics::kNone) {
if (!CheckInvokeType(intrinsic, invoke)) {
- LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
- << intrinsic << " for "
- << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile());
+ // We might be in a situation where we have inlined a method that calls an intrinsic,
+ // but that method is in a different dex file for which we do not have a
+ // verified_method that would have helped the compiler driver sharpen the call.
+ // We can still ensure the invoke types match by checking whether the called method
+ // is final or is in a final class.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* art_method = class_linker->FindDexCache(dex_file)->GetResolvedMethod(
+ invoke->GetDexMethodIndex(), class_linker->GetImagePointerSize());
+ DCHECK(art_method != nullptr);
+ if (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal()) {
+ invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
+ } else {
+ LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
+ << intrinsic << " for "
+ << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile())
+ << invoke->DebugName();
+ }
+ }
} else {
invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 9f32a9eaf8..37c8bc5f51 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -587,9 +587,17 @@ void RegisterAllocator::LinearScan() {
while (!unhandled_->IsEmpty()) {
// (1) Remove interval with the lowest start position from unhandled.
LiveInterval* current = unhandled_->Pop();
+
+ // Make sure the interval is in an expected state.
DCHECK(!current->IsFixed() && !current->HasSpillSlot());
+ // Make sure we are going in the right order.
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
+ // Make sure a low interval is always with a high.
DCHECK(!current->IsLowInterval() || unhandled_->Peek()->IsHighInterval());
+ // Make sure a high interval is always with a low.
+ DCHECK(current->IsLowInterval() ||
+ unhandled_->IsEmpty() ||
+ !unhandled_->Peek()->IsHighInterval());
size_t position = current->GetStart();
@@ -916,13 +924,19 @@ bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position
if (active->IsHighInterval()) continue;
if (first_register_use > next_use[active->GetRegister()]) continue;
- // Split the first interval found.
- if (!active->IsLowInterval() || IsLowOfUnalignedPairInterval(active)) {
+ // Split the first interval found that is either:
+ // 1) A non-pair interval.
+ // 2) A pair interval whose high is not low + 1.
+ // 3) A pair interval whose low is not even.
+ if (!active->IsLowInterval() ||
+ IsLowOfUnalignedPairInterval(active) ||
+ !IsLowRegister(active->GetRegister())) {
LiveInterval* split = Split(active, position);
active_.DeleteAt(i);
if (split != active) {
handled_.Add(active);
}
+ PotentiallyRemoveOtherHalf(active, &active_, i);
AddSorted(unhandled_, split);
return true;
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 35acd424ba..084c88e239 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -141,10 +141,10 @@ ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
@@ -153,7 +153,7 @@ ADD_TEST_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED),
static_cast<uint32_t>(art::mirror::Class::kStatusInitialized))
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
ADD_TEST_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE),
- static_cast<uint32_t>(kAccClassIsFinalizable))
+ static_cast<uint32_t>(art::kAccClassIsFinalizable))
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 4172b89244..b6ad5473ff 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -66,6 +66,8 @@ namespace art {
#define kFlag_Invocation 0x8000 // Part of the invocation interface (JavaVM*).
#define kFlag_ForceTrace 0x80000000 // Add this to a JNI function's flags if you want to trace every call.
+
+class VarArgs;
/*
* Java primitive types:
* B - jbyte
@@ -126,6 +128,116 @@ union JniValueType {
jshort S;
const void* V; // void
jboolean Z;
+ const VarArgs* va;
+};
+
+/*
+ * A structure containing all the information needed to validate varargs arguments.
+ *
+ * Note that actually getting the arguments from this structure mutates it, so this should only be
+ * done on owned copies.
+ */
+class VarArgs {
+ public:
+ VarArgs(jmethodID m, va_list var) : m_(m), type_(kTypeVaList), cnt_(0) {
+ va_copy(vargs_, var);
+ }
+
+ VarArgs(jmethodID m, const jvalue* vals) : m_(m), type_(kTypePtr), cnt_(0), ptr_(vals) {}
+
+ ~VarArgs() {
+ if (type_ == kTypeVaList) {
+ va_end(vargs_);
+ }
+ }
+
+ VarArgs(VarArgs&& other) {
+ m_ = other.m_;
+ cnt_ = other.cnt_;
+ type_ = other.type_;
+ if (other.type_ == kTypeVaList) {
+ va_copy(vargs_, other.vargs_);
+ } else {
+ ptr_ = other.ptr_;
+ }
+ }
+
+ // This method is const because we need to ensure that one only uses the GetValue method on an
+ // owned copy of the VarArgs. This is because getting the next argument from a va_list is a
+ // mutating operation. Therefore we pass around these VarArgs with the 'const' qualifier and when
+ // we want to use one we need to Clone() it.
+ VarArgs Clone() const {
+ if (type_ == kTypeVaList) {
+ // const_cast needed to make sure the compiler is okay with va_copy, which (being a macro) is
+ // messed up if the source argument is not the exact type 'va_list'.
+ return VarArgs(m_, cnt_, const_cast<VarArgs*>(this)->vargs_);
+ } else {
+ return VarArgs(m_, cnt_, ptr_);
+ }
+ }
+
+ jmethodID GetMethodID() const {
+ return m_;
+ }
+
+ JniValueType GetValue(char fmt) {
+ JniValueType o;
+ if (type_ == kTypeVaList) {
+ switch (fmt) {
+ case 'Z': o.Z = static_cast<jboolean>(va_arg(vargs_, jint)); break;
+ case 'B': o.B = static_cast<jbyte>(va_arg(vargs_, jint)); break;
+ case 'C': o.C = static_cast<jchar>(va_arg(vargs_, jint)); break;
+ case 'S': o.S = static_cast<jshort>(va_arg(vargs_, jint)); break;
+ case 'I': o.I = va_arg(vargs_, jint); break;
+ case 'J': o.J = va_arg(vargs_, jlong); break;
+ case 'F': o.F = static_cast<jfloat>(va_arg(vargs_, jdouble)); break;
+ case 'D': o.D = va_arg(vargs_, jdouble); break;
+ case 'L': o.L = va_arg(vargs_, jobject); break;
+ default:
+ LOG(FATAL) << "Illegal type format char " << fmt;
+ UNREACHABLE();
+ }
+ } else {
+ CHECK(type_ == kTypePtr);
+ jvalue v = ptr_[cnt_];
+ cnt_++;
+ switch (fmt) {
+ case 'Z': o.Z = v.z; break;
+ case 'B': o.B = v.b; break;
+ case 'C': o.C = v.c; break;
+ case 'S': o.S = v.s; break;
+ case 'I': o.I = v.i; break;
+ case 'J': o.J = v.j; break;
+ case 'F': o.F = v.f; break;
+ case 'D': o.D = v.d; break;
+ case 'L': o.L = v.l; break;
+ default:
+ LOG(FATAL) << "Illegal type format char " << fmt;
+ UNREACHABLE();
+ }
+ }
+ return o;
+ }
+
+ private:
+ VarArgs(jmethodID m, uint32_t cnt, va_list var) : m_(m), type_(kTypeVaList), cnt_(cnt) {
+ va_copy(vargs_, var);
+ }
+
+ VarArgs(jmethodID m, uint32_t cnt, const jvalue* vals) : m_(m), type_(kTypePtr), cnt_(cnt), ptr_(vals) {}
+
+ enum VarArgsType {
+ kTypeVaList,
+ kTypePtr,
+ };
+
+ jmethodID m_;
+ VarArgsType type_;
+ uint32_t cnt_;
+ union {
+ va_list vargs_;
+ const jvalue* ptr_;
+ };
};
class ScopedCheck {
@@ -339,7 +451,7 @@ class ScopedCheck {
* z - jsize (for lengths; use i if negative values are okay)
* v - JavaVM*
* E - JNIEnv*
- * . - no argument; just print "..." (used for varargs JNI calls)
+ * . - VarArgs* for JNI calls with variable-length arguments
*
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
@@ -736,11 +848,35 @@ class ScopedCheck {
return CheckThread(arg.E);
case 'L': // jobject
return CheckInstance(soa, kObject, arg.L, true);
+ case '.': // A VarArgs list
+ return CheckVarArgs(soa, arg.va);
default:
return CheckNonHeapValue(fmt, arg);
}
}
+ bool CheckVarArgs(ScopedObjectAccess& soa, const VarArgs* args_p)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ CHECK(args_p != nullptr);
+ VarArgs args(args_p->Clone());
+ ArtMethod* m = CheckMethodID(soa, args.GetMethodID());
+ if (m == nullptr) {
+ return false;
+ }
+ uint32_t len = 0;
+ const char* shorty = m->GetShorty(&len);
+ // Skip the return type
+ CHECK_GE(len, 1u);
+ len--;
+ shorty++;
+ for (uint32_t i = 0; i < len; i++) {
+ if (!CheckPossibleHeapValue(soa, shorty[i], args.GetValue(shorty[i]))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
bool CheckNonHeapValue(char fmt, JniValueType arg) {
switch (fmt) {
case 'p': // TODO: pointer - null or readable?
@@ -833,6 +969,24 @@ class ScopedCheck {
}
break;
}
+ case '.': {
+ const VarArgs* va = arg.va;
+ VarArgs args(va->Clone());
+ ArtMethod* m = soa.DecodeMethod(args.GetMethodID());
+ uint32_t len;
+ const char* shorty = m->GetShorty(&len);
+ CHECK_GE(len, 1u);
+ // Skip past return value.
+ len--;
+ shorty++;
+ // Remove the previous ', ' from the message.
+ msg->erase(msg->length() - 2);
+ for (uint32_t i = 0; i < len; i++) {
+ *msg += ", ";
+ TracePossibleHeapValue(soa, entry, shorty[i], args.GetValue(shorty[i]), msg);
+ }
+ break;
+ }
default:
TraceNonHeapValue(fmt, arg, msg);
break;
@@ -1836,8 +1990,9 @@ class CheckJNI {
static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, __FUNCTION__);
- JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- if (sc.Check(soa, true, "Ecm", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ VarArgs rest(mid, vargs);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
sc.CheckConstructor(soa, mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
@@ -1859,8 +2014,9 @@ class CheckJNI {
static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, __FUNCTION__);
- JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- if (sc.Check(soa, true, "Ecm", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ VarArgs rest(mid, vargs);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
sc.CheckConstructor(soa, mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
@@ -2689,25 +2845,25 @@ class CheckJNI {
}
static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
- jclass c, jmethodID mid, InvokeType invoke)
+ jclass c, jmethodID mid, InvokeType invoke, const VarArgs* vargs)
SHARED_REQUIRES(Locks::mutator_lock_) {
bool checked;
switch (invoke) {
case kVirtual: {
DCHECK(c == nullptr);
- JniValueType args[3] = {{.E = env}, {.L = obj}, {.m = mid}};
- checked = sc.Check(soa, true, "ELm", args);
+ JniValueType args[4] = {{.E = env}, {.L = obj}, {.m = mid}, {.va = vargs}};
+ checked = sc.Check(soa, true, "ELm.", args);
break;
}
case kDirect: {
- JniValueType args[4] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}};
- checked = sc.Check(soa, true, "ELcm", args);
+ JniValueType args[5] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}, {.va = vargs}};
+ checked = sc.Check(soa, true, "ELcm.", args);
break;
}
case kStatic: {
DCHECK(obj == nullptr);
- JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- checked = sc.Check(soa, true, "Ecm", args);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = vargs}};
+ checked = sc.Check(soa, true, "Ecm.", args);
break;
}
default:
@@ -2724,7 +2880,8 @@ class CheckJNI {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, function_name);
JniValueType result;
- if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ VarArgs rest(mid, vargs);
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke, &rest) &&
sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
const char* result_check;
switch (type) {
@@ -2907,7 +3064,8 @@ class CheckJNI {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, function_name);
JniValueType result;
- if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ VarArgs rest(mid, vargs);
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke, &rest) &&
sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
const char* result_check;
switch (type) {
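The VarArgs wrapper introduced above is always passed around const and consumed through a Clone(), because reading a va_list is destructive. A minimal usage sketch mirroring what CheckVarArgs does (the helper name and the shorty/len parameters are illustrative, not part of the patch):

    static void ConsumeArgs(const VarArgs* shared, const char* shorty, uint32_t len) {
      VarArgs owned(shared->Clone());                    // owned copy; safe to mutate
      for (uint32_t i = 0; i < len; ++i) {
        JniValueType value = owned.GetValue(shorty[i]);  // advances only the copy
        // ... hand `value` to CheckPossibleHeapValue / TracePossibleHeapValue ...
      }
    }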
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c179c64491..b547d079cd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -358,9 +358,9 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Setup String.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
+ java_lang_String->SetStringClass();
mirror::String::SetClass(java_lang_String.Get());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
- java_lang_String->SetStringClass();
// Setup java.lang.ref.Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
@@ -570,16 +570,13 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
CHECK_EQ(java_lang_ref_Reference->GetClassSize(),
mirror::Reference::ClassSize(image_pointer_size_));
class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsFinalizerReference);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagFinalizerReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
- kAccClassIsPhantomReference);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagPhantomReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagSoftReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
- kAccClassIsWeakReference);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagWeakReference);
// Setup the ClassLoader, verifying the object_size_.
class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;");
@@ -2701,6 +2698,11 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetVTable(java_lang_Object->GetVTable());
new_class->SetPrimitiveType(Primitive::kPrimNot);
new_class->SetClassLoader(component_type->GetClassLoader());
+ if (component_type->IsPrimitive()) {
+ new_class->SetClassFlags(mirror::kClassFlagNoReferenceFields);
+ } else {
+ new_class->SetClassFlags(mirror::kClassFlagObjectArray);
+ }
mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
{
ArtMethod* imt[mirror::Class::kImtSize];
@@ -4385,9 +4387,9 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
}
// Inherit reference flags (if any) from the superclass.
- int reference_flags = (super->GetAccessFlags() & kAccReferenceFlagsMask);
+ int reference_flags = (super->GetClassFlags() & mirror::kClassFlagReference);
if (reference_flags != 0) {
- klass->SetAccessFlags(klass->GetAccessFlags() | reference_flags);
+ klass->SetClassFlags(klass->GetClassFlags() | reference_flags);
}
// Disallow custom direct subclasses of java.lang.ref.Reference.
if (init_done_ && super == GetClassRoot(kJavaLangRefReference)) {
@@ -5227,6 +5229,22 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
*class_size = size;
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (num_reference_fields == 0 || super_class == nullptr) {
+ // java.lang.Object has one reference field, klass, but we ignore it since we always visit the class.
+ // If the super_class is null then we are java.lang.Object.
+ if (super_class == nullptr ||
+ (super_class->GetClassFlags() & mirror::kClassFlagNoReferenceFields) != 0) {
+ klass->SetClassFlags(klass->GetClassFlags() | mirror::kClassFlagNoReferenceFields);
+ } else if (kIsDebugBuild) {
+ size_t total_reference_instance_fields = 0;
+ while (super_class != nullptr) {
+ total_reference_instance_fields += super_class->NumReferenceInstanceFields();
+ super_class = super_class->GetSuperClass();
+ }
+ CHECK_GT(total_reference_instance_fields, 1u);
+ }
+ }
if (!klass->IsVariableSize()) {
std::string temp;
DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 3c84d8fc0a..0d1c875fdf 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -488,6 +488,7 @@ struct ObjectOffsets : public CheckOffsets<mirror::Object> {
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_flags_), "classFlags");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize");
addOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId");
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index a3cc83132f..56edcc9d09 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -35,10 +35,17 @@ inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& v
obj->VisitReferences(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
- if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
+ uint32_t class_flags = klass->GetClassFlags();
+ if ((class_flags & mirror::kClassFlagNoReferenceFields) != 0) {
+ ++no_reference_class_count_;
+ } else if (class_flags == mirror::kClassFlagNormal) {
+ ++normal_count_;
+ } else if (class_flags == mirror::kClassFlagObjectArray) {
+ ++object_array_count_;
+ } else if (class_flags == mirror::kClassFlagClass) {
++class_count_;
- } else if (UNLIKELY(klass->IsArrayClass<kVerifyNone>())) {
- ++array_count_;
+ } else if ((class_flags & mirror::kClassFlagReference) != 0) {
+ ++reference_count_;
} else {
++other_count_;
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b0a8a5bf2b..7ddc7ccc63 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -70,7 +70,6 @@ static constexpr bool kParallelProcessMarkStack = true;
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
-static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;
// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
@@ -114,15 +113,17 @@ void MarkSweep::InitializePhase() {
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
+ no_reference_class_count_.StoreRelaxed(0);
+ normal_count_.StoreRelaxed(0);
class_count_.StoreRelaxed(0);
- array_count_.StoreRelaxed(0);
+ object_array_count_.StoreRelaxed(0);
other_count_.StoreRelaxed(0);
+ reference_count_.StoreRelaxed(0);
large_object_test_.StoreRelaxed(0);
large_object_mark_.StoreRelaxed(0);
overhead_time_ .StoreRelaxed(0);
work_chunks_created_.StoreRelaxed(0);
work_chunks_deleted_.StoreRelaxed(0);
- reference_count_.StoreRelaxed(0);
mark_null_count_.StoreRelaxed(0);
mark_immune_count_.StoreRelaxed(0);
mark_fastpath_count_.StoreRelaxed(0);
@@ -1265,9 +1266,6 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
- if (kCountJavaLangRefs) {
- ++reference_count_;
- }
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
@@ -1386,8 +1384,14 @@ inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
void MarkSweep::FinishPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
if (kCountScannedTypes) {
- VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
- << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
+ VLOG(gc)
+ << "MarkSweep scanned"
+ << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
+ << " normal objects=" << normal_count_.LoadRelaxed()
+ << " classes=" << class_count_.LoadRelaxed()
+ << " object arrays=" << object_array_count_.LoadRelaxed()
+ << " references=" << reference_count_.LoadRelaxed()
+ << " other=" << other_count_.LoadRelaxed();
}
if (kCountTasks) {
VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
@@ -1399,9 +1403,6 @@ void MarkSweep::FinishPhase() {
VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
<< " marked " << large_object_mark_.LoadRelaxed();
}
- if (kCountJavaLangRefs) {
- VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
- }
if (kCountMarkedObjects) {
VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
<< " immune=" << mark_immune_count_.LoadRelaxed()
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 8bd1dc7cd5..371bba531d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -245,7 +245,7 @@ class MarkSweep : public GarbageCollector {
void RevokeAllThreadLocalBuffers();
// Whether or not we count how many of each type of object were scanned.
- static const bool kCountScannedTypes = false;
+ static constexpr bool kCountScannedTypes = false;
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
@@ -260,18 +260,23 @@ class MarkSweep : public GarbageCollector {
// Parallel finger.
AtomicInteger atomic_finger_;
+
+ AtomicInteger no_reference_class_count_;
+ AtomicInteger normal_count_;
// Number of classes scanned, if kCountScannedTypes.
AtomicInteger class_count_;
- // Number of arrays scanned, if kCountScannedTypes.
- AtomicInteger array_count_;
+ // Number of object arrays scanned, if kCountScannedTypes.
+ AtomicInteger object_array_count_;
// Number of non-class/arrays scanned, if kCountScannedTypes.
AtomicInteger other_count_;
+ // Number of java.lang.ref.Reference instances.
+ AtomicInteger reference_count_;
+
AtomicInteger large_object_test_;
AtomicInteger large_object_mark_;
AtomicInteger overhead_time_;
AtomicInteger work_chunks_created_;
AtomicInteger work_chunks_deleted_;
- AtomicInteger reference_count_;
AtomicInteger mark_null_count_;
AtomicInteger mark_immune_count_;
AtomicInteger mark_fastpath_count_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d7f918b4ff..b8c44781a3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3707,7 +3707,7 @@ void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count));
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 2586959e55..8df17c6929 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '0', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cd678f670b..b2c6e4da12 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -547,6 +547,7 @@ inline uint32_t Class::GetAccessFlags() {
inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
+
inline void Class::SetName(String* name) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
@@ -784,9 +785,17 @@ inline void Class::InitializeClassVisitor::operator()(
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
// Called inside a transaction when setting pre-verified flag during boot image compilation.
if (Runtime::Current()->IsActiveTransaction()) {
- SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+ SetField32<true>(AccessFlagsOffset(), new_access_flags);
+ } else {
+ SetField32<false>(AccessFlagsOffset(), new_access_flags);
+ }
+}
+
+inline void Class::SetClassFlags(uint32_t new_flags) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
} else {
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
}
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 055b3e5110..228fd27f28 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -44,6 +44,7 @@ void Class::SetClassClass(Class* java_lang_Class) {
<< java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
+ java_lang_Class->SetClassFlags(java_lang_Class->GetClassFlags() | mirror::kClassFlagClass);
java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 3f375be9ca..cbcb517001 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -19,6 +19,7 @@
#include "base/iteration_range.h"
#include "dex_file.h"
+#include "class_flags.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
#include "invoke_type.h"
@@ -201,6 +202,12 @@ class MANAGED Class FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE uint32_t GetClassFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_));
+ }
+ void SetClassFlags(uint32_t new_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+
void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is an interface.
@@ -228,21 +235,19 @@ class MANAGED Class FINAL : public Object {
}
ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
+ return (GetClassFlags() & kClassFlagString) != 0;
}
ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
- SetAccessFlags(flags | kAccClassIsStringClass);
+ SetClassFlags(GetClassFlags() | kClassFlagString | kClassFlagNoReferenceFields);
}
ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetField32(AccessFlagsOffset()) & kAccClassIsClassLoaderClass) != 0;
+ return (GetClassFlags() & kClassFlagClassLoader) != 0;
}
ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
- SetAccessFlags(flags | kAccClassIsClassLoaderClass);
+ SetClassFlags(GetClassFlags() | kClassFlagClassLoader);
}
// Returns true if the class is abstract.
@@ -272,27 +277,27 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagWeakReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagSoftReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagFinalizerReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagPhantomReference) != 0;
}
// Can references of this type be assigned to by things of another type? For non-array types
@@ -862,7 +867,8 @@ class MANAGED Class FINAL : public Object {
uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
- // Returns the number of instance fields containing reference types.
+ // Returns the number of instance fields containing reference types not counting fields in the
+ // super class.
uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
@@ -1225,6 +1231,9 @@ class MANAGED Class FINAL : public Object {
// length-prefixed array.
uint64_t virtual_methods_;
+ // Class flags to help speed up visiting object references.
+ uint32_t class_flags_;
+
// Total size of the Class instance; used when allocating storage on gc heap.
// See also object_size_.
uint32_t class_size_;
diff --git a/runtime/mirror/class_flags.h b/runtime/mirror/class_flags.h
new file mode 100644
index 0000000000..6c15639177
--- /dev/null
+++ b/runtime/mirror/class_flags.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+#define ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+
+#include <stdint.h>
+
+namespace art {
+namespace mirror {
+
+// Object types stored in class to help GC with faster object marking.
+static constexpr uint32_t kClassFlagNormal = 0x00000000;
+// Only normal objects which have no reference fields, e.g. a string, a primitive array, or a
+// normal class instance without reference fields.
+static constexpr uint32_t kClassFlagNoReferenceFields = 0x00000001;
+static constexpr uint32_t kClassFlagString = 0x00000004;
+static constexpr uint32_t kClassFlagObjectArray = 0x00000008;
+static constexpr uint32_t kClassFlagClass = 0x00000010;
+
+// class is ClassLoader or one of its subclasses
+static constexpr uint32_t kClassFlagClassLoader = 0x00000020;
+
+// class is a soft reference
+static constexpr uint32_t kClassFlagSoftReference = 0x00000040;
+// class is a weak reference
+static constexpr uint32_t kClassFlagWeakReference = 0x00000080;
+// class is a finalizer reference
+static constexpr uint32_t kClassFlagFinalizerReference = 0x00000100;
+// class is a phantom reference
+static constexpr uint32_t kClassFlagPhantomReference = 0x00000200;
+
+static constexpr uint32_t kClassFlagReference =
+ kClassFlagSoftReference |
+ kClassFlagWeakReference |
+ kClassFlagFinalizerReference |
+ kClassFlagPhantomReference;
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+
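As used elsewhere in this change, the exact-kind values (kClassFlagClass, kClassFlagObjectArray) are compared with ==, while the reference and no-reference-fields bits are tested with masks since they can be combined with other flags. A short sketch, assuming klass is any mirror::Class*:

    const uint32_t flags = klass->GetClassFlags();
    const bool no_ref_fields = (flags & mirror::kClassFlagNoReferenceFields) != 0;  // mask test
    const bool is_reference  = (flags & mirror::kClassFlagReference) != 0;          // soft/weak/finalizer/phantom
    const bool object_array  = (flags == mirror::kClassFlagObjectArray);            // exact-kind test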
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 586ae30d19..702a0f49ea 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -24,6 +24,7 @@
#include "atomic.h"
#include "array-inl.h"
#include "class.h"
+#include "class_flags.h"
#include "class_linker.h"
#include "class_loader-inl.h"
#include "lock_word-inl.h"
@@ -1010,20 +1011,43 @@ inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
mirror::Class* klass = GetClass<kVerifyFlags>();
visitor(this, ClassOffset(), false);
- if (klass == Class::GetJavaLangClass()) {
- AsClass<kVerifyNone>()->VisitReferences(klass, visitor);
- } else if (klass->IsArrayClass() || klass->IsStringClass()) {
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
- }
- } else if (klass->IsClassLoaderClass()) {
- mirror::ClassLoader* class_loader = AsClassLoader<kVerifyFlags>();
- class_loader->VisitReferences<kVerifyFlags>(klass, visitor);
- } else {
+ const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
+ if (LIKELY(class_flags == kClassFlagNormal)) {
DCHECK(!klass->IsVariableSize());
VisitInstanceFieldsReferences(klass, visitor);
- if (UNLIKELY(klass->IsTypeOfReferenceClass<kVerifyNone>())) {
- ref_visitor(klass, AsReference());
+ DCHECK(!klass->IsClassClass());
+ } else {
+ if ((class_flags & kClassFlagNoReferenceFields) == 0) {
+ DCHECK(!klass->IsStringClass());
+ if (class_flags == kClassFlagClass) {
+ DCHECK(klass->IsClassClass());
+ AsClass<kVerifyNone>()->VisitReferences(klass, visitor);
+ } else if (class_flags == kClassFlagObjectArray) {
+ DCHECK(klass->IsObjectArrayClass());
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
+ } else if ((class_flags & kClassFlagReference) != 0) {
+ VisitInstanceFieldsReferences(klass, visitor);
+ ref_visitor(klass, AsReference());
+ } else {
+ mirror::ClassLoader* const class_loader = AsClassLoader<kVerifyFlags>();
+ class_loader->VisitReferences<kVerifyFlags>(klass, visitor);
+ }
+ } else if (kIsDebugBuild) {
+ CHECK(!klass->IsClassClass());
+ CHECK(!klass->IsObjectArrayClass());
+ // String still has instance fields for reflection purposes but these don't exist in
+ // actual string instances.
+ if (!klass->IsStringClass()) {
+ size_t total_reference_instance_fields = 0;
+ mirror::Class* super_class = klass;
+ do {
+ total_reference_instance_fields += super_class->NumReferenceInstanceFields();
+ super_class = super_class->GetSuperClass();
+ } while (super_class != nullptr);
+ // The only reference field should be the object's class. This field is handled at the
+ // beginning of the function.
+ CHECK_EQ(total_reference_instance_fields, 1u);
+ }
}
}
}
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index b6236b1eb3..45610dccc8 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -55,6 +55,7 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != nullptr);
+ CHECK(java_lang_String->IsStringClass());
java_lang_String_ = GcRoot<Class>(java_lang_String);
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index eb2e1f6977..fbee2d7bf3 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -157,10 +157,9 @@ class MANAGED String FINAL : public Object {
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String);
- static void ResetClass();
- static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* java_lang_String) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 0d9ec29db3..f7ab10be9d 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -19,6 +19,8 @@
#include <stdint.h>
+namespace art {
+
static constexpr uint32_t kAccPublic = 0x0001; // class, field, method, ic
static constexpr uint32_t kAccPrivate = 0x0002; // field, method, ic
static constexpr uint32_t kAccProtected = 0x0004; // field, method, ic
@@ -49,28 +51,8 @@ static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
// Special runtime-only flags.
-// Note: if only kAccClassIsReference is set, we have a soft reference.
-
-// class is ClassLoader or one of its subclasses
-static constexpr uint32_t kAccClassIsClassLoaderClass = 0x10000000;
-
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
-// class is a soft/weak/phantom ref
-static constexpr uint32_t kAccClassIsReference = 0x08000000;
-// class is a weak reference
-static constexpr uint32_t kAccClassIsWeakReference = 0x04000000;
-// class is a finalizer reference
-static constexpr uint32_t kAccClassIsFinalizerReference = 0x02000000;
-// class is a phantom reference
-static constexpr uint32_t kAccClassIsPhantomReference = 0x01000000;
-// class is the string class
-static constexpr uint32_t kAccClassIsStringClass = 0x00800000;
-
-static constexpr uint32_t kAccReferenceFlagsMask = (kAccClassIsReference
- | kAccClassIsWeakReference
- | kAccClassIsFinalizerReference
- | kAccClassIsPhantomReference);
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
@@ -95,5 +77,7 @@ static constexpr uint32_t kAccValidClassFlags = kAccPublic | kAccFinal | kAccSup
static constexpr uint32_t kAccValidInterfaceFlags = kAccPublic | kAccInterface |
kAccAbstract | kAccSynthetic | kAccAnnotation;
+} // namespace art
+
#endif // ART_RUNTIME_MODIFIERS_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1828b91e2a..a50607105f 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1956,6 +1956,32 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::MONITOR_ENTER:
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
+ // Check whether the previous instruction is a move-object with vAA as a source, creating
+ // untracked lock aliasing.
+ if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+ uint32_t prev_idx = work_insn_idx_ - 1;
+ while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+ prev_idx--;
+ }
+ const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
+ switch (prev_inst->Opcode()) {
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ if (prev_inst->VRegB() == inst->VRegA_11x()) {
+ // Redo the copy. This won't change the register types, but it updates the lock
+ // status for the aliased register.
+ work_line_->CopyRegister1(this,
+ prev_inst->VRegA(),
+ prev_inst->VRegB(),
+ kTypeCategoryRef);
+ }
+ break;
+
+ default: // Other instruction types ignored.
+ break;
+ }
+ }
break;
case Instruction::MONITOR_EXIT:
/*
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index f61e51fb23..a9c4c951fd 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -344,6 +344,14 @@ class RegisterLine {
} else {
reg_to_lock_depths_.erase(it);
}
+ // Need to unlock every register at the same lock depth. These are aliased locks.
+ uint32_t mask = 1 << depth;
+ for (auto& pair : reg_to_lock_depths_) {
+ if ((pair.second & mask) != 0) {
+ VLOG(verifier) << "Also unlocking " << pair.first;
+ pair.second ^= mask;
+ }
+ }
}
void ClearAllRegToLockDepths(size_t reg) {
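A self-contained illustration of the mask manipulation added above, with made-up register numbers (sketch only, not part of the runtime):

    #include <cstdint>
    #include <map>

    int main() {
      // v1 and v2 alias the same object, locked at depth 0.
      std::map<uint32_t, uint32_t> reg_to_lock_depths = {{1u, 1u << 0}, {2u, 1u << 0}};
      const uint32_t depth = 0;
      const uint32_t mask = 1u << depth;
      // Unlocking either register clears the depth bit for every aliased register,
      // so the verifier no longer believes the other register is still locked.
      for (auto& pair : reg_to_lock_depths) {
        if ((pair.second & mask) != 0) {
          pair.second ^= mask;
        }
      }
      return 0;
    }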
diff --git a/test/529-long-split/expected.txt b/test/529-long-split/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/529-long-split/expected.txt
diff --git a/test/529-long-split/info.txt b/test/529-long-split/info.txt
new file mode 100644
index 0000000000..39b5b96eb6
--- /dev/null
+++ b/test/529-long-split/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, which used to trip
+during register allocation.
diff --git a/test/529-long-split/src/Main.java b/test/529-long-split/src/Main.java
new file mode 100644
index 0000000000..dc52d88486
--- /dev/null
+++ b/test/529-long-split/src/Main.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ if (testOddLow1(5L)) {
+ throw new Error();
+ }
+
+ if (testNonFollowingHigh(5)) {
+ throw new Error();
+ }
+
+ if (testOddLow2()) {
+ throw new Error();
+ }
+ }
+
+ public static boolean testOddLow1(long a /* ECX-EDX */) {
+ // class instance is in EBP
+ long b = myLongField1; // ESI-EDI
+ int f = myField1; // EBX
+ int e = myField2; // EAX
+ int g = myField3; // ESI (by spilling ESI-EDI, see below)
+ int h = myField4; // EDI
+ myLongField2 = a; // Make sure ESI-EDI gets spilled and not ECX-EDX
+ myField2 = f; // use of EBX
+ myField1 = e; // use of EAX
+ myField3 = h; // use of ESI
+ myField4 = g; // use of EDI
+
+ // At this point `b` has been spilled and needs to have a pair. The ordering
+ // in the register allocator triggers the allocation of `res` before `b`.
+ // `res` being used after the `doCall`, we want a callee saved register.
+ //
+ // EBP is taken by the class instance and EDI is taken by `g` (both used in the `myField4`
+ // assignment below). So we end up allocating ESI for `res`.
+ //
+ // When we try to allocate a pair for `b` we're in the following situation:
+ // EAX is free
+ // ECX is taken
+ // EDX is taken
+ // EBX is free
+ // ESP is blocked
+ // EBP could be spilled
+ // ESI is taken
+ // EDI could be spilled
+ //
+ // So there is no consecutive registers available to please the register allocator.
+ // The compiler used to trip then because of a bogus implementation of trying to split
+ // an unaligned register pair (here ECX and EDX). The implementation would not find
+ // a register and the register allocator would then complain about not having
+ // enough registers for the operation.
+ boolean res = a == b;
+ $noinline$doCall();
+ myField4 = g;
+ return res;
+ }
+
+ public static boolean testNonFollowingHigh(int i) {
+ // class instance is in EBP
+ long b = myLongField1; // ESI-EDI
+ long a = (long)i; // EAX-EDX
+ int f = myField1; // EBX
+ int e = myField2; // ECX
+ int g = myField3; // ESI (by spilling ESI-EDI, see below)
+ int h = myField4; // EDI
+ myLongField2 = a; // Make sure ESI-EDI gets spilled and not ECX-EDX
+ myField2 = f; // use of EBX
+ myField1 = e; // use of ECX
+ myField3 = h; // use of EDI
+ myField4 = g; // use of ESI
+
+ // At this point `b` has been spilled and needs to have a pair. The ordering
+ // in the register allocator triggers the allocation of `res` before `b`.
+ // `res` being used after the `doCall`, we want a callee saved register.
+ //
+ // EBP is taken by the class instance and ESI is taken by `g` (both used in the `myField4`
+ // assignment below). So we end up allocating EDI for `res`.
+ //
+ // When we try to allocate a pair for `b` we're in the following situation:
+ // EAX is taken
+ // ECX is free
+ // EDX is taken
+ // EBX is free
+ // ESP is blocked
+ // EBP could be spilled
+ // ESI is taken
+ // EDI could be spilled
+ //
+ // So there are no consecutive registers available to please the register allocator.
+ // The compiler used to be in a bad state because of a bogus implementation of trying
+ // to split an unaligned register pair (here EAX and EDX).
+ boolean res = a == b;
+ $noinline$doCall();
+ myField4 = g;
+ return res;
+ }
+
+ public static boolean testOddLow2() {
+ // class instance is in EBP
+ long b = myLongField1; // ECX-EDX (hint due to call below).
+ long a = myLongField2; // ESI-EDI
+ int f = myField1; // EBX
+ int e = myField2; // EAX
+ int g = myField3; // ECX
+ int h = myField4; // EDX
+ int i = myField5; // ESI - callee saved due to assignment after call to $noinline$doCall.
+ myField2 = f; // use of EBX
+ myField1 = e; // use of EAX
+ myField3 = h; // use of EDX
+ myField4 = i; // use of ESI
+ myField5 = g; // use of ECX
+
+ // At this point `a` and `b` have been spilled and need to have pairs. The ordering
+ // in the register allocator triggers the allocation of `res` before `a` and `b`.
+ // `res` being used after the `doCall`, we want a callee saved register.
+ //
+ // EBP is taken by the class instance and ESI is taken by `i` (both used in the `myField4`
+ // assignment below). So we end up allocating EDI for `res`.
+ //
+ // We first try to allocate a pair for `b`. We're in the following situation:
+ // EAX is free
+ // ECX is free
+ // EDX is free
+ // EBX is free
+ // ESP is blocked
+ // EBP could be spilled
+ // ESI could be spilled
+ // EDI is taken
+ //
+ // Because `b` is used as a first argument to a call, we take its hint and allocate
+ // ECX-EDX to it.
+ //
+ // We then try to allocate a pair for `a`. We're in the following situation:
+ // EAX is free
+ // ECX could be spilled
+ // EDX could be spilled
+ // EBX is free
+ // ESP is blocked
+ // EBP could be spilled
+ // ESI could be spilled
+ // EDI is taken
+ //
+ // So no two consecutive free registers are available. When trying to find a slot, we pick
+ // the first unaligned or non-pair interval. In this case, this is the unaligned ECX-EDX.
+ // The compiler used to then trip because it forgot to remove the high interval containing
+ // the pair from the active list.
+
+ boolean res = a == b;
+ $noinline$doCall(b);
+ myField4 = i; // use of ESI
+ return res;
+ }
+
+ public static void $noinline$doCall() {
+ if (doThrow) throw new Error();
+ }
+
+ public static void $noinline$doCall(long e) {
+ if (doThrow) throw new Error();
+ }
+
+ public static boolean doThrow;
+ public static int myField1;
+ public static int myField2;
+ public static int myField3;
+ public static int myField4;
+ public static int myField5;
+ public static long myLongField1;
+ public static long myLongField2;
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index dd37cdbaf5..6a452eb0c0 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,6 +1,5 @@
PackedSwitch
b/17790197
-b/17978759
FloatBadArgReg
negLong
sameFieldNames
@@ -41,4 +40,6 @@ b/22881413
b/20843113
b/23201502 (float)
b/23201502 (double)
+b/23300986
+b/23300986 (2)
Done!
diff --git a/test/800-smali/smali/b_17978759.smali b/test/800-smali/smali/b_17978759.smali
deleted file mode 100644
index 07bcae5bb4..0000000000
--- a/test/800-smali/smali/b_17978759.smali
+++ /dev/null
@@ -1,28 +0,0 @@
-.class public LB17978759;
-.super Ljava/lang/Object;
-
- .method public constructor <init>()V
- .registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
- .end method
-
- .method public test()V
- .registers 2
-
- move-object v0, p0
- # v0 and p0 alias
- monitor-enter p0
- # monitor-enter on p0
- monitor-exit v0
- # monitor-exit on v0, however, verifier doesn't track this and so this is
- # a warning. Verifier will still think p0 is locked.
-
- move-object v0, p0
- # v0 will now appear locked.
- monitor-enter v0
- # Attempt to lock v0 twice is a verifier failure.
- monitor-exit v0
-
- return-void
- .end method
diff --git a/test/800-smali/smali/b_23300986.smali b/test/800-smali/smali/b_23300986.smali
new file mode 100644
index 0000000000..f008b92925
--- /dev/null
+++ b/test/800-smali/smali/b_23300986.smali
@@ -0,0 +1,23 @@
+.class public LB23300986;
+
+.super Ljava/lang/Object;
+
+.method public static runAliasAfterEnter(Ljava/lang/Object;)V
+ .registers 3
+ monitor-enter v2 # Lock on parameter
+ move-object v1, v2 # Copy parameter into v1, establishing an alias.
+ monitor-exit v1 # Unlock on alias
+ monitor-enter v2 # Do it again.
+ monitor-exit v1
+ return-void
+.end method
+
+.method public static runAliasBeforeEnter(Ljava/lang/Object;)V
+ .registers 3
+ move-object v1, v2 # Copy parameter into v1, establishing an alias.
+ monitor-enter v2 # Lock on parameter
+ monitor-exit v1 # Unlock on alias
+ monitor-enter v2 # Do it again.
+ monitor-exit v1
+ return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index b481a1dbc4..183958aef6 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -53,8 +53,6 @@ public class Main {
new Object[]{123}, null, 123));
testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
- testCases.add(new TestCase("b/17978759", "B17978759", "test", null, new VerifyError(),
- null));
testCases.add(new TestCase("FloatBadArgReg", "FloatBadArgReg", "getInt",
new Object[]{100}, null, 100));
testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
@@ -127,6 +125,10 @@ public class Main {
new NullPointerException(), null));
testCases.add(new TestCase("b/23201502 (double)", "B23201502", "runDouble", null,
new NullPointerException(), null));
+ testCases.add(new TestCase("b/23300986", "B23300986", "runAliasAfterEnter",
+ new Object[] { new Object() }, null, null));
+ testCases.add(new TestCase("b/23300986 (2)", "B23300986", "runAliasBeforeEnter",
+ new Object[] { new Object() }, null, null));
}
public void runTests() {
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 7faf86ed5c..8466bb314c 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -30,3 +30,6 @@ adb shell ifconfig
echo -e "${green}List properties${nc}"
adb shell getprop
+
+echo -e "${green}Stopping framework${nc}"
+adb shell stop