Diffstat (limited to 'runtime')
-rw-r--r--  runtime/art_method-inl.h                          6
-rw-r--r--  runtime/art_method.cc                            32
-rw-r--r--  runtime/art_method.h                             10
-rw-r--r--  runtime/base/timing_logger.cc                     2
-rw-r--r--  runtime/class_linker.cc                           2
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h   110
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc      241
-rw-r--r--  runtime/gc/collector/concurrent_copying.h         4
-rw-r--r--  runtime/jit/jit_code_cache.cc                    15
-rw-r--r--  runtime/jit/jit_instrumentation.cc                6
-rw-r--r--  runtime/mirror/object.h                           2
-rw-r--r--  runtime/mirror/object_array-inl.h                 2
-rw-r--r--  runtime/oat_file_assistant.cc                    45
-rw-r--r--  runtime/oat_file_assistant_test.cc               32
-rw-r--r--  runtime/read_barrier-inl.h                        2
-rw-r--r--  runtime/stack.cc                                 24
-rw-r--r--  runtime/thread_list.cc                            3
-rw-r--r--  runtime/verifier/method_verifier.cc              37
18 files changed, 339 insertions(+), 236 deletions(-)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index f741732046..cf548ada33 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -468,12 +468,6 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
}
}
-inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
- memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
- Size(image_pointer_size));
- declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
-}
-
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c1279bf6b1..f4a5f233ff 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -367,7 +367,7 @@ const uint8_t* ArtMethod::GetQuickenedInfo() {
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
- if (IsRuntimeMethod() || IsProxyMethod()) {
+ if (IsRuntimeMethod()) {
return nullptr;
}
@@ -381,6 +381,12 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return nullptr;
}
+ if (existing_entry_point == GetQuickProxyInvokeHandler()) {
+ DCHECK(IsProxyMethod() && !IsConstructor());
+ // The proxy entry point does not have any method header.
+ return nullptr;
+ }
+
// Check whether the current entry point contains this pc.
if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
!class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
@@ -452,4 +458,28 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return method_header;
}
+
+void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
+ memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
+ Size(image_pointer_size));
+ declaring_class_ = GcRoot<mirror::Class>(src->GetDeclaringClass());
+
+ // If the entry point of the method we are copying from is from JIT code, we just
+ // reset the entry point of the new method to the interpreter. We could set the entry point
+ // to the JIT code, but this would require taking the JIT code cache lock to notify
+ // it, which we do not want at this level.
+ Runtime* runtime = Runtime::Current();
+ if (runtime->GetJit() != nullptr) {
+ if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
+ SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
+ }
+ }
+ // Clear the profiling info for the same reasons as the JIT code.
+ if (!src->IsNative()) {
+ SetProfilingInfoPtrSize(nullptr, image_pointer_size);
+ }
+ // Clear hotness to let the JIT properly decide when to compile this method.
+ hotness_count_ = 0;
+}
+
} // namespace art
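
Note on CopyFrom(): the copy deliberately drops per-method state that is only valid for the source — a JITted entry point (resetting it avoids taking the JIT code cache lock here), the ProfilingInfo, and the hotness counter. A minimal standalone sketch of that reset-on-copy pattern, using simplified stand-in types (Method and FakeJitCache are illustrative, not ART's classes):

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in for the JIT code cache's ContainsPc().
    struct FakeJitCache {
      const void* code_begin;
      const void* code_end;
      bool ContainsPc(const void* pc) const {
        return pc >= code_begin && pc < code_end;
      }
    };

    // Illustrative, trivially copyable stand-in for ArtMethod.
    struct Method {
      const void* entry_point;  // Quick-compiled code entry point.
      void* profiling_info;     // In ART this shares the JNI entry-point slot.
      uint16_t hotness_count;

      void CopyFrom(const Method& src, const FakeJitCache* jit,
                    const void* interpreter_bridge) {
        std::memcpy(this, &src, sizeof(Method));
        // Pointing the copy at JIT code would require notifying the code
        // cache (taking its lock), so fall back to the interpreter instead.
        if (jit != nullptr && jit->ContainsPc(entry_point)) {
          entry_point = interpreter_bridge;
        }
        profiling_info = nullptr;  // The old ProfilingInfo belongs to src.
        hotness_count = 0;         // Let the JIT re-decide when to compile.
      }
    };
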
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 551989d182..ce9f2025ce 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -49,8 +49,8 @@ class ArtMethod FINAL {
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0) { }
- ArtMethod(const ArtMethod& src, size_t image_pointer_size) {
- CopyFrom(&src, image_pointer_size);
+ ArtMethod(ArtMethod* src, size_t image_pointer_size) {
+ CopyFrom(src, image_pointer_size);
}
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
@@ -313,6 +313,10 @@ class ArtMethod FINAL {
SetEntryPointFromJniPtrSize(info, sizeof(void*));
}
+ ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, size_t pointer_size) {
+ SetEntryPointFromJniPtrSize(info, pointer_size);
+ }
+
static MemberOffset ProfilingInfoOffset() {
return EntryPointFromJniOffset(sizeof(void*));
}
@@ -429,7 +433,7 @@ class ArtMethod FINAL {
return pointer_size;
}
- void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
+ void CopyFrom(ArtMethod* src, size_t image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
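
Note on SetProfilingInfoPtrSize(): forwarding to SetEntryPointFromJniPtrSize() is intentional — for non-native methods, ART reuses the pointer-sized JNI entry-point slot to hold the ProfilingInfo, which is also why ProfilingInfoOffset() returns the JNI entry-point offset and why CopyFrom() above clears it only for non-native methods. A toy sketch of that slot reuse, with illustrative names (ART uses accessors rather than a literal union, but the aliasing is the same idea):

    struct ProfilingInfo;  // Opaque here.

    struct MethodSlotSketch {
      bool is_native;
      union {
        const void* jni_entry_point;    // Meaningful only when is_native.
        ProfilingInfo* profiling_info;  // Meaningful only when !is_native.
      } jni_or_profiling;

      void SetProfilingInfo(ProfilingInfo* info) {
        // Mirrors SetProfilingInfoPtrSize() writing through the JNI slot.
        jni_or_profiling.profiling_info = info;
      }
    };
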
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index f1f6f9b1c1..1942e1dc1b 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -125,7 +125,7 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) const {
histogram->CreateHistogram(&cumulative_data);
histogram->PrintConfidenceIntervals(os, 0.99, cumulative_data);
}
- os << "Done Dumping histograms \n";
+ os << "Done Dumping histograms\n";
}
TimingLogger::TimingLogger(const char* name, bool precise, bool verbose)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 5de1cacba8..da70456369 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -5279,7 +5279,7 @@ bool ClassLinker::LinkInterfaceMethods(
miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
CHECK(miranda_method != nullptr);
// Point the interface table at a phantom slot.
- new(miranda_method) ArtMethod(*interface_method, image_pointer_size_);
+ new(miranda_method) ArtMethod(interface_method, image_pointer_size_);
miranda_methods.push_back(miranda_method);
}
method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_);
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
new file mode 100644
index 0000000000..26f5ad3df5
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
+
+#include "concurrent_copying.h"
+
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/region_space.h"
+#include "lock_word.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
+ if (from_ref == nullptr) {
+ return nullptr;
+ }
+ DCHECK(heap_->collector_type_ == kCollectorTypeCC);
+ if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
+ // In the lock word forward address state, the read barrier bits
+ // in the lock word are part of the stored forwarding address and
+ // invalid. This is usually OK as the from-space copies of objects
+ // aren't accessed by mutators due to the to-space
+ // invariant. However, during the dex2oat image writing relocation
+ // and the zygote compaction, objects can be in the forward
+ // address state (to store the forward/relocation addresses) and
+ // they can still be accessed and the invalid read barrier bits
+ // are consulted. If they look like gray but aren't really, the
+ // read barriers slow path can trigger when it shouldn't. To guard
+ // against this, return here if the CC collector isn't running.
+ return from_ref;
+ }
+ DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
+ space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
+ switch (rtype) {
+ case space::RegionSpace::RegionType::kRegionTypeToSpace:
+ // It's already marked.
+ return from_ref;
+ case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
+ mirror::Object* to_ref = GetFwdPtr(from_ref);
+ if (kUseBakerReadBarrier) {
+ DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
+ << "from_ref=" << from_ref << " to_ref=" << to_ref;
+ }
+ if (to_ref == nullptr) {
+ // It isn't marked yet. Mark it by copying it to the to-space.
+ to_ref = Copy(from_ref);
+ }
+ DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
+ << "from_ref=" << from_ref << " to_ref=" << to_ref;
+ return to_ref;
+ }
+ case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
+ // This may or may not succeed, which is ok.
+ if (kUseBakerReadBarrier) {
+ from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ }
+ mirror::Object* to_ref = from_ref;
+ if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
+ // Already marked.
+ } else {
+ // Newly marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ }
+ PushOntoMarkStack(to_ref);
+ }
+ return to_ref;
+ }
+ case space::RegionSpace::RegionType::kRegionTypeNone:
+ return MarkNonMoving(from_ref);
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
+ DCHECK(region_space_->IsInFromSpace(from_ref));
+ LockWord lw = from_ref->GetLockWord(false);
+ if (lw.GetState() == LockWord::kForwardingAddress) {
+ mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
+ DCHECK(fwd_ptr != nullptr);
+ return fwd_ptr;
+ } else {
+ return nullptr;
+ }
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
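
Moving Mark() into this new -inl.h header lets the read barrier inline its fast cases, which amount to a dispatch on region type. A condensed sketch of that dispatch, with simplified stand-in helpers (GetRegionType, GetForwardingAddress, CopyToToSpace, and TestAndSetMarkBit are illustrative names, not the real API):

    // Illustrative region types; the real enum lives in space::RegionSpace.
    enum class RegionType { kToSpace, kFromSpace, kUnevacFromSpace, kNone };

    // Condensed dispatch mirroring Mark(): to-space refs are already marked;
    // from-space refs are forwarded (copied on first touch); unevacuated
    // from-space refs are marked in place; anything outside the region space
    // takes the non-moving path.
    template <typename Obj, typename Collector>
    Obj* MarkSketch(Collector* cc, Obj* ref) {
      if (ref == nullptr) {
        return nullptr;
      }
      switch (cc->GetRegionType(ref)) {
        case RegionType::kToSpace:
          return ref;
        case RegionType::kFromSpace: {
          Obj* to_ref = cc->GetForwardingAddress(ref);
          return to_ref != nullptr ? to_ref : cc->CopyToToSpace(ref);
        }
        case RegionType::kUnevacFromSpace:
          if (!cc->TestAndSetMarkBit(ref)) {  // Newly marked.
            cc->PushOntoMarkStack(ref);
          }
          return ref;
        case RegionType::kNone:
          return cc->MarkNonMoving(ref);
      }
      return ref;  // Unreachable for a well-formed region type.
    }
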
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 20e775c7aa..4a49712cbc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -368,30 +368,15 @@ void ConcurrentCopying::MarkingPhase() {
}
}
}
- // TODO: Other garbage collectors uses Runtime::VisitConcurrentRoots(), refactor this part
- // to also use the same function.
{
- TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
- Runtime::Current()->VisitConstantRoots(this);
- }
- {
- TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
- Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
- }
- {
- TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
- Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
+ TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+ Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
}
{
// TODO: don't visit the transaction roots if it's not active.
TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
Runtime::Current()->VisitNonThreadRoots(this);
}
- {
- TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
- Dbg::VisitRoots(this);
- }
- Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
// Immune spaces.
for (auto& space : heap_->GetContinuousSpaces()) {
@@ -594,8 +579,8 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
CHECK(thread_running_gc_ != nullptr);
MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
- if (mark_stack_mode == kMarkStackModeThreadLocal) {
- if (self == thread_running_gc_) {
+ if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
+ if (LIKELY(self == thread_running_gc_)) {
// If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
CHECK(self->GetThreadLocalMarkStack() == nullptr);
if (UNLIKELY(gc_mark_stack_->IsFull())) {
@@ -663,18 +648,6 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
return heap_->live_stack_.get();
}
-inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
- DCHECK(region_space_->IsInFromSpace(from_ref));
- LockWord lw = from_ref->GetLockWord(false);
- if (lw.GetState() == LockWord::kForwardingAddress) {
- mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
- CHECK(fwd_ptr != nullptr);
- return fwd_ptr;
- } else {
- return nullptr;
- }
-}
-
// The following visitors are used to verify that there are no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
@@ -1080,7 +1053,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
return count;
}
-void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
+inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
@@ -1095,9 +1068,10 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
<< " is_marked=" << IsMarked(to_ref);
}
- if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
- to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
- !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
+#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
+ if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
+ to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
+ !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
// Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
// will change it to black or white later in ReferenceQueue::DequeuePendingReference().
CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
@@ -1106,14 +1080,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
// be concurrently marked after the Scan() call above has enqueued the Reference, in which case
// the above IsInToSpace() evaluates to true and we change the color from gray to black or white
// here in this else block.
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
if (kUseBakerReadBarrier) {
if (region_space_->IsInToSpace(to_ref)) {
// If to-space, change from gray to white.
bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
ReadBarrier::WhitePtr());
CHECK(success) << "Must succeed as we won the race.";
- CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
} else {
// If non-moving space/unevac from space, change from gray
// to black. We can't change gray to white because it's not
@@ -1125,13 +1098,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
ReadBarrier::BlackPtr());
CHECK(success) << "Must succeed as we won the race.";
- CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
}
+ }
#else
- DCHECK(!kUseBakerReadBarrier);
+ DCHECK(!kUseBakerReadBarrier);
#endif
- }
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
@@ -1622,6 +1595,7 @@ class ConcurrentCopyingRefFieldsVisitor {
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
@@ -1629,6 +1603,7 @@ class ConcurrentCopyingRefFieldsVisitor {
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
collector_->MarkRoot(root);
}
@@ -1638,7 +1613,7 @@ class ConcurrentCopyingRefFieldsVisitor {
};
// Scan ref fields of an object.
-void ConcurrentCopying::Scan(mirror::Object* to_ref) {
+inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
ConcurrentCopyingRefFieldsVisitor visitor(this);
to_ref->VisitReferences(visitor, visitor);
@@ -1648,9 +1623,6 @@ void ConcurrentCopying::Scan(mirror::Object* to_ref) {
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- return;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref == ref) {
return;
@@ -1669,14 +1641,11 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
}
// Process some roots.
-void ConcurrentCopying::VisitRoots(
+inline void ConcurrentCopying::VisitRoots(
mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- continue;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref == ref) {
continue;
@@ -1693,12 +1662,9 @@ void ConcurrentCopying::VisitRoots(
}
}
-void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
DCHECK(!root->IsNull());
mirror::Object* const ref = root->AsMirrorPtr();
- if (region_space_->IsInToSpace(ref)) {
- return;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
@@ -1714,7 +1680,7 @@ void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
}
}
-void ConcurrentCopying::VisitRoots(
+inline void ConcurrentCopying::VisitRoots(
mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
@@ -2013,148 +1979,85 @@ bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
return alloc_stack->Contains(ref);
}
-mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
- if (from_ref == nullptr) {
- return nullptr;
- }
- DCHECK(from_ref != nullptr);
- DCHECK(heap_->collector_type_ == kCollectorTypeCC);
- if (kUseBakerReadBarrier && !is_active_) {
- // In the lock word forward address state, the read barrier bits
- // in the lock word are part of the stored forwarding address and
- // invalid. This is usually OK as the from-space copy of objects
- // aren't accessed by mutators due to the to-space
- // invariant. However, during the dex2oat image writing relocation
- // and the zygote compaction, objects can be in the forward
- // address state (to store the forward/relocation addresses) and
- // they can still be accessed and the invalid read barrier bits
- // are consulted. If they look like gray but aren't really, the
- // read barriers slow path can trigger when it shouldn't. To guard
- // against this, return here if the CC collector isn't running.
- return from_ref;
- }
- DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
- space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
- if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
- // It's already marked.
- return from_ref;
- }
- mirror::Object* to_ref;
- if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
- to_ref = GetFwdPtr(from_ref);
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
- }
- if (to_ref == nullptr) {
- // It isn't marked yet. Mark it by copying it to the to-space.
- to_ref = Copy(from_ref);
+mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
+ // ref is in a non-moving space (from_ref == to_ref).
+ DCHECK(!region_space_->HasAddress(ref)) << ref;
+ if (immune_region_.ContainsObject(ref)) {
+ accounting::ContinuousSpaceBitmap* cc_bitmap =
+ cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
+ DCHECK(cc_bitmap != nullptr)
+ << "An immune space object must have a bitmap";
+ if (kIsDebugBuild) {
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
+ << "Immune space object must be already marked";
}
- DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
- << "from_ref=" << from_ref << " to_ref=" << to_ref;
- } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
// This may or may not succeed, which is ok.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
}
- if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
+ if (cc_bitmap->AtomicTestAndSet(ref)) {
// Already marked.
- to_ref = from_ref;
} else {
// Newly marked.
- to_ref = from_ref;
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
- PushOntoMarkStack(to_ref);
+ PushOntoMarkStack(ref);
}
} else {
- // from_ref is in a non-moving space.
- DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
- if (immune_region_.ContainsObject(from_ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
- << "Immune space object must be already marked";
- }
- // This may or may not succeed, which is ok.
+ // Use the mark bitmap.
+ accounting::ContinuousSpaceBitmap* mark_bitmap =
+ heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
+ accounting::LargeObjectBitmap* los_bitmap =
+ heap_mark_bitmap_->GetLargeObjectBitmap(ref);
+ CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
+ bool is_los = mark_bitmap == nullptr;
+ if (!is_los && mark_bitmap->Test(ref)) {
+ // Already marked.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
- if (cc_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
+ } else if (is_los && los_bitmap->Test(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
} else {
- // Use the mark bitmap.
- accounting::ContinuousSpaceBitmap* mark_bitmap =
- heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
- accounting::LargeObjectBitmap* los_bitmap =
- heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
- CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
- bool is_los = mark_bitmap == nullptr;
- if (!is_los && mark_bitmap->Test(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ // Not marked.
+ if (IsOnAllocStack(ref)) {
+ // If it's on the allocation stack, it's considered marked. Keep it white.
+ // Objects on the allocation stack need not be marked.
+ if (!is_los) {
+ DCHECK(!mark_bitmap->Test(ref));
+ } else {
+ DCHECK(!los_bitmap->Test(ref));
}
- } else if (is_los && los_bitmap->Test(from_ref)) {
- // Already marked in LOS.
- to_ref = from_ref;
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
- // Not marked.
- if (IsOnAllocStack(from_ref)) {
- // If it's on the allocation stack, it's considered marked. Keep it white.
- to_ref = from_ref;
- // Objects on the allocation stack need not be marked.
- if (!is_los) {
- DCHECK(!mark_bitmap->Test(to_ref));
- } else {
- DCHECK(!los_bitmap->Test(to_ref));
- }
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
- }
+ // Not marked or on the allocation stack. Try to mark it.
+ // This may or may not succeed, which is ok.
+ if (kUseBakerReadBarrier) {
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ }
+ if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked in LOS.
} else {
- // Not marked or on the allocation stack. Try to mark it.
- // This may or may not succeed, which is ok.
+ // Newly marked.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked in LOS.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
+ PushOntoMarkStack(ref);
}
}
}
}
- return to_ref;
+ return ref;
}
void ConcurrentCopying::FinishPhase() {
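
The extracted MarkNonMoving() chooses among the continuous-space mark bitmap, the large-object (LOS) bitmap, and the allocation stack, where being on the allocation stack counts as implicitly marked. A condensed sketch of that decision order, omitting the immune-space case and the Baker color transitions (Bitmap and Stack are simplified stand-ins for ART's bitmap and ObjectStack types):

    // Condensed decision order for a non-moving ref, mirroring MarkNonMoving().
    template <typename Obj, typename Bitmap, typename Stack, typename PushFn>
    Obj* MarkNonMovingSketch(Obj* ref, Bitmap* mark_bitmap, Bitmap* los_bitmap,
                             Stack* alloc_stack, PushFn push) {
      // A null continuous-space bitmap means the ref is a large object (LOS).
      Bitmap* bitmap = (mark_bitmap != nullptr) ? mark_bitmap : los_bitmap;
      if (bitmap->Test(ref)) {
        return ref;  // Already marked.
      }
      if (alloc_stack->Contains(ref)) {
        return ref;  // On the allocation stack: implicitly marked, stays white.
      }
      if (!bitmap->AtomicTestAndSet(ref)) {
        push(ref);  // We won the race: newly marked, so scan it later.
      }
      return ref;  // Non-moving space: from_ref == to_ref.
    }
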
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index c32b19ea3a..27726e23c1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -93,7 +93,7 @@ class ConcurrentCopying : public GarbageCollector {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
- mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
bool IsMarking() const {
return is_marking_;
@@ -183,6 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cfccec87cf..ce972ef976 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,6 +25,7 @@
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "scoped_thread_state_change.h"
#include "thread_list.h"
namespace art {
@@ -407,9 +408,17 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
{
Barrier barrier(0);
- MarkCodeClosure closure(this, &barrier);
- size_t threads_running_checkpoint =
- Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ size_t threads_running_checkpoint = 0;
+ {
+ // Walking the stack requires the mutator lock.
+ // We take the lock only while running the checkpoint, not while waiting, so
+ // that once we transition back to suspended we can execute checkpoints that
+ // were requested concurrently, and then wait for our own checkpoint to finish.
+ ScopedObjectAccess soa(self);
+ MarkCodeClosure closure(this, &barrier);
+ threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ }
if (threads_running_checkpoint != 0) {
barrier.Increment(self, threads_running_checkpoint);
}
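
The new scoping requests the checkpoint while holding the shared mutator lock but waits on the barrier only after releasing it. A rough standalone sketch of the same shape (ToyBarrier and RequestCheckpointOnAllThreads are simplified stand-ins for ART's Barrier and ThreadList::RunCheckpoint, the latter stubbed out here):

    #include <condition_variable>
    #include <mutex>
    #include <shared_mutex>

    // Toy barrier: Wait() blocks until Pass() has been called `expected` times.
    class ToyBarrier {
     public:
      void Pass() {
        std::lock_guard<std::mutex> lock(mu_);
        ++count_;
        cv_.notify_all();
      }
      void Wait(int expected) {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [&] { return count_ >= expected; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    // Stand-in for ThreadList::RunCheckpoint(); returns how many threads will
    // eventually call barrier->Pass(). Stubbed out for this sketch.
    static int RequestCheckpointOnAllThreads(ToyBarrier* /*barrier*/) { return 0; }

    // Shape of the sequence above: hold the shared "mutator" lock only while
    // requesting the checkpoint, then drop it before blocking on the barrier
    // so checkpoints requested against this thread can still run.
    void CollectSketch(std::shared_mutex& mutator_lock) {
      ToyBarrier barrier;
      int threads_running_checkpoint = 0;
      {
        std::shared_lock<std::shared_mutex> runnable(mutator_lock);  // ~ScopedObjectAccess
        threads_running_checkpoint = RequestCheckpointOnAllThreads(&barrier);
      }
      if (threads_running_checkpoint != 0) {
        barrier.Wait(threads_running_checkpoint);
      }
    }
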
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 8aaa5fa304..7931306ff6 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -102,15 +102,13 @@ void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t
} else {
// We failed allocating. Instead of doing the collection on the Java thread, we push
// an allocation task to a compiler thread, which will do the collection.
- thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kAllocateProfile));
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
thread_pool_->StartWorkers(self);
}
}
if (sample_count == hot_method_threshold_) {
- thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kCompile));
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
thread_pool_->StartWorkers(self);
}
}
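
AddSamples() drives JIT decisions off a per-method sample count crossing fixed thresholds. A minimal sketch of that counter pattern (the warm/hot split and callback names are illustrative, not ART's API):

    #include <cstdint>

    // Toy hotness counter: run a profile-allocation action at the warm
    // threshold and a compile action at the hot threshold.
    class HotnessSketch {
     public:
      HotnessSketch(uint16_t warm, uint16_t hot) : warm_(warm), hot_(hot) {}

      template <typename OnWarm, typename OnHot>
      void AddSamples(uint16_t count, OnWarm on_warm, OnHot on_hot) {
        samples_ += count;
        if (samples_ == warm_) on_warm();  // e.g. allocate a ProfilingInfo.
        if (samples_ == hot_) on_hot();    // e.g. enqueue a compile task.
      }

     private:
      uint16_t warm_;
      uint16_t hot_;
      uint16_t samples_ = 0;
    };
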
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 022f31dc53..5c6520fcab 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -99,7 +99,7 @@ class MANAGED LOCKABLE Object {
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
+ ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_REQUIRES(Locks::mutator_lock_);
void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 5b73557941..5337760fb8 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -270,7 +270,7 @@ inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
}
template<class T> template<typename Visitor>
-void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
+inline void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
const size_t length = static_cast<size_t>(GetLength());
for (size_t i = 0; i < length; ++i) {
visitor(this, OffsetOfElement(i), false);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8d5418d07d..99080f611c 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -662,23 +662,19 @@ bool OatFileAssistant::RelocateOatFile(const std::string* input_file,
bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
CHECK(error_msg != nullptr);
- if (OatFileName() == nullptr) {
+ Runtime* runtime = Runtime::Current();
+ if (!runtime->IsDex2OatEnabled()) {
*error_msg = "Generation of oat file for dex location " + dex_location_
- + " not attempted because the oat file name could not be determined.";
+ + " not attempted because dex2oat is disabled.";
return false;
}
- const std::string& oat_file_name = *OatFileName();
- Runtime* runtime = Runtime::Current();
- if (!runtime->IsDex2OatEnabled()) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because dex2oat is disabled";
+ if (OatFileName() == nullptr) {
+ *error_msg = "Generation of oat file for dex location " + dex_location_
+ + " not attempted because the oat file name could not be determined.";
return false;
}
-
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location_);
- args.push_back("--oat-file=" + oat_file_name);
+ const std::string& oat_file_name = *OatFileName();
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
@@ -688,9 +684,36 @@ bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
return false;
}
+ std::unique_ptr<File> oat_file;
+ oat_file.reset(OS::CreateEmptyFile(oat_file_name.c_str()));
+ if (oat_file.get() == nullptr) {
+ *error_msg = "Generation of oat file " + oat_file_name
+ + " not attempted because the oat file could not be created.";
+ return false;
+ }
+
+ if (fchmod(oat_file->Fd(), 0644) != 0) {
+ *error_msg = "Generation of oat file " + oat_file_name
+ + " not attempted because the oat file could not be made world readable.";
+ oat_file->Erase();
+ return false;
+ }
+
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location_);
+ args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+ args.push_back("--oat-location=" + oat_file_name);
+
if (!Dex2Oat(args, error_msg)) {
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
+ oat_file->Erase();
+ TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ return false;
+ }
+
+ if (oat_file->FlushCloseOrErase() != 0) {
+ *error_msg = "Unable to close oat file " + oat_file_name;
TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
return false;
}
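
GenerateOatFile() now creates the output file itself: permission problems surface before dex2oat is launched, the file is forced world-readable regardless of umask, dex2oat receives the already-open descriptor via --oat-fd, and any failure unlinks the path so no half-written oat file is left behind. A condensed POSIX sketch of that sequence (RunDex2oat is a hypothetical stand-in for the Dex2Oat() helper and is stubbed out):

    #include <fcntl.h>
    #include <string>
    #include <sys/stat.h>
    #include <unistd.h>

    // Hypothetical stand-in for the Dex2Oat() helper; a real implementation
    // would exec dex2oat with --dex-file, --oat-fd, and --oat-location.
    static bool RunDex2oat(int /*oat_fd*/, const std::string& /*dex_location*/,
                           const std::string& /*oat_location*/) {
      return false;
    }

    bool GenerateOatSketch(const std::string& dex_location,
                           const std::string& oat_path) {
      int fd = open(oat_path.c_str(), O_CREAT | O_TRUNC | O_RDWR, 0644);
      if (fd < 0) {
        return false;  // e.g. a missing intermediate directory, as tested below.
      }
      if (fchmod(fd, 0644) != 0) {  // Force world-readable regardless of umask.
        close(fd);
        unlink(oat_path.c_str());
        return false;
      }
      if (!RunDex2oat(fd, dex_location, oat_path)) {
        close(fd);
        unlink(oat_path.c_str());  // Leave no half-written oat file behind.
        return false;
      }
      if (close(fd) != 0) {
        unlink(oat_path.c_str());
        return false;
      }
      return true;
    }
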
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 2c81eddf39..c54d7f8761 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -849,6 +849,38 @@ TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) {
EXPECT_FALSE(ofm.OatFileExists());
}
+// Case: We have a DEX file but can't write the oat file.
+// Expect: We should fail to make the oat file up to date.
+TEST_F(OatFileAssistantTest, LoadDexUnwriteableAlternateOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexUnwriteableAlternateOat.jar";
+
+ // Make the oat location unwritable by inserting some non-existent
+ // intermediate directories.
+ std::string oat_location = GetScratchDir() + "/foo/bar/LoadDexUnwriteableAlternateOat.oat";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() == nullptr);
+}
+
+// Case: We don't have a DEX file and can't write the oat file.
+// Expect: We should fail to generate the oat file without crashing.
+TEST_F(OatFileAssistantTest, GenNoDex) {
+ std::string dex_location = GetScratchDir() + "/GenNoDex.jar";
+ std::string oat_location = GetScratchDir() + "/GenNoDex.oat";
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_FALSE(oat_file_assistant.GenerateOatFile(&error_msg));
+}
+
// Turn an absolute path into a path relative to the current working
// directory.
static std::string MakePathRelative(std::string target) {
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 4998a6a478..7de6c06f2b 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -19,7 +19,7 @@
#include "read_barrier.h"
-#include "gc/collector/concurrent_copying.h"
+#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b0727daa15..d7edfade15 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -958,26 +958,18 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
return runtime->GetRuntimeMethodFrameInfo(method);
}
- // For Proxy method we add special handling for the direct method case (there is only one
- // direct method - constructor). Direct method is cloned from original
- // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
- // quick compiled method without any stubs. So the frame info should be returned as it is a
- // quick method not a stub. However, if instrumentation stubs are installed, the
- // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
- // oat code pointer, thus we have to add a special case here.
if (method->IsProxyMethod()) {
- if (method->IsDirect()) {
- CHECK(method->IsConstructor());
- const void* code_pointer =
- EntryPointToCodePointer(method->GetEntryPointFromQuickCompiledCode());
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
+ // There is only one direct method of a proxy class: the constructor. A direct method is
+ // cloned from the original java.lang.reflect.Proxy class and is executed as a usual
+ // quick-compiled method without any stubs, so it has an OatQuickMethodHeader and is
+ // handled before we get here.
+ DCHECK(!method->IsDirect() && !method->IsConstructor())
+ << "Constructors of proxy classes must have an OatQuickMethodHeader";
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
}
- ClassLinker* class_linker = runtime->GetClassLinker();
+ // The only remaining case is if the method is native and uses the generic JNI stub.
DCHECK(method->IsNative());
+ ClassLinker* class_linker = runtime->GetClassLinker();
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
// Generic JNI frame.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index dcf9601b4b..b09b87fb58 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -275,9 +275,6 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking && gAborting == 0) {
- CHECK_NE(self->GetState(), kRunnable);
- }
std::vector<Thread*> suspended_count_modified_threads;
size_t count = 0;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index e1d4160aac..2db79ab229 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -665,20 +665,22 @@ bool MethodVerifier::Verify() {
// Interfaces may always have static initializers for their fields. If we are running with
// default methods enabled we also allow other public, static, non-final methods to have code.
// Otherwise that is the only type of method allowed.
- if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
- if (IsInstanceConstructor()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
- return false;
- } else if (method_access_flags_ & kAccFinal) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
- return false;
- } else if (!(method_access_flags_ & kAccPublic)) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ if (!(IsConstructor() && IsStatic())) {
+ if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
+ if (IsInstanceConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
+ return false;
+ } else if (method_access_flags_ & kAccFinal) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
+ return false;
+ } else if (!(method_access_flags_ & kAccPublic)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ return false;
+ }
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
return false;
}
- } else if (!IsConstructor() || !IsStatic()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
- return false;
}
}
@@ -3662,8 +3664,15 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
<< PrettyMethod(res_method);
return nullptr;
}
- // Check that interface methods match interface classes.
- if (klass->IsInterface() && method_type != METHOD_INTERFACE) {
+ // Check that interface methods are static or match interface classes.
+ // We only allow statics if default methods are enabled.
+ Runtime* runtime = Runtime::Current();
+ const bool default_methods_supported =
+ runtime == nullptr ||
+ runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
+ if (klass->IsInterface() &&
+ method_type != METHOD_INTERFACE &&
+ (!default_methods_supported || method_type != METHOD_STATIC)) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
<< " is in an interface class " << PrettyClass(klass);
return nullptr;
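
Taken together with the Verify() change above, the rule for interface methods with code can be restated as a standalone predicate. A compact sketch (names are simplified, and the real verifier reports a distinct hard failure per case):

    struct MethodFlags {
      bool is_static_constructor;  // <clinit>
      bool is_instance_constructor;
      bool is_public;
      bool is_final;
    };

    // Returns true if an interface method with a code item is acceptable.
    // Static initializers are always allowed; with default methods enabled,
    // other public, non-final, non-constructor methods are allowed too.
    bool InterfaceMethodWithCodeOk(const MethodFlags& m, bool default_methods) {
      if (m.is_static_constructor) return true;
      if (!default_methods) return false;  // "interface methods must be abstract"
      return !m.is_instance_constructor && m.is_public && !m.is_final;
    }
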