author Alex Light <allight@google.com> 2019-10-29 11:15:05 -0700
committer Treehugger Robot <treehugger-gerrit@google.com> 2019-11-16 00:26:03 +0000
commit 283bb322de84ac570b987c65a1015e2dbcbfad7c (patch)
tree 18f5d68c78e56a2d398192c8340269a60706b7a0
parent 2c5dfe16ab2b0f8fbd14dedc161eb4658a8673fc (diff)
Initial support for adding virtuals with structural redefinition
Initial implementation of adding virtual methods and non-static fields
using structural redefinition. Currently this is limited to 'final',
non-finalizable classes. These restrictions will be removed or loosened
in the future.

All non-collected instances of the redefined class will be made obsolete
and reallocated. This can cause significant GC load.

This feature does not work with any of the -quick opcodes and should only
be used with dex files that haven't undergone dex2dex compilation (that is
--debuggable and BCP dex files).

Test: ./test.py --host
Bug: 134162467
Bug: 144168550

Change-Id: Ia401d97395cfe498eb849a661ea9a900dfaa6da3
-rw-r--r--  openjdkjvmti/Android.bp  1
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc  3
-rw-r--r--  openjdkjvmti/alloc_manager.cc  187
-rw-r--r--  openjdkjvmti/alloc_manager.h  114
-rw-r--r--  openjdkjvmti/events.cc  18
-rw-r--r--  openjdkjvmti/events.h  4
-rw-r--r--  openjdkjvmti/ti_heap.cc  127
-rw-r--r--  openjdkjvmti/ti_heap.h  10
-rw-r--r--  openjdkjvmti/ti_redefine.cc  430
-rw-r--r--  openjdkjvmti/ti_redefine.h  16
-rw-r--r--  runtime/base/locks.h  3
-rw-r--r--  runtime/gc/allocation_listener.h  22
-rw-r--r--  runtime/gc/heap-inl.h  32
-rw-r--r--  runtime/gc/heap.cc  49
-rw-r--r--  runtime/gc/heap.h  4
-rw-r--r--  runtime/offsets.h  3
-rw-r--r--  test/1983-structural-redefinition-failures/expected.txt  2
-rw-r--r--  test/1994-final-virtual-structural/expected.txt  5
-rw-r--r--  test/1994-final-virtual-structural/info.txt  3
-rwxr-xr-x  test/1994-final-virtual-structural/run  17
-rw-r--r--  test/1994-final-virtual-structural/src/Main.java  21
l---------  test/1994-final-virtual-structural/src/art/Redefinition.java  1
-rw-r--r--  test/1994-final-virtual-structural/src/art/Test1994.java  88
-rw-r--r--  test/1995-final-virtual-structural-multithread/expected.txt  0
-rw-r--r--  test/1995-final-virtual-structural-multithread/info.txt  4
-rwxr-xr-x  test/1995-final-virtual-structural-multithread/run  21
-rw-r--r--  test/1995-final-virtual-structural-multithread/src/Main.java  21
l---------  test/1995-final-virtual-structural-multithread/src/art/Redefinition.java  1
-rw-r--r--  test/1995-final-virtual-structural-multithread/src/art/Test1995.java  168
-rw-r--r--  test/1996-final-override-virtual-structural/expected.txt  6
-rw-r--r--  test/1996-final-override-virtual-structural/info.txt  3
-rwxr-xr-x  test/1996-final-override-virtual-structural/run  17
-rw-r--r--  test/1996-final-override-virtual-structural/src/Main.java  21
l---------  test/1996-final-override-virtual-structural/src/art/Redefinition.java  1
-rw-r--r--  test/1996-final-override-virtual-structural/src/art/Test1996.java  94
-rw-r--r--  test/knownfailures.json  3
36 files changed, 1358 insertions, 162 deletions
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index e7306ba095..37ae951720 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -26,6 +26,7 @@ cc_defaults {
defaults: ["art_defaults"],
host_supported: true,
srcs: [
+ "alloc_manager.cc",
"deopt_manager.cc",
"events.cc",
"fixed_up_dex_file.cc",
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 665fa9f496..4ce376ff9b 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -40,6 +40,7 @@
#include "jvmti.h"
+#include "alloc_manager.h"
#include "art_jvmti.h"
#include "base/logging.h" // For gLogVerbosity.
#include "base/mutex.h"
@@ -79,6 +80,7 @@ namespace openjdkjvmti {
// These should never be null.
EventHandler* gEventHandler;
DeoptManager* gDeoptManager;
+AllocationManager* gAllocManager;
#define ENSURE_NON_NULL(n) \
do { \
@@ -1497,6 +1499,7 @@ static jint GetEnvHandler(art::JavaVMExt* vm, /*out*/void** env, jint version) {
extern "C" bool ArtPlugin_Initialize() {
art::Runtime* runtime = art::Runtime::Current();
+ gAllocManager = new AllocationManager;
gDeoptManager = new DeoptManager;
gEventHandler = new EventHandler;
diff --git a/openjdkjvmti/alloc_manager.cc b/openjdkjvmti/alloc_manager.cc
new file mode 100644
index 0000000000..597ab05ba0
--- /dev/null
+++ b/openjdkjvmti/alloc_manager.cc
@@ -0,0 +1,187 @@
+
+/* Copyright (C) 2019 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "alloc_manager.h"
+
+#include <atomic>
+#include <sstream>
+
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "gc/heap.h"
+#include "handle.h"
+#include "mirror/class-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+#include "thread_pool.h"
+
+namespace openjdkjvmti {
+
+template<typename T>
+void AllocationManager::PauseForAllocation(art::Thread* self, T msg) {
+ // The suspension can pause us for arbitrarily long. Unfortunately we need to be suspended in
+ // order to sleep, so we do: test, suspend, test again, sleep, repeat.
+ std::string cause;
+ const bool is_logging = VLOG_IS_ON(plugin);
+ while (true) {
+ // We always return when there is no pause and we are runnable.
+ art::Thread* pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
+ if (LIKELY(pausing_thread == nullptr || pausing_thread == self)) {
+ return;
+ }
+ if (UNLIKELY(is_logging && cause.empty())) {
+ cause = msg();
+ }
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+ art::MutexLock mu(self, alloc_listener_mutex_);
+ pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
+ CHECK_NE(pausing_thread, self) << "We should always be setting pausing_thread = self!"
+ << " How did this happen? " << *self;
+ if (pausing_thread != nullptr) {
+ VLOG(plugin) << "Suspending " << *self << " due to " << cause << ". Allocation pause "
+ << "initiated by " << *pausing_thread;
+ alloc_pause_cv_.Wait(self);
+ }
+ }
+}
+
+extern AllocationManager* gAllocManager;
+AllocationManager* AllocationManager::Get() {
+ return gAllocManager;
+}
+
+void JvmtiAllocationListener::ObjectAllocated(art::Thread* self,
+ art::ObjPtr<art::mirror::Object>* obj,
+ size_t cnt) {
+ auto cb = manager_->callback_;
+ if (cb != nullptr && manager_->callback_enabled_.load(std::memory_order_seq_cst)) {
+ cb->ObjectAllocated(self, obj, cnt);
+ }
+}
+
+bool JvmtiAllocationListener::HasPreAlloc() const {
+ return manager_->allocations_paused_thread_.load(std::memory_order_seq_cst) != nullptr;
+}
+
+void JvmtiAllocationListener::PreObjectAllocated(art::Thread* self,
+ art::MutableHandle<art::mirror::Class> type,
+ size_t* byte_count) {
+ manager_->PauseForAllocation(self, [&]() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ std::ostringstream oss;
+ oss << "allocating " << *byte_count << " bytes of type " << type->PrettyClass();
+ return oss.str();
+ });
+ if (!type->IsVariableSize()) {
+ *byte_count = type->GetObjectSize();
+ }
+}
+
+AllocationManager::AllocationManager()
+ : alloc_listener_(nullptr),
+ alloc_listener_mutex_("JVMTI Alloc listener",
+ art::LockLevel::kPostUserCodeSuspensionTopLevelLock),
+ alloc_pause_cv_("JVMTI Allocation Pause Condvar", alloc_listener_mutex_) {
+ alloc_listener_.reset(new JvmtiAllocationListener(this));
+}
+
+void AllocationManager::DisableAllocationCallback(art::Thread* self) {
+ callback_enabled_.store(false);
+ DecrListenerInstall(self);
+}
+
+void AllocationManager::EnableAllocationCallback(art::Thread* self) {
+ IncrListenerInstall(self);
+ callback_enabled_.store(true);
+}
+
+void AllocationManager::SetAllocListener(AllocationCallback* callback) {
+ CHECK(callback_ == nullptr) << "Already setup!";
+ callback_ = callback;
+ alloc_listener_.reset(new JvmtiAllocationListener(this));
+}
+
+void AllocationManager::RemoveAllocListener() {
+ callback_enabled_.store(false, std::memory_order_seq_cst);
+ callback_ = nullptr;
+}
+
+void AllocationManager::DecrListenerInstall(art::Thread* self) {
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+ art::MutexLock mu(self, alloc_listener_mutex_);
+ // We don't need any particular memory-order here since we're under the lock, they aren't
+ // changing.
+ if (--listener_refcount_ == 0) {
+ art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
+ }
+}
+
+void AllocationManager::IncrListenerInstall(art::Thread* self) {
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+ art::MutexLock mu(self, alloc_listener_mutex_);
+ // We don't need any particular memory-order here since we're under the lock, they aren't
+ // changing.
+ if (listener_refcount_++ == 0) {
+ art::Runtime::Current()->GetHeap()->SetAllocationListener(alloc_listener_.get());
+ }
+}
+
+void AllocationManager::PauseAllocations(art::Thread* self) {
+ art::Thread* null_thr = nullptr;
+ IncrListenerInstall(self);
+ do {
+ PauseForAllocation(self, []() { return "request to pause allocations on other threads"; });
+ } while (!allocations_paused_thread_.compare_exchange_strong(
+ null_thr, self, std::memory_order_seq_cst));
+ // Make sure everything else can see this and isn't in the middle of final allocation.
+ // Force every thread to either be suspended or pass through a barrier.
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+ art::Barrier barrier(0);
+ art::FunctionClosure fc([&](art::Thread* thr ATTRIBUTE_UNUSED) {
+ barrier.Pass(art::Thread::Current());
+ });
+ size_t requested = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
+ barrier.Increment(self, requested);
+}
+
+void AllocationManager::ResumeAllocations(art::Thread* self) {
+ CHECK_EQ(allocations_paused_thread_.load(), self) << "not paused! ";
+ DecrListenerInstall(self);
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+ art::MutexLock mu(self, alloc_listener_mutex_);
+ allocations_paused_thread_.store(nullptr, std::memory_order_seq_cst);
+ alloc_pause_cv_.Broadcast(self);
+}
+
+} // namespace openjdkjvmti
diff --git a/openjdkjvmti/alloc_manager.h b/openjdkjvmti/alloc_manager.h
new file mode 100644
index 0000000000..c89d9a633a
--- /dev/null
+++ b/openjdkjvmti/alloc_manager.h
@@ -0,0 +1,114 @@
+/* Copyright (C) 2019 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
+#define ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
+
+#include <jvmti.h>
+
+#include <atomic>
+
+#include "base/locks.h"
+#include "base/mutex.h"
+#include "gc/allocation_listener.h"
+
+namespace art {
+template <typename T> class MutableHandle;
+template <typename T> class ObjPtr;
+class Thread;
+namespace mirror {
+class Class;
+class Object;
+} // namespace mirror
+} // namespace art
+
+namespace openjdkjvmti {
+
+class AllocationManager;
+
+class JvmtiAllocationListener : public art::gc::AllocationListener {
+ public:
+ explicit JvmtiAllocationListener(AllocationManager* manager) : manager_(manager) {}
+ void ObjectAllocated(art::Thread* self,
+ art::ObjPtr<art::mirror::Object>* obj,
+ size_t cnt) override REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool HasPreAlloc() const override REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void PreObjectAllocated(art::Thread* self,
+ art::MutableHandle<art::mirror::Class> type,
+ size_t* byte_count) override REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ AllocationManager* manager_;
+};
+
+class AllocationManager {
+ public:
+ class AllocationCallback {
+ public:
+ virtual ~AllocationCallback() {}
+ virtual void ObjectAllocated(art::Thread* self,
+ art::ObjPtr<art::mirror::Object>* obj,
+ size_t byte_count) REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
+ };
+
+ AllocationManager();
+
+ void SetAllocListener(AllocationCallback* callback);
+ void RemoveAllocListener();
+
+ static AllocationManager* Get();
+
+ void PauseAllocations(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void ResumeAllocations(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void EnableAllocationCallback(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void DisableAllocationCallback(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ template<typename T>
+ void PauseForAllocation(art::Thread* self, T msg) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void IncrListenerInstall(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void DecrListenerInstall(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ AllocationCallback* callback_ = nullptr;
+ uint32_t listener_refcount_ GUARDED_BY(alloc_listener_mutex_) = 0;
+ std::atomic<art::Thread*> allocations_paused_thread_ = nullptr;
+ std::atomic<bool> callback_enabled_ = false;
+ std::unique_ptr<JvmtiAllocationListener> alloc_listener_ = nullptr;
+ art::Mutex alloc_listener_mutex_ ACQUIRED_AFTER(art::Locks::user_code_suspension_lock_);
+ art::ConditionVariable alloc_pause_cv_;
+
+ friend class JvmtiAllocationListener;
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
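To make the intended use of this new manager concrete, here is a minimal, illustrative sketch
(not part of the change itself; the function name is invented). It assumes the caller already
holds the shared mutator lock, as the REQUIRES_SHARED annotations above demand. The real
in-tree caller is the ScopedSuspendAllocations helper added to openjdkjvmti/ti_redefine.cc
further down in this change.

#include "alloc_manager.h"
#include "thread-current-inl.h"

// Sketch only: bracket a heap-wide operation so that no other thread can complete
// an allocation while it runs.
void DoWorkWithAllocationsPaused() REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  openjdkjvmti::AllocationManager* am = openjdkjvmti::AllocationManager::Get();
  am->PauseAllocations(self);   // other allocating threads now wait in PauseForAllocation()
  // ... walk the heap / recreate instances; only `self` may allocate in this window ...
  am->ResumeAllocations(self);  // clears allocations_paused_thread_ and broadcasts the condvar
}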
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 56406fc81d..64a02e874c 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -31,6 +31,7 @@
#include <android-base/thread_annotations.h>
+#include "alloc_manager.h"
#include "base/locks.h"
#include "base/mutex.h"
#include "events-inl.h"
@@ -312,9 +313,9 @@ class JvmtiDdmChunkListener : public art::DdmCallback {
DISALLOW_COPY_AND_ASSIGN(JvmtiDdmChunkListener);
};
-class JvmtiAllocationListener : public art::gc::AllocationListener {
+class JvmtiEventAllocationListener : public AllocationManager::AllocationCallback {
public:
- explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
+ explicit JvmtiEventAllocationListener(EventHandler* handler) : handler_(handler) {}
void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
override REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -349,15 +350,14 @@ class JvmtiAllocationListener : public art::gc::AllocationListener {
EventHandler* handler_;
};
-static void SetupObjectAllocationTracking(art::gc::AllocationListener* listener, bool enable) {
+static void SetupObjectAllocationTracking(bool enable) {
// We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
// now, do a workaround: (possibly) acquire and release.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::ScopedThreadSuspension sts(soa.Self(), art::ThreadState::kSuspended);
if (enable) {
- art::Runtime::Current()->GetHeap()->SetAllocationListener(listener);
+ AllocationManager::Get()->EnableAllocationCallback(soa.Self());
} else {
- art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
+ AllocationManager::Get()->DisableAllocationCallback(soa.Self());
}
}
@@ -1327,7 +1327,7 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
SetupDdmTracking(ddm_listener_.get(), enable);
return;
case ArtJvmtiEvent::kVmObjectAlloc:
- SetupObjectAllocationTracking(alloc_listener_.get(), enable);
+ SetupObjectAllocationTracking(enable);
return;
case ArtJvmtiEvent::kGarbageCollectionStart:
case ArtJvmtiEvent::kGarbageCollectionFinish:
@@ -1665,13 +1665,15 @@ void EventHandler::Shutdown() {
art::ScopedSuspendAll ssa("jvmti method tracing uninstallation");
// Just remove every possible event.
art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0);
+ AllocationManager::Get()->RemoveAllocListener();
}
EventHandler::EventHandler()
: envs_lock_("JVMTI Environment List Lock", art::LockLevel::kPostMutatorTopLockLevel),
frame_pop_enabled(false),
internal_event_refcount_({0}) {
- alloc_listener_.reset(new JvmtiAllocationListener(this));
+ alloc_listener_.reset(new JvmtiEventAllocationListener(this));
+ AllocationManager::Get()->SetAllocListener(alloc_listener_.get());
ddm_listener_.reset(new JvmtiDdmChunkListener(this));
gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index c9d587af94..d4eb17137e 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -34,7 +34,7 @@
namespace openjdkjvmti {
struct ArtJvmTiEnv;
-class JvmtiAllocationListener;
+class JvmtiEventAllocationListener;
class JvmtiDdmChunkListener;
class JvmtiGcPauseListener;
class JvmtiMethodTraceListener;
@@ -425,7 +425,7 @@ class EventHandler {
// A union of all enabled events, anywhere.
EventMask global_mask;
- std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
+ std::unique_ptr<JvmtiEventAllocationListener> alloc_listener_;
std::unique_ptr<JvmtiDdmChunkListener> ddm_listener_;
std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
std::unique_ptr<JvmtiMethodTraceListener> method_trace_listener_;
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 1d1839014a..b25b4d11e2 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -17,6 +17,7 @@
#include "ti_heap.h"
#include <ios>
+#include <unordered_map>
#include "android-base/logging.h"
#include "android-base/thread_annotations.h"
@@ -1613,8 +1614,9 @@ jvmtiError HeapExtensions::IterateThroughHeapExt(jvmtiEnv* env,
namespace {
using ObjectPtr = art::ObjPtr<art::mirror::Object>;
+using ObjectMap = std::unordered_map<ObjectPtr, ObjectPtr, art::HashObjPtr>;
-static void ReplaceObjectReferences(ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr)
+static void ReplaceObjectReferences(const ObjectMap& map)
REQUIRES(art::Locks::mutator_lock_,
art::Roles::uninterruptible_) {
art::Runtime::Current()->GetHeap()->VisitObjectsPaused(
@@ -1623,8 +1625,7 @@ static void ReplaceObjectReferences(ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr
class ResizeReferenceVisitor {
public:
using CompressedObj = art::mirror::CompressedReference<art::mirror::Object>;
- ResizeReferenceVisitor(ObjectPtr old_arr, ObjectPtr new_arr)
- : old_obj_(old_arr), new_obj_(new_arr) {}
+ explicit ResizeReferenceVisitor(const ObjectMap& map) : map_(map) {}
// Ignore class roots.
void VisitRootIfNonNull(CompressedObj* root) const
@@ -1634,20 +1635,29 @@ static void ReplaceObjectReferences(ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr
}
}
void VisitRoot(CompressedObj* root) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (root->AsMirrorPtr() == old_obj_) {
- root->Assign(new_obj_);
- art::WriteBarrier::ForEveryFieldWrite(new_obj_);
+ auto it = map_.find(root->AsMirrorPtr());
+ if (it != map_.end()) {
+ root->Assign(it->second);
+ art::WriteBarrier::ForEveryFieldWrite(it->second);
}
}
void operator()(art::ObjPtr<art::mirror::Object> obj,
art::MemberOffset off,
- bool is_static ATTRIBUTE_UNUSED) const
+ bool is_static) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (obj->GetFieldObject<art::mirror::Object>(off) == old_obj_) {
+ auto it = map_.find(obj->GetFieldObject<art::mirror::Object>(off));
+ if (it != map_.end()) {
+ UNUSED(is_static);
+ if (UNLIKELY(!is_static && off == art::mirror::Object::ClassOffset())) {
+ // We don't want to update the declaring class of any objects. They will be replaced
+ // in the heap and we need the declaring class to know its size.
+ return;
+ }
VLOG(plugin) << "Updating field at offset " << off.Uint32Value() << " of type "
<< obj->GetClass()->PrettyClass();
- obj->SetFieldObject</*transaction*/ false>(off, new_obj_);
+ obj->SetFieldObject</*transaction*/ false>(off, it->second);
+ art::WriteBarrier::ForEveryFieldWrite(obj);
}
}
@@ -1659,11 +1669,10 @@ static void ReplaceObjectReferences(ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr
}
private:
- ObjectPtr old_obj_;
- ObjectPtr new_obj_;
+ const ObjectMap& map_;
};
- ResizeReferenceVisitor rrv(old_obj_ptr, new_obj_ptr);
+ ResizeReferenceVisitor rrv(map);
if (ref->IsClass()) {
// Class object native roots are the ArtField and ArtMethod 'declaring_class_' fields
// which we don't want to be messing with as it would break ref-visitor assumptions about
@@ -1678,13 +1687,12 @@ static void ReplaceObjectReferences(ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr
});
}
-static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectPtr new_obj_ptr)
+static void ReplaceStrongRoots(art::Thread* self, const ObjectMap& map)
REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
// replace root references except java frames.
struct ResizeRootVisitor : public art::RootVisitor {
public:
- ResizeRootVisitor(ObjectPtr new_val, ObjectPtr old_val)
- : new_val_(new_val), old_val_(old_val) {}
+ explicit ResizeRootVisitor(const ObjectMap& map) : map_(map) {}
// TODO It's somewhat annoying to have to have this function implemented twice. It might be
// good/useful to implement operator= for CompressedReference to allow us to use a template to
@@ -1693,7 +1701,8 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::mirror::Object*** end = roots + count;
for (art::mirror::Object** obj = *roots; roots != end; obj = *(++roots)) {
- if (*obj == old_val_) {
+ auto it = map_.find(*obj);
+ if (it != map_.end()) {
// Java frames might have the JIT doing optimizations (for example loop-unrolling or
// eliding bounds checks) so we need deopt them once we're done here.
if (info.GetType() == art::RootType::kRootJavaFrame) {
@@ -1708,7 +1717,7 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
threads_with_roots_.insert(info.GetThreadId());
}
}
- *obj = new_val_.Ptr();
+ *obj = it->second.Ptr();
}
}
}
@@ -1719,7 +1728,8 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
art::mirror::CompressedReference<art::mirror::Object>** end = roots + count;
for (art::mirror::CompressedReference<art::mirror::Object>* obj = *roots; roots != end;
obj = *(++roots)) {
- if (obj->AsMirrorPtr() == old_val_) {
+ auto it = map_.find(obj->AsMirrorPtr());
+ if (it != map_.end()) {
// Java frames might have the JIT doing optimizations (for example loop-unrolling or
// eliding bounds checks) so we need deopt them once we're done here.
if (info.GetType() == art::RootType::kRootJavaFrame) {
@@ -1734,7 +1744,7 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
threads_with_roots_.insert(info.GetThreadId());
}
}
- obj->Assign(new_val_);
+ obj->Assign(it->second);
}
}
}
@@ -1744,11 +1754,10 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
}
private:
- ObjectPtr new_val_;
- ObjectPtr old_val_;
+ const ObjectMap& map_;
std::unordered_set<uint32_t> threads_with_roots_;
};
- ResizeRootVisitor rrv(new_obj_ptr, old_obj_ptr);
+ ResizeRootVisitor rrv(map);
art::Runtime::Current()->VisitRoots(&rrv, art::VisitRootFlags::kVisitRootFlagAllRoots);
// Handle java Frames. Annoyingly the JIT can embed information about the length of the array into
// the compiled code. By changing the length of the array we potentially invalidate these
@@ -1773,8 +1782,7 @@ static void ReplaceStrongRoots(art::Thread* self, ObjectPtr old_obj_ptr, ObjectP
static void ReplaceWeakRoots(art::Thread* self,
EventHandler* event_handler,
- ObjectPtr old_obj_ptr,
- ObjectPtr new_obj_ptr)
+ const ObjectMap& map)
REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
// Handle tags. We want to do this separately from other weak-refs (handled below) because we need
// to send additional events and handle cases where the agent might have tagged the new
@@ -1786,25 +1794,33 @@ static void ReplaceWeakRoots(art::Thread* self,
// situations where the order of weak-ref visiting affects the final tagging state. Since we have
// the mutator_lock_ and gc-paused throughout this whole process no threads should be able to see
// the interval where the objects are not tagged.
- std::unordered_map<ArtJvmTiEnv*, jlong> obsolete_tags;
- std::unordered_map<ArtJvmTiEnv*, jlong> non_obsolete_tags;
+ struct NewTagValue {
+ public:
+ ObjectPtr obsolete_obj_;
+ jlong obsolete_tag_;
+ ObjectPtr new_obj_;
+ jlong new_tag_;
+ };
+
+ // Map from the environment to the list of <obsolete_tag, new_tag> pairs that were changed.
+ std::unordered_map<ArtJvmTiEnv*, std::vector<NewTagValue>> changed_tags;
event_handler->ForEachEnv(self, [&](ArtJvmTiEnv* env) {
// Cannot have REQUIRES(art::Locks::mutator_lock_) since ForEachEnv doesn't require it.
art::Locks::mutator_lock_->AssertExclusiveHeld(self);
env->object_tag_table->Lock();
// Get the tags and clear them (so we don't need to special-case the normal weak-ref visitor)
- jlong new_tag = 0;
- jlong obsolete_tag = 0;
- bool had_new_tag = env->object_tag_table->RemoveLocked(new_obj_ptr, &new_tag);
- bool had_obsolete_tag = env->object_tag_table->RemoveLocked(old_obj_ptr, &obsolete_tag);
- // Dispatch event.
- if (had_obsolete_tag || had_new_tag) {
- event_handler->DispatchEventOnEnv<ArtJvmtiEvent::kObsoleteObjectCreated>(env,
- self,
- &obsolete_tag,
- &new_tag);
- obsolete_tags[env] = obsolete_tag;
- non_obsolete_tags[env] = new_tag;
+ for (auto it : map) {
+ jlong new_tag = 0;
+ jlong obsolete_tag = 0;
+ bool had_obsolete_tag = env->object_tag_table->RemoveLocked(it.first, &obsolete_tag);
+ bool had_new_tag = env->object_tag_table->RemoveLocked(it.second, &new_tag);
+ // Dispatch event.
+ if (had_obsolete_tag || had_new_tag) {
+ event_handler->DispatchEventOnEnv<ArtJvmtiEvent::kObsoleteObjectCreated>(
+ env, self, &obsolete_tag, &new_tag);
+ changed_tags.try_emplace(env).first->second.push_back(
+ { it.first, obsolete_tag, it.second, new_tag });
+ }
}
// After weak-ref update we need to go back and re-add obsoletes. We wait to avoid having to
// deal with the visit-weaks overwriting the initial new_obj_ptr tag and generally making things
@@ -1814,34 +1830,34 @@ static void ReplaceWeakRoots(art::Thread* self,
// Handle weak-refs.
struct ReplaceWeaksVisitor : public art::IsMarkedVisitor {
public:
- ReplaceWeaksVisitor(ObjectPtr old_obj, ObjectPtr new_obj)
- : old_obj_(old_obj), new_obj_(new_obj) {}
+ explicit ReplaceWeaksVisitor(const ObjectMap& map) : map_(map) {}
art::mirror::Object* IsMarked(art::mirror::Object* obj)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (obj == old_obj_) {
- return new_obj_.Ptr();
+ auto it = map_.find(obj);
+ if (it != map_.end()) {
+ return it->second.Ptr();
} else {
return obj;
}
}
private:
- ObjectPtr old_obj_;
- ObjectPtr new_obj_;
+ const ObjectMap& map_;
};
- ReplaceWeaksVisitor rwv(old_obj_ptr, new_obj_ptr);
+ ReplaceWeaksVisitor rwv(map);
art::Runtime::Current()->SweepSystemWeaks(&rwv);
// Re-add the object tags. At this point all weak-references to the old_obj_ptr are gone.
event_handler->ForEachEnv(self, [&](ArtJvmTiEnv* env) {
// Cannot have REQUIRES(art::Locks::mutator_lock_) since ForEachEnv doesn't require it.
art::Locks::mutator_lock_->AssertExclusiveHeld(self);
env->object_tag_table->Lock();
- if (obsolete_tags.find(env) != obsolete_tags.end()) {
- env->object_tag_table->SetLocked(old_obj_ptr, obsolete_tags[env]);
- }
- if (non_obsolete_tags.find(env) != non_obsolete_tags.end()) {
- env->object_tag_table->SetLocked(new_obj_ptr, non_obsolete_tags[env]);
+ auto it = changed_tags.find(env);
+ if (it != changed_tags.end()) {
+ for (const NewTagValue& v : it->second) {
+ env->object_tag_table->SetLocked(v.obsolete_obj_, v.obsolete_tag_);
+ env->object_tag_table->SetLocked(v.new_obj_, v.new_tag_);
+ }
}
env->object_tag_table->Unlock();
});
@@ -1852,9 +1868,14 @@ static void ReplaceWeakRoots(art::Thread* self,
void HeapExtensions::ReplaceReference(art::Thread* self,
art::ObjPtr<art::mirror::Object> old_obj_ptr,
art::ObjPtr<art::mirror::Object> new_obj_ptr) {
- ReplaceObjectReferences(old_obj_ptr, new_obj_ptr);
- ReplaceStrongRoots(self, old_obj_ptr, new_obj_ptr);
- ReplaceWeakRoots(self, HeapExtensions::gEventHandler, old_obj_ptr, new_obj_ptr);
+ ObjectMap map { { old_obj_ptr, new_obj_ptr } };
+ ReplaceReferences(self, map);
+}
+
+void HeapExtensions::ReplaceReferences(art::Thread* self, const ObjectMap& map) {
+ ReplaceObjectReferences(map);
+ ReplaceStrongRoots(self, map);
+ ReplaceWeakRoots(self, HeapExtensions::gEventHandler, map);
}
jvmtiError HeapExtensions::ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new_size) {
diff --git a/openjdkjvmti/ti_heap.h b/openjdkjvmti/ti_heap.h
index 2e27cc7f35..ee8b4d6128 100644
--- a/openjdkjvmti/ti_heap.h
+++ b/openjdkjvmti/ti_heap.h
@@ -17,6 +17,8 @@
#ifndef ART_OPENJDKJVMTI_TI_HEAP_H_
#define ART_OPENJDKJVMTI_TI_HEAP_H_
+#include <unordered_map>
+
#include "jvmti.h"
#include "base/locks.h"
@@ -24,6 +26,7 @@
namespace art {
class Thread;
template<typename T> class ObjPtr;
+class HashObjPtr;
namespace mirror {
class Object;
} // namespace mirror
@@ -88,6 +91,13 @@ class HeapExtensions {
static jvmtiError JNICALL ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new_size);
+ static void ReplaceReferences(
+ art::Thread* self,
+ const std::unordered_map<art::ObjPtr<art::mirror::Object>,
+ art::ObjPtr<art::mirror::Object>,
+ art::HashObjPtr>& refs)
+ REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_);
+
static void ReplaceReference(art::Thread* self,
art::ObjPtr<art::mirror::Object> original,
art::ObjPtr<art::mirror::Object> replacement)
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 87080caa4e..ebbe6acdbd 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -42,6 +42,8 @@
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
+#include "alloc_manager.h"
+#include "android-base/macros.h"
#include "android-base/thread_annotations.h"
#include "art_field-inl.h"
#include "art_field.h"
@@ -115,6 +117,7 @@
#include "reflective_value_visitor.h"
#include "runtime.h"
#include "runtime_globals.h"
+#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
@@ -460,30 +463,33 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
}
// Check for already existing non-static fields/methods.
// TODO Remove this once we support generic method/field addition.
- bool non_static_method = false;
- klass->VisitMethods([&](art::ArtMethod* m) REQUIRES_SHARED(art::Locks::mutator_lock_) {
- // Since direct-methods (ie privates + <init> are not in any vtable/iftable we can update
- // them).
- if (!m->IsDirect()) {
- non_static_method = true;
- *error_msg = StringPrintf("%s has a non-direct function %s",
- klass->PrettyClass().c_str(),
- m->PrettyMethod().c_str());
+ if (!klass->IsFinal()) {
+ bool non_static_method = false;
+ klass->VisitMethods([&](art::ArtMethod* m) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Since direct-methods (i.e. privates + <init>) are not in any vtable/iftable we can update
+ // them.
+ if (!m->IsDirect()) {
+ non_static_method = true;
+ *error_msg = StringPrintf("%s has a non-direct function %s",
+ klass->PrettyClass().c_str(),
+ m->PrettyMethod().c_str());
+ }
+ }, art::kRuntimePointerSize);
+ if (non_static_method) {
+ return ERR(UNMODIFIABLE_CLASS);
}
- }, art::kRuntimePointerSize);
- if (non_static_method) {
- return ERR(UNMODIFIABLE_CLASS);
- }
- bool non_static_field = false;
- klass->VisitFields([&](art::ArtField* f) REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (!f->IsStatic()) {
- non_static_field = true;
- *error_msg = StringPrintf(
- "%s has a non-static field %s", klass->PrettyClass().c_str(), f->PrettyField().c_str());
+ bool non_static_field = false;
+ klass->VisitFields([&](art::ArtField* f) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (!f->IsStatic()) {
+ non_static_field = true;
+ *error_msg = StringPrintf("%s has a non-static field %s",
+ klass->PrettyClass().c_str(),
+ f->PrettyField().c_str());
+ }
+ });
+ if (non_static_field) {
+ return ERR(UNMODIFIABLE_CLASS);
}
- });
- if (non_static_field) {
- return ERR(UNMODIFIABLE_CLASS);
}
// Check for fields/methods which were returned before moving to index jni id type.
// TODO We might want to rework how this is done. Once full redefinition is implemented we will
@@ -985,9 +991,12 @@ bool Redefiner::ClassRedefinition::CheckMethods() {
return old_method_id == new_method_id;
});
+ if (!new_method.IsStaticOrDirect()) {
+ RecordHasVirtualMembers();
+ }
if (old_iter == old_methods.cend()) {
// TODO Support adding non-static methods.
- if (is_structural && new_method.IsStaticOrDirect()) {
+ if (is_structural && (new_method.IsStaticOrDirect() || h_klass->IsFinal())) {
RecordNewMethodAdded();
} else {
RecordFailure(
@@ -1046,9 +1055,12 @@ bool Redefiner::ClassRedefinition::CheckFields() {
FieldNameAndSignature old_field_id(&old_dex_file, old_iter.GetIndex());
return old_field_id == new_field_id;
});
+ if (!new_field.IsStatic()) {
+ RecordHasVirtualMembers();
+ }
if (old_iter == old_fields.cend()) {
// TODO Support adding non-static fields.
- if (driver_->IsStructuralRedefinition() && new_field.IsStatic()) {
+ if (driver_->IsStructuralRedefinition() && (new_field.IsStatic() || h_klass->IsFinal())) {
RecordNewFieldAdded();
} else {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
@@ -1169,6 +1181,10 @@ bool Redefiner::ClassRedefinition::CheckRedefinable() {
jvmtiError res;
if (driver_->type_ == RedefinitionType::kStructural && this->IsStructuralRedefinition()) {
res = Redefiner::GetClassRedefinitionError<RedefinitionType::kStructural>(h_klass, &err);
+ if (res == OK && HasVirtualMembers() && h_klass->IsFinalizable()) {
+ res = ERR(INTERNAL);
+ err = "Cannot redefine finalizable objects at this time.";
+ }
} else {
res = Redefiner::GetClassRedefinitionError<RedefinitionType::kNormal>(h_klass, &err);
}
@@ -1201,9 +1217,11 @@ class RedefinitionDataHolder {
kSlotOldObsoleteMethods = 6,
kSlotOldDexCaches = 7,
kSlotNewClassObject = 8,
+ kSlotOldInstanceObjects = 9,
+ kSlotNewInstanceObjects = 10,
// Must be last one.
- kNumSlots = 9,
+ kNumSlots = 11,
};
// This needs to have a HandleScope passed in that is capable of creating a new Handle without
@@ -1269,6 +1287,18 @@ class RedefinitionDataHolder {
return art::ObjPtr<art::mirror::Class>::DownCast(GetSlot(klass_index, kSlotNewClassObject));
}
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetOldInstanceObjects(
+ jint klass_index) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>>::DownCast(
+ GetSlot(klass_index, kSlotOldInstanceObjects));
+ }
+
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetNewInstanceObjects(
+ jint klass_index) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>>::DownCast(
+ GetSlot(klass_index, kSlotNewInstanceObjects));
+ }
+
void SetSourceClassLoader(jint klass_index, art::ObjPtr<art::mirror::ClassLoader> loader)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotSourceClassLoader, loader);
@@ -1308,6 +1338,16 @@ class RedefinitionDataHolder {
SetSlot(klass_index, kSlotNewClassObject, klass);
}
+ void SetOldInstanceObjects(jint klass_index,
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotOldInstanceObjects, objs);
+ }
+ void SetNewInstanceObjects(jint klass_index,
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotNewInstanceObjects, objs);
+ }
int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_->GetLength() / kNumSlots;
}
@@ -1392,6 +1432,11 @@ class RedefinitionDataIter {
return *this;
}
+ // Compat for STL iterators.
+ RedefinitionDataIter& operator*() {
+ return *this;
+ }
+
Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
return (*holder_.GetRedefinitions())[idx_];
}
@@ -1438,6 +1483,14 @@ class RedefinitionDataIter {
return holder_.GetNewClassObject(idx_);
}
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetOldInstanceObjects() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOldInstanceObjects(idx_);
+ }
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetNewInstanceObjects() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewInstanceObjects(idx_);
+ }
int32_t GetIndex() const {
return idx_;
}
@@ -1478,6 +1531,14 @@ class RedefinitionDataIter {
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetNewClassObject(idx_, klass);
}
+ void SetOldInstanceObjects(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOldInstanceObjects(idx_, objs);
+ }
+ void SetNewInstanceObjects(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewInstanceObjects(idx_, objs);
+ }
private:
int32_t idx_;
@@ -1580,6 +1641,75 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
return true;
}
+bool Redefiner::ClassRedefinition::CollectAndCreateNewInstances(
+ /*out*/ RedefinitionDataIter* cur_data) {
+ if (!IsStructuralRedefinition()) {
+ return true;
+ }
+ art::VariableSizedHandleScope hs(driver_->self_);
+ art::Handle<art::mirror::Class> old_klass(hs.NewHandle(cur_data->GetMirrorClass()));
+ std::vector<art::Handle<art::mirror::Object>> old_instances;
+ art::gc::Heap* heap = driver_->runtime_->GetHeap();
+ auto is_instance = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (HasVirtualMembers()) {
+ return old_klass->IsAssignableFrom(obj->GetClass());
+ } else {
+ // We don't need to deal with objects of subtypes when we don't modify virtuals since the
+ // vtable + field layout will remain the same.
+ return old_klass.Get() == obj->GetClass();
+ }
+ };
+ heap->VisitObjects([&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (is_instance(obj)) {
+ CHECK(old_klass.Get() == obj->GetClass()) << "No support for subtypes yet!";
+ old_instances.push_back(hs.NewHandle(obj));
+ }
+ });
+ VLOG(plugin) << "Collected " << old_instances.size() << " instances to recreate!";
+
+ art::Handle<art::mirror::Class> obj_array_class(
+ hs.NewHandle(art::GetClassRoot<art::mirror::ObjectArray<art::mirror::Object>>(
+ driver_->runtime_->GetClassLinker())));
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> old_instances_arr(
+ hs.NewHandle(art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+ driver_->self_, obj_array_class.Get(), old_instances.size())));
+ if (old_instances_arr.IsNull()) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate old_instance arrays!");
+ return false;
+ }
+ for (uint32_t i = 0; i < old_instances.size(); ++i) {
+ old_instances_arr->Set(i, old_instances[i].Get());
+ }
+ cur_data->SetOldInstanceObjects(old_instances_arr.Get());
+
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> new_instances_arr(
+ hs.NewHandle(art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+ driver_->self_, obj_array_class.Get(), old_instances.size())));
+ if (new_instances_arr.IsNull()) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate new_instance arrays!");
+ return false;
+ }
+ art::Handle<art::mirror::Class> new_klass(hs.NewHandle(cur_data->GetNewClassObject()));
+ for (uint32_t i = 0; i < old_instances.size(); ++i) {
+ art::ObjPtr<art::mirror::Object> new_instance(new_klass->AllocObject(driver_->self_));
+ if (new_instance.IsNull()) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ std::string msg(
+ StringPrintf("Could not allocate instance %d of %zu", i, old_instances.size()));
+ RecordFailure(ERR(OUT_OF_MEMORY), msg);
+ return false;
+ }
+ new_instances_arr->Set(i, new_instance);
+ }
+ cur_data->SetNewInstanceObjects(new_instances_arr.Get());
+ return true;
+}
+
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
/*out*/RedefinitionDataIter* cur_data) {
art::ScopedObjectAccessUnchecked soa(driver_->self_);
@@ -1801,6 +1931,16 @@ bool Redefiner::EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder
return true;
}
+bool Redefiner::CollectAndCreateNewInstances(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ // Collect the still-live instances of the class and allocate their replacement objects.
+ if (!data.GetRedefinition().CollectAndCreateNewInstances(&data)) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
// Allocate the data this redefinition requires.
@@ -1849,6 +1989,36 @@ class ScopedDisableConcurrentAndMovingGc {
art::Thread* self_;
};
+class ScopedSuspendAllocations {
+ public:
+ ScopedSuspendAllocations(art::Runtime* runtime, RedefinitionDataHolder& h)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ : paused_(false) {
+ if (std::any_of(h.begin(),
+ h.end(),
+ [](auto r) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return r.GetRedefinition().IsStructuralRedefinition();
+ })) {
+ VLOG(plugin) << "Pausing allocations for structural redefinition.";
+ paused_ = true;
+ AllocationManager::Get()->PauseAllocations(art::Thread::Current());
+ // Collect garbage so we don't need to recreate as much.
+ runtime->GetHeap()->CollectGarbage(/*clear_soft_references=*/false);
+ }
+ }
+
+ ~ScopedSuspendAllocations() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (paused_) {
+ AllocationManager::Get()->ResumeAllocations(art::Thread::Current());
+ }
+ }
+
+ private:
+ bool paused_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSuspendAllocations);
+};
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
@@ -1873,6 +2043,11 @@ jvmtiError Redefiner::Run() {
return result_;
}
+ ScopedSuspendAllocations suspend_alloc(runtime_, holder);
+ if (!CollectAndCreateNewInstances(holder)) {
+ return result_;
+ }
+
// At this point we can no longer fail without corrupting the runtime state.
for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ClassLinker* cl = runtime_->GetClassLinker();
@@ -2023,6 +2198,120 @@ void Redefiner::ClassRedefinition::CollectNewFieldAndMethodMappings(
}
}
+static void CopyField(art::ObjPtr<art::mirror::Object> target,
+ art::ArtField* new_field,
+ art::ObjPtr<art::mirror::Object> source,
+ art::ArtField& old_field) REQUIRES(art::Locks::mutator_lock_) {
+ art::Primitive::Type ftype = old_field.GetTypeAsPrimitiveType();
+ CHECK_EQ(ftype, new_field->GetTypeAsPrimitiveType())
+ << old_field.PrettyField() << " vs " << new_field->PrettyField();
+ if (ftype == art::Primitive::kPrimNot) {
+ new_field->SetObject<false>(target, old_field.GetObject(source));
+ } else {
+ switch (ftype) {
+#define UPDATE_FIELD(TYPE) \
+ case art::Primitive::kPrim##TYPE: \
+ new_field->Set##TYPE<false>(target, old_field.Get##TYPE(source)); \
+ break
+ UPDATE_FIELD(Int);
+ UPDATE_FIELD(Float);
+ UPDATE_FIELD(Long);
+ UPDATE_FIELD(Double);
+ UPDATE_FIELD(Short);
+ UPDATE_FIELD(Char);
+ UPDATE_FIELD(Byte);
+ UPDATE_FIELD(Boolean);
+ case art::Primitive::kPrimNot:
+ case art::Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected field with type " << ftype << " found!";
+ UNREACHABLE();
+#undef UPDATE_FIELD
+ }
+ }
+}
+
+static void CopyFields(bool is_static,
+ art::ObjPtr<art::mirror::Object> target,
+ art::ObjPtr<art::mirror::Class> target_class,
+ art::ObjPtr<art::mirror::Object> source,
+ art::ObjPtr<art::mirror::Class> source_class)
+ REQUIRES(art::Locks::mutator_lock_) {
+ DCHECK(!source_class->IsObjectClass() && !target_class->IsObjectClass())
+ << "Should not be overriding object class fields. Target: " << target_class->PrettyClass()
+ << " Source: " << source_class->PrettyClass();
+ for (art::ArtField& f : (is_static ? source_class->GetSFields() : source_class->GetIFields())) {
+ art::ArtField* new_field =
+ (is_static ? target_class->FindDeclaredStaticField(f.GetName(), f.GetTypeDescriptor())
+ : target_class->FindDeclaredInstanceField(f.GetName(), f.GetTypeDescriptor()));
+ CHECK(new_field != nullptr) << "could not find new version of " << f.PrettyField();
+ CopyField(target, new_field, source, f);
+ }
+ if (!is_static && !target_class->GetSuperClass()->IsObjectClass()) {
+ CopyFields(
+ is_static, target, target_class->GetSuperClass(), source, source_class->GetSuperClass());
+ }
+}
+
+static void ClearField(art::ObjPtr<art::mirror::Object> target, art::ArtField& field)
+ REQUIRES(art::Locks::mutator_lock_) {
+ art::Primitive::Type ftype = field.GetTypeAsPrimitiveType();
+ if (ftype == art::Primitive::kPrimNot) {
+ field.SetObject<false>(target, nullptr);
+ } else {
+ switch (ftype) {
+#define UPDATE_FIELD(TYPE) \
+ case art::Primitive::kPrim##TYPE: \
+ field.Set##TYPE<false>(target, 0); \
+ break
+ UPDATE_FIELD(Int);
+ UPDATE_FIELD(Float);
+ UPDATE_FIELD(Long);
+ UPDATE_FIELD(Double);
+ UPDATE_FIELD(Short);
+ UPDATE_FIELD(Char);
+ UPDATE_FIELD(Byte);
+ UPDATE_FIELD(Boolean);
+ case art::Primitive::kPrimNot:
+ case art::Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected field with type " << ftype << " found!";
+ UNREACHABLE();
+#undef UPDATE_FIELD
+ }
+ }
+}
+
+static void ClearFields(bool is_static,
+ art::ObjPtr<art::mirror::Object> target,
+ art::ObjPtr<art::mirror::Class> target_class)
+ REQUIRES(art::Locks::mutator_lock_) {
+ DCHECK(!target_class->IsObjectClass());
+ for (art::ArtField& f : (is_static ? target_class->GetSFields() : target_class->GetIFields())) {
+ ClearField(target, f);
+ }
+ if (!is_static && !target_class->GetSuperClass()->IsObjectClass()) {
+ ClearFields(is_static, target, target_class->GetSuperClass());
+ }
+}
+
+static void CopyAndClearFields(bool is_static,
+ art::ObjPtr<art::mirror::Object> target,
+ art::ObjPtr<art::mirror::Class> target_class,
+ art::ObjPtr<art::mirror::Object> source,
+ art::ObjPtr<art::mirror::Class> source_class)
+ REQUIRES(art::Locks::mutator_lock_) {
+ // Copy all non-j.l.Object fields
+ CopyFields(is_static, target, target_class, source, source_class);
+ // Copy the lock-word.
+ target->SetLockWord(source->GetLockWord(false), false);
+ // Clear (reset) the old one.
+ source->SetLockWord(art::LockWord::Default(), false);
+ art::WriteBarrier::ForEveryFieldWrite(target);
+
+ // Clear the fields from the old class. We don't need it anymore.
+ ClearFields(is_static, source, source_class);
+ art::WriteBarrier::ForEveryFieldWrite(source);
+}
+
void Redefiner::ClassRedefinition::UpdateClassStructurally(const RedefinitionDataIter& holder) {
DCHECK(IsStructuralRedefinition());
// LETS GO. We've got all new class structures so no need to do all the updating of the stacks.
@@ -2036,40 +2325,24 @@ void Redefiner::ClassRedefinition::UpdateClassStructurally(const RedefinitionDat
std::map<art::ArtMethod*, art::ArtMethod*> method_map;
std::map<art::ArtField*, art::ArtField*> field_map;
CollectNewFieldAndMethodMappings(holder, &method_map, &field_map);
- // Copy over the fields of the object.
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> new_instances(
+ holder.GetNewInstanceObjects());
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> old_instances(
+ holder.GetOldInstanceObjects());
CHECK(!orig.IsNull());
CHECK(!replacement.IsNull());
- for (art::ArtField& f : orig->GetSFields()) {
- art::ArtField* new_field =
- replacement->FindDeclaredStaticField(f.GetName(), f.GetTypeDescriptor());
- CHECK(new_field != nullptr) << "could not find new version of " << f.PrettyField();
- art::Primitive::Type ftype = f.GetTypeAsPrimitiveType();
- CHECK_EQ(ftype, new_field->GetTypeAsPrimitiveType())
- << f.PrettyField() << " vs " << new_field->PrettyField();
- if (ftype == art::Primitive::kPrimNot) {
- new_field->SetObject<false>(replacement, f.GetObject(orig));
- } else {
- switch (ftype) {
-#define UPDATE_FIELD(TYPE) \
- case art::Primitive::kPrim##TYPE: \
- new_field->Set##TYPE<false>(replacement, f.Get##TYPE(orig)); \
- break
-
- UPDATE_FIELD(Int);
- UPDATE_FIELD(Float);
- UPDATE_FIELD(Long);
- UPDATE_FIELD(Double);
- UPDATE_FIELD(Short);
- UPDATE_FIELD(Char);
- UPDATE_FIELD(Byte);
- UPDATE_FIELD(Boolean);
- case art::Primitive::kPrimNot:
- case art::Primitive::kPrimVoid:
- LOG(FATAL) << "Unexpected field with type " << ftype << " found!";
- UNREACHABLE();
-#undef UPDATE_FIELD
- }
- }
+ // Copy over the static fields of the class and all the instance fields.
+ CopyAndClearFields(/*is_static=*/true, replacement, replacement, orig, orig);
+
+ // Copy and clear the fields of the old-instances.
+ for (int32_t i = 0; i < old_instances->GetLength(); i++) {
+ art::ObjPtr<art::mirror::Object> old_instance(old_instances->Get(i));
+ art::ObjPtr<art::mirror::Object> new_instance(new_instances->Get(i));
+ CopyAndClearFields(/*is_static=*/false,
+ new_instance,
+ new_instance->GetClass(),
+ old_instance,
+ old_instance->GetClass());
}
// Mark old class obsolete.
orig->SetObsoleteObject();
@@ -2079,9 +2352,6 @@ void Redefiner::ClassRedefinition::UpdateClassStructurally(const RedefinitionDat
m.SetDontCompile();
DCHECK_EQ(orig, m.GetDeclaringClass());
}
- // Copy the lock-word
- replacement->SetLockWord(orig->GetLockWord(false), false);
- orig->SetLockWord(art::LockWord::Default(), false);
// Update live pointers in ART code.
auto could_change_resolution_of = [&](auto* field_or_method,
const auto& info) REQUIRES(art::Locks::mutator_lock_) {
@@ -2166,39 +2436,23 @@ void Redefiner::ClassRedefinition::UpdateClassStructurally(const RedefinitionDat
// Force every frame of every thread to deoptimize (any frame might have eg offsets compiled in).
driver_->runtime_->GetInstrumentation()->DeoptimizeAllThreadFrames();
- // Actually perform the general replacement. This doesn't affect ArtMethod/ArtFields.
- // This replaces the mirror::Class in 'holder' as well. It's magic!
- HeapExtensions::ReplaceReference(driver_->self_, orig, replacement);
+ std::unordered_map<art::ObjPtr<art::mirror::Object>,
+ art::ObjPtr<art::mirror::Object>,
+ art::HashObjPtr> map;
+ map.emplace(orig, replacement);
+ for (int32_t i = 0; i < old_instances->GetLength(); i++) {
+ map.emplace(old_instances->Get(i), new_instances->Get(i));
+ }
+
+ // Actually perform the general replacement. This doesn't affect ArtMethod/ArtFields. It does
+ // affect the declaring_class field of all the obsolete objects, which is unfortunate and needs to
+ // be undone. This replaces the mirror::Class in 'holder' as well. It's magic!
+ HeapExtensions::ReplaceReferences(driver_->self_, map);
// Save the old class so that the JIT gc doesn't get confused by it being collected before the
// jit code. This is also needed to keep the dex-caches of any obsolete methods live.
replacement->GetExtData()->SetObsoleteClass(orig);
- // Clear the static fields of the old-class.
- for (art::ArtField& f : orig->GetSFields()) {
- switch (f.GetTypeAsPrimitiveType()) {
- #define UPDATE_FIELD(TYPE) \
- case art::Primitive::kPrim ## TYPE: \
- f.Set ## TYPE <false>(orig, 0); \
- break
-
- UPDATE_FIELD(Int);
- UPDATE_FIELD(Float);
- UPDATE_FIELD(Long);
- UPDATE_FIELD(Double);
- UPDATE_FIELD(Short);
- UPDATE_FIELD(Char);
- UPDATE_FIELD(Byte);
- UPDATE_FIELD(Boolean);
- case art::Primitive::kPrimNot:
- f.SetObject<false>(orig, nullptr);
- break;
- case art::Primitive::kPrimVoid:
- LOG(FATAL) << "Unexpected field with type void found!";
- UNREACHABLE();
- #undef UPDATE_FIELD
- }
- }
art::jit::Jit* jit = driver_->runtime_->GetJit();
if (jit != nullptr) {
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index 58a688c1a0..cedce92806 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -41,6 +41,7 @@
#include "art_jvmti.h"
#include "base/array_ref.h"
#include "base/globals.h"
+#include "dex/class_accessor.h"
#include "dex/dex_file.h"
#include "dex/dex_file_structs.h"
#include "jni/jni_env_ext-inl.h"
@@ -155,6 +156,9 @@ class Redefiner {
bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool CollectAndCreateNewInstances(/*out*/RedefinitionDataIter* cur_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
bool AllocateAndRememberNewDexFileCookie(
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
@@ -234,8 +238,14 @@ class Redefiner {
void RecordNewMethodAdded();
void RecordNewFieldAdded();
+ void RecordHasVirtualMembers() {
+ has_virtuals_ = true;
+ }
+
+ bool HasVirtualMembers() const {
+ return has_virtuals_;
+ }
- private:
bool IsStructuralRedefinition() const {
DCHECK(!(added_fields_ || added_methods_) || driver_->IsStructuralRedefinition())
<< "added_fields_: " << added_fields_ << " added_methods_: " << added_methods_
@@ -243,6 +253,7 @@ class Redefiner {
return driver_->IsStructuralRedefinition() && (added_fields_ || added_methods_);
}
+ private:
void UpdateClassStructurally(const RedefinitionDataIter& cur_data)
REQUIRES(art::Locks::mutator_lock_);
@@ -257,6 +268,7 @@ class Redefiner {
bool added_fields_ = false;
bool added_methods_ = false;
+ bool has_virtuals_ = false;
// Does the class need to be reverified due to verification soft-fails possibly forcing
// interpreter or lock-counting?
@@ -311,6 +323,8 @@ class Redefiner {
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool CollectAndCreateNewInstances(RedefinitionDataHolder& holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReverifyClasses(RedefinitionDataHolder& holder) REQUIRES_SHARED(art::Locks::mutator_lock_);
void UnregisterAllBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 4b85df0ea6..c3518f3ccc 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -129,6 +129,9 @@ enum LockLevel : uint8_t {
kMutatorLock,
kInstrumentEntrypointsLock,
+ // This is a generic lock level for a top-level lock meant to be acquired while already
+ // holding the UserCodeSuspensionLock.
+ kPostUserCodeSuspensionTopLevelLock,
kUserCodeSuspensionLock,
kZygoteCreationLock,
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index a578252e7a..376b524862 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -23,11 +23,13 @@
#include "base/locks.h"
#include "base/macros.h"
#include "gc_root.h"
+#include "handle.h"
#include "obj_ptr.h"
namespace art {
namespace mirror {
+class Class;
class Object;
} // namespace mirror
@@ -39,6 +41,26 @@ class AllocationListener {
public:
virtual ~AllocationListener() {}
+ // An event to allow a listener to intercept and modify an allocation before it takes place.
+ // The listener can change the byte_count and type as it sees fit, including making last-minute
+ // changes on behalf of an allocation occurring on another thread. Extreme caution should be
+ // used when doing so.
+ //
+ // Concurrency guarantees: This might be called multiple times for each single allocation. It is
+ // guaranteed that, between the final call to the callback and the object becoming visible to
+ // heap-walks, there are no suspensions. If a suspension occurs after a call to the callback, the
+ // callback will be invoked again once the suspend point has been passed.
+ //
+ // In other words, if the alloc succeeds there are no suspend-points between the last return of
+ // PreObjectAllocated and the newly allocated object being visible to heap-walks.
+ virtual void PreObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
+ MutableHandle<mirror::Class> type ATTRIBUTE_UNUSED,
+ size_t* byte_count ATTRIBUTE_UNUSED)
+ REQUIRES(!Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) {}
+ // Fast check for whether the listener wants the PreObjectAllocated callback, to avoid the
+ // expense of creating handles when it does not. Defaults to false.
+ virtual bool HasPreAlloc() const { return false; }
virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
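The comments above define the contract for the new pre-allocation hook: a listener opts in via HasPreAlloc(), must expect PreObjectAllocated to run more than once per allocation, and may rely on there being no suspend point between its last return and the object becoming visible. A minimal sketch of a listener written against that contract follows; only the AllocationListener interface itself comes from this change, while the PaddingListener class, its fields, and the assumed include paths and art::gc namespace are illustrative.

    #include <algorithm>

    #include "base/macros.h"
    #include "gc/allocation_listener.h"
    #include "handle.h"
    #include "mirror/class.h"
    #include "mirror/object.h"
    #include "obj_ptr.h"
    #include "thread.h"

    namespace {

    // Hypothetical listener that pads every allocation up to a minimum size.
    class PaddingListener final : public art::gc::AllocationListener {
     public:
      explicit PaddingListener(size_t min_size) : min_size_(min_size) {}

      // Opt in to the (more expensive) pre-allocation path.
      bool HasPreAlloc() const override { return true; }

      void PreObjectAllocated(art::Thread* self ATTRIBUTE_UNUSED,
                              art::MutableHandle<art::mirror::Class> type ATTRIBUTE_UNUSED,
                              size_t* byte_count) override {
        // May run several times for a single allocation, so it must be idempotent.
        *byte_count = std::max(*byte_count, min_size_);
      }

      void ObjectAllocated(art::Thread* self ATTRIBUTE_UNUSED,
                           art::ObjPtr<art::mirror::Object>* obj ATTRIBUTE_UNUSED,
                           size_t byte_count ATTRIBUTE_UNUSED) override {}

     private:
      const size_t min_size_;
    };

    }  // namespace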
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index c1b3a63307..04632ef96b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,10 +65,30 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
self->PoisonObjectPointers();
}
+ auto send_pre_object_allocated = [&]() REQUIRES_SHARED(Locks::mutator_lock_)
+ ACQUIRE(Roles::uninterruptible_) {
+ if constexpr (kInstrumented) {
+ AllocationListener* l = nullptr;
+ l = alloc_listener_.load(std::memory_order_seq_cst);
+ if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(&klass));
+ l->PreObjectAllocated(self, h_klass, &byte_count);
+ }
+ }
+ return self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
+ };
+ // Do the initial pre-alloc
+ const char* old_cause = send_pre_object_allocated();
+ // We shouldn't have any NoThreadSuspension here!
+ DCHECK(old_cause == nullptr) << old_cause;
+
// Need to check that we aren't the large object allocator since the large object allocation code
// path includes this function. If we didn't check we would have an infinite loop.
ObjPtr<mirror::Object> obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
+ // AllocLargeObject can suspend and will call PreObjectAllocated again if needed.
+ self->EndAssertNoThreadSuspension(old_cause);
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
@@ -80,6 +100,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
// If the large object allocation failed, try to use the normal spaces (main space,
// non moving space). This can happen if there is significant virtual address space
// fragmentation.
+ // We need to send the PreObjectAllocated event again; we might have suspended during the failed allocation.
+ old_cause = send_pre_object_allocated();
}
// bytes allocated for the (individual) object.
size_t bytes_allocated;
@@ -100,6 +122,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
usable_size = bytes_allocated;
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
} else if (
!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
(obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
@@ -112,6 +135,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
usable_size = bytes_allocated;
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
} else {
// Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
// non-TLAB object allocations.
@@ -121,14 +145,19 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
if (UNLIKELY(obj == nullptr)) {
// AllocateInternalWithGc can cause thread suspension, if someone instruments the entrypoints
// or changes the allocator in a suspend point here, we need to retry the allocation.
+ // It will send the pre-alloc event again.
+ self->EndAssertNoThreadSuspension(old_cause);
obj = AllocateInternalWithGc(self,
allocator,
kInstrumented,
byte_count,
&bytes_allocated,
&usable_size,
- &bytes_tl_bulk_allocated, &klass);
+ &bytes_tl_bulk_allocated,
+ &klass,
+ &old_cause);
if (obj == nullptr) {
+ self->EndAssertNoThreadSuspension(old_cause);
// The only way that we can get a null return if there is no pending exception is if the
// allocator or instrumentation changed.
if (!self->IsExceptionPending()) {
@@ -156,6 +185,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
if (bytes_tl_bulk_allocated > 0) {
size_t num_bytes_allocated_before =
num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
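The discipline in the hunk above is easy to misread: every path that can reach a suspend point must first drop the "no thread suspension" assertion and, after the suspend point, re-send the pre-alloc event before re-arming it. A condensed sketch of that pattern, using only the Thread methods and the send_pre_object_allocated lambda visible in this diff (DoSomethingThatMightSuspend is a placeholder for calls like AllocLargeObject or a GC):

    // Arm the assertion once the (possibly repeated) pre-alloc event has been sent.
    const char* old_cause =
        self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
    // ... fast-path allocation work: no suspend points allowed here ...
    // Before anything that may suspend, drop the assertion...
    self->EndAssertNoThreadSuspension(old_cause);
    DoSomethingThatMightSuspend(self);  // placeholder for a suspending operation
    // ...then re-send the pre-alloc event and re-arm the assertion before retrying.
    old_cause = send_pre_object_allocated();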
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 85b79da329..a97ff985e6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,7 @@
#include "heap.h"
#include <limits>
+#include "android-base/thread_annotations.h"
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h> // For mallinfo()
#endif
@@ -1723,11 +1724,37 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- ObjPtr<mirror::Class>* klass) {
+ ObjPtr<mirror::Class>* klass,
+ /*out*/const char** old_no_thread_suspend_cause) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
+ auto release_no_suspend = [&]() RELEASE(Roles::uninterruptible_) {
+ self->EndAssertNoThreadSuspension(*old_no_thread_suspend_cause);
+ };
+ auto send_object_pre_alloc = [&]() ACQUIRE(Roles::uninterruptible_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (UNLIKELY(instrumented)) {
+ AllocationListener* l = nullptr;
+ l = alloc_listener_.load(std::memory_order_seq_cst);
+ if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
+ l->PreObjectAllocated(self, h_klass, &alloc_size);
+ }
+ }
+ *old_no_thread_suspend_cause =
+ self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
+};
+#define PERFORM_SUSPENDING_OPERATION(op) \
+ [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
+ release_no_suspend(); \
+ auto res = (op); \
+ send_object_pre_alloc(); \
+ return res; \
+ }()
+
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
// The allocation failed. If the GC is running, block until it completes, and then retry the
@@ -1735,6 +1762,8 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
+ // We just waited, so send the pre-alloc event again.
+ send_object_pre_alloc();
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1749,8 +1778,9 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
}
collector::GcType tried_type = next_gc_type_;
- const bool gc_ran =
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1769,8 +1799,8 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
- const bool plan_gc_ran =
- CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
+ CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1800,7 +1830,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
// TODO: Run finalization, but this may cause more allocations to occur.
// We don't need a WaitForGcToComplete here either.
DCHECK(!gc_plan_.empty());
- CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true));
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1817,7 +1847,8 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
current_time - last_time_homogeneous_space_compaction_by_oom_ >
min_interval_homogeneous_space_compaction_by_oom_) {
last_time_homogeneous_space_compaction_by_oom_ = current_time;
- HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+ HomogeneousSpaceCompactResult result =
+ PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
// Thread suspension could have occurred.
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
@@ -1862,9 +1893,13 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
}
}
}
+#undef PERFORM_SUSPENDING_OPERATION
// If the allocation hasn't succeeded by this point, throw an OOM error.
if (ptr == nullptr) {
+ release_no_suspend();
ThrowOutOfMemoryError(self, alloc_size, allocator);
+ *old_no_thread_suspend_cause =
+ self->StartAssertNoThreadSuspension("Failed allocation fallback");
}
return ptr;
}
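For readers tracing the PERFORM_SUSPENDING_OPERATION macro above, a use such as the one wrapping CollectGarbageInternal expands to roughly the following immediately-invoked lambda (a sketch of the intent, not a verbatim preprocessor expansion):

    const bool gc_ran = [&]() {
      // Leave the uninterruptible region: the wrapped operation may suspend.
      release_no_suspend();
      auto res = CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) !=
                 collector::kGcTypeNone;
      // Re-send the pre-alloc event (which may itself suspend) and re-arm the
      // no-thread-suspension assertion before handing the result back.
      send_object_pre_alloc();
      return res;
    }();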
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9ef6af5c97..6f6cfd1785 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1011,8 +1011,10 @@ class Heap {
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- ObjPtr<mirror::Class>* klass)
+ ObjPtr<mirror::Class>* klass,
+ /*out*/const char** old_no_thread_suspend_cause)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
+ ACQUIRE(Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 6d1a8e0ed6..2f36fe6142 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -37,6 +37,9 @@ class Offset {
constexpr size_t SizeValue() const {
return val_;
}
+ constexpr bool operator==(Offset o) const {
+ return SizeValue() == o.SizeValue();
+ }
protected:
size_t val_;
diff --git a/test/1983-structural-redefinition-failures/expected.txt b/test/1983-structural-redefinition-failures/expected.txt
index 54e1bcc2fb..40a0914278 100644
--- a/test/1983-structural-redefinition-failures/expected.txt
+++ b/test/1983-structural-redefinition-failures/expected.txt
@@ -28,7 +28,7 @@ Is Structurally modifiable class java.util.ArrayList false
Is Structurally modifiable class java.util.Objects true
Is Structurally modifiable class java.util.Arrays true
Is Structurally modifiable class [Ljava.lang.Object; false
-Is Structurally modifiable class java.lang.Integer false
+Is Structurally modifiable class java.lang.Integer true
Is Structurally modifiable class java.lang.Number false
Is Structurally modifiable class art.Test1983$NoVirtuals true
Is Structurally modifiable class art.Test1983$WithVirtuals false
diff --git a/test/1994-final-virtual-structural/expected.txt b/test/1994-final-virtual-structural/expected.txt
new file mode 100644
index 0000000000..9b74d30dd9
--- /dev/null
+++ b/test/1994-final-virtual-structural/expected.txt
@@ -0,0 +1,5 @@
+Hi!
+Hello world!
+Hej Verden!
+Bonjour le monde!
+こんにちは世界!
diff --git a/test/1994-final-virtual-structural/info.txt b/test/1994-final-virtual-structural/info.txt
new file mode 100644
index 0000000000..606c984f7a
--- /dev/null
+++ b/test/1994-final-virtual-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that structural redefinition can add new virtual methods and fields.
diff --git a/test/1994-final-virtual-structural/run b/test/1994-final-virtual-structural/run
new file mode 100755
index 0000000000..03e41a58e7
--- /dev/null
+++ b/test/1994-final-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1994-final-virtual-structural/src/Main.java b/test/1994-final-virtual-structural/src/Main.java
new file mode 100644
index 0000000000..3f0cb14820
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1994.run();
+ }
+}
diff --git a/test/1994-final-virtual-structural/src/art/Redefinition.java b/test/1994-final-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000000..81eaf31bbb
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java \ No newline at end of file
diff --git a/test/1994-final-virtual-structural/src/art/Test1994.java b/test/1994-final-virtual-structural/src/art/Test1994.java
new file mode 100644
index 0000000000..9ae7772682
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/art/Test1994.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1994 {
+
+ public static final class Transform {
+ public void sayHi() {
+ System.out.println("Hi!");
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * public static final class Transform {
+ * public void sayHi() {
+ * sayHiEnglish();
+ * sayHiDanish();
+ * sayHiFrance();
+ * sayHiJapan();
+ * }
+ * public void sayHiEnglish() {
+ * System.out.println("Hello world!");
+ * }
+ * public void sayHiDanish() {
+ * System.out.println("Hej Verden!");
+ * }
+ * public void sayHiJapan() {
+ * System.out.println("こんにちは世界!");
+ * }
+ * public void sayHiFrance() {
+ * System.out.println("Bonjour le monde!");
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQA87tn3VIDgMrF+Md2W4r58elaMPcSfk2CMBQAAcAAAAHhWNBIAAAAAAAAAAMgEAAAc" +
+ "AAAAcAAAAAkAAADgAAAAAgAAAAQBAAABAAAAHAEAAAgAAAAkAQAAAQAAAGQBAAAIBAAAhAEAAG4C" +
+ "AAB2AgAAiQIAAJYCAACkAgAAvgIAAM4CAADyAgAAEgMAACkDAAA9AwAAUQMAAGUDAAB0AwAAfwMA" +
+ "AIIDAACGAwAAkwMAAJkDAACeAwAApwMAAK4DAAC7AwAAyQMAANYDAADiAwAA6QMAAGEEAAAEAAAA" +
+ "BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAA4AAAAOAAAACAAAAAAAAAAPAAAACAAAAGgCAAAH" +
+ "AAQAEgAAAAAAAAAAAAAAAAAAABQAAAAAAAAAFQAAAAAAAAAWAAAAAAAAABcAAAAAAAAAGAAAAAQA" +
+ "AQATAAAABQAAAAAAAAAAAAAAEQAAAAUAAAAAAAAADAAAALgEAACIBAAAAAAAAAEAAQABAAAASAIA" +
+ "AAQAAABwEAcAAAAOAAEAAQABAAAATAIAAA0AAABuEAMAAABuEAIAAABuEAQAAABuEAUAAAAOAAAA" +
+ "AwABAAIAAABUAgAACAAAAGIAAAAaAQIAbiAGABAADgADAAEAAgAAAFkCAAAIAAAAYgAAABoBAwBu" +
+ "IAYAEAAOAAMAAQACAAAAXgIAAAgAAABiAAAAGgEBAG4gBgAQAA4AAwABAAIAAABjAgAACAAAAGIA" +
+ "AAAaARsAbiAGABAADgADAA4ABQAOPDw8PAAOAA54AAsADngAFAAOeAARAA54AAEAAAAGAAY8aW5p" +
+ "dD4AEUJvbmpvdXIgbGUgbW9uZGUhAAtIZWogVmVyZGVuIQAMSGVsbG8gd29ybGQhABhMYXJ0L1Rl" +
+ "c3QxOTk0JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5OTQ7ACJMZGFsdmlrL2Fubm90YXRpb24vRW5j" +
+ "bG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsAFUxqYXZhL2lvL1By" +
+ "aW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZh" +
+ "L2xhbmcvU3lzdGVtOwANVGVzdDE5OTQuamF2YQAJVHJhbnNmb3JtAAFWAAJWTAALYWNjZXNzRmxh" +
+ "Z3MABG5hbWUAA291dAAHcHJpbnRsbgAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gA" +
+ "C3NheUhpRnJhbmNlAApzYXlIaUphcGFuAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6" +
+ "ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiJjZDkwMDIzOTMwZDk3M2Y1NzcxMWYxZDRmZGFh" +
+ "ZDdhM2U0NzE0NjM3IiwidmVyc2lvbiI6IjEuNy4xNC1kZXYifQAI44GT44KT44Gr44Gh44Gv5LiW" +
+ "55WMIQACAgEZGAECAwIQBBkRFw0AAAEFAIGABIQDAQGcAwEByAMBAegDAQGIBAEBqAQAAAAAAAAC" +
+ "AAAAeQQAAH8EAACsBAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAcAAAAcAAAAAIA" +
+ "AAAJAAAA4AAAAAMAAAACAAAABAEAAAQAAAABAAAAHAEAAAUAAAAIAAAAJAEAAAYAAAABAAAAZAEA" +
+ "AAEgAAAGAAAAhAEAAAMgAAAGAAAASAIAAAEQAAABAAAAaAIAAAIgAAAcAAAAbgIAAAQgAAACAAAA" +
+ "eQQAAAAgAAABAAAAiAQAAAMQAAACAAAAqAQAAAYgAAABAAAAuAQAAAAQAAABAAAAyAQAAA==");
+
+ public static void run() {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+ t.sayHi();
+ }
+}
diff --git a/test/1995-final-virtual-structural-multithread/expected.txt b/test/1995-final-virtual-structural-multithread/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/expected.txt
diff --git a/test/1995-final-virtual-structural-multithread/info.txt b/test/1995-final-virtual-structural-multithread/info.txt
new file mode 100644
index 0000000000..f9b7bdd04f
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that structurally redefining a class while it is concurrently in use
+doesn't cause any unexpected problems.
diff --git a/test/1995-final-virtual-structural-multithread/run b/test/1995-final-virtual-structural-multithread/run
new file mode 100755
index 0000000000..421f7b0bf2
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(b/144168550) This test uses access patterns that can be replaced by
+# iget-object-quick during dex2dex compilation. That breaks the test because the
+# -quick opcode encodes the exact byte offset of each field, and this test
+# changes field offsets.
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true -Xcompiler-option --debuggable
diff --git a/test/1995-final-virtual-structural-multithread/src/Main.java b/test/1995-final-virtual-structural-multithread/src/Main.java
new file mode 100644
index 0000000000..f19358d626
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1995.run();
+ }
+}
diff --git a/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java b/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java
new file mode 120000
index 0000000000..81eaf31bbb
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java \ No newline at end of file
diff --git a/test/1995-final-virtual-structural-multithread/src/art/Test1995.java b/test/1995-final-virtual-structural-multithread/src/art/Test1995.java
new file mode 100644
index 0000000000..1ffee60a06
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/art/Test1995.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+public class Test1995 {
+ private static final int NUM_THREADS = 20;
+
+ public static final class Transform {
+ public String greetingEnglish;
+ public Transform() {
+ this.greetingEnglish = "Hello";
+ }
+ public String sayHi() {
+ return greetingEnglish + " from " + Thread.currentThread().getName();
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * public static final class Transform {
+ * public String greetingEnglish;
+ * public String greetingFrench;
+ * public String greetingDanish;
+ * public String greetingJapanese;
+ *
+ * public Transform() {
+ * this.greetingEnglish = "Hello World";
+ * this.greetingFrench = "Bonjour le Monde";
+ * this.greetingDanish = "Hej Verden";
+ * this.greetingJapanese = "こんにちは世界";
+ * }
+ * public String sayHi() {
+ * return sayHiEnglish() + ", " + sayHiFrench() + ", " + sayHiDanish() + ", " + sayHiJapanese() + " from " + Thread.currentThread().getName();
+ * }
+ * public String sayHiEnglish() {
+ * return greetingEnglish;
+ * }
+ * public String sayHiDanish() {
+ * return greetingDanish;
+ * }
+ * public String sayHiJapanese() {
+ * return greetingJapanese;
+ * }
+ * public String sayHiFrench() {
+ * return greetingFrench;
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQCsHrUqkb8cYgT2oYN7HlVbeOxJT/kONRvgBgAAcAAAAHhWNBIAAAAAAAAAABwGAAAl" +
+"AAAAcAAAAAkAAAAEAQAABAAAACgBAAAEAAAAWAEAAAwAAAB4AQAAAQAAANgBAADoBAAA+AEAAEoD" +
+"AABSAwAAVgMAAF4DAABwAwAAfAMAAIkDAACMAwAAkAMAAKoDAAC6AwAA3gMAAP4DAAASBAAAJgQA" +
+"AEEEAABVBAAAZAQAAG8EAAByBAAAfwQAAIcEAACWBAAAnwQAAK8EAADABAAA0AQAAOIEAADoBAAA" +
+"7wQAAPwEAAAKBQAAFwUAACYFAAAwBQAANwUAAK8FAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAO" +
+"AAAADwAAABIAAAAGAAAABQAAAAAAAAAHAAAABgAAAEQDAAAGAAAABwAAAAAAAAASAAAACAAAAAAA" +
+"AAAAAAUAFwAAAAAABQAYAAAAAAAFABkAAAAAAAUAGgAAAAAAAwACAAAAAAAAABwAAAAAAAAAHQAA" +
+"AAAAAAAeAAAAAAAAAB8AAAAAAAAAIAAAAAQAAwACAAAABgADAAIAAAAGAAEAFAAAAAYAAAAhAAAA" +
+"BwACABUAAAAHAAAAFgAAAAAAAAARAAAABAAAAAAAAAAQAAAADAYAANUFAAAAAAAABwABAAIAAAAt" +
+"AwAAQQAAAG4QAwAGAAwAbhAEAAYADAFuEAIABgAMAm4QBQAGAAwDcQAKAAAADARuEAsABAAMBCIF" +
+"BgBwEAcABQBuIAgABQAaAAEAbiAIAAUAbiAIABUAbiAIAAUAbiAIACUAbiAIAAUAbiAIADUAGgAA" +
+"AG4gCAAFAG4gCABFAG4QCQAFAAwAEQAAAAIAAQAAAAAAMQMAAAMAAABUEAAAEQAAAAIAAQAAAAAA" +
+"NQMAAAMAAABUEAEAEQAAAAIAAQAAAAAAOQMAAAMAAABUEAIAEQAAAAIAAQAAAAAAPQMAAAMAAABU" +
+"EAMAEQAAAAIAAQABAAAAJAMAABQAAABwEAYAAQAaAAUAWxABABoAAwBbEAIAGgAEAFsQAAAaACQA" +
+"WxADAA4ACQAOPEtLS0sAEAAOABYADgATAA4AHAAOABkADgAAAAABAAAABQAGIGZyb20gAAIsIAAG" +
+"PGluaXQ+ABBCb25qb3VyIGxlIE1vbmRlAApIZWogVmVyZGVuAAtIZWxsbyBXb3JsZAABTAACTEwA" +
+"GExhcnQvVGVzdDE5OTUkVHJhbnNmb3JtOwAOTGFydC9UZXN0MTk5NTsAIkxkYWx2aWsvYW5ub3Rh" +
+"dGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGph" +
+"dmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVp" +
+"bGRlcjsAEkxqYXZhL2xhbmcvVGhyZWFkOwANVGVzdDE5OTUuamF2YQAJVHJhbnNmb3JtAAFWAAth" +
+"Y2Nlc3NGbGFncwAGYXBwZW5kAA1jdXJyZW50VGhyZWFkAAdnZXROYW1lAA5ncmVldGluZ0Rhbmlz" +
+"aAAPZ3JlZXRpbmdFbmdsaXNoAA5ncmVldGluZ0ZyZW5jaAAQZ3JlZXRpbmdKYXBhbmVzZQAEbmFt" +
+"ZQAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gAC3NheUhpRnJlbmNoAA1zYXlIaUph" +
+"cGFuZXNlAAh0b1N0cmluZwAFdmFsdWUAdn5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIs" +
+"Im1pbi1hcGkiOjEsInNoYS0xIjoiNjBkYTRkNjdiMzgxYzQyNDY3NzU3YzQ5ZmI2ZTU1NzU2ZDg4" +
+"YTJmMyIsInZlcnNpb24iOiIxLjcuMTItZGV2In0AB+OBk+OCk+OBq+OBoeOBr+S4lueVjAACAgEi" +
+"GAECAwITBBkbFxEABAEFAAEBAQEBAQEAgYAE7AUBAfgDAQGMBQEBpAUBAbwFAQHUBQAAAAAAAgAA" +
+"AMYFAADMBQAAAAYAAAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAJQAAAHAAAAACAAAA" +
+"CQAAAAQBAAADAAAABAAAACgBAAAEAAAABAAAAFgBAAAFAAAADAAAAHgBAAAGAAAAAQAAANgBAAAB" +
+"IAAABgAAAPgBAAADIAAABgAAACQDAAABEAAAAQAAAEQDAAACIAAAJQAAAEoDAAAEIAAAAgAAAMYF" +
+"AAAAIAAAAQAAANUFAAADEAAAAgAAAPwFAAAGIAAAAQAAAAwGAAAAEAAAAQAAABwGAAA=");
+
+
+ public static void run() throws Exception {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ doTest();
+ }
+
+ public static final class MyThread extends Thread {
+ public MyThread(CountDownLatch delay, int id) {
+ super("Thread: " + id);
+ this.thr_id = id;
+ this.results = new ArrayList<>(1000);
+ this.finish = false;
+ this.delay = delay;
+ }
+
+ public void run() {
+ delay.countDown();
+ while (!finish) {
+ Transform t = new Transform();
+ results.add(t.sayHi());
+ }
+ }
+
+ public void finish() throws Exception {
+ finish = true;
+ this.join();
+ }
+
+ public void Check() throws Exception {
+ for (String s : results) {
+ if (!s.equals("Hello from " + getName()) &&
+ !s.equals("Hello, null, null, null from " + getName()) &&
+ !s.equals("Hello World, Bonjour le Monde, Hej Verden, こんにちは世界 from " + getName())) {
+ System.out.println("FAIL " + thr_id + ": Unexpected result: " + s);
+ }
+ }
+ }
+
+ public ArrayList<String> results;
+ public volatile boolean finish;
+ public int thr_id;
+ public CountDownLatch delay;
+ }
+
+ public static MyThread[] startThreads(int num_threads) throws Exception {
+ CountDownLatch cdl = new CountDownLatch(num_threads);
+ MyThread[] res = new MyThread[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ res[i] = new MyThread(cdl, i);
+ res[i].start();
+ }
+ cdl.await();
+ return res;
+ }
+ public static void finishThreads(MyThread[] thrs) throws Exception {
+ for (MyThread t : thrs) {
+ t.finish();
+ }
+ for (MyThread t : thrs) {
+ t.Check();
+ }
+ }
+
+ public static void doTest() throws Exception {
+ MyThread[] threads = startThreads(NUM_THREADS);
+ Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+ finishThreads(threads);
+ }
+}
diff --git a/test/1996-final-override-virtual-structural/expected.txt b/test/1996-final-override-virtual-structural/expected.txt
new file mode 100644
index 0000000000..20cd98f1ac
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/expected.txt
@@ -0,0 +1,6 @@
+Not doing anything
+super: Hi this: Hi
+Redefining calling class
+super: Hi this: SALUTATIONS
+Not doing anything
+super: Hi and then this: SALUTATIONS
diff --git a/test/1996-final-override-virtual-structural/info.txt b/test/1996-final-override-virtual-structural/info.txt
new file mode 100644
index 0000000000..55adf7c257
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that structural redefinition allows one to override a superclass method.
diff --git a/test/1996-final-override-virtual-structural/run b/test/1996-final-override-virtual-structural/run
new file mode 100755
index 0000000000..03e41a58e7
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1996-final-override-virtual-structural/src/Main.java b/test/1996-final-override-virtual-structural/src/Main.java
new file mode 100644
index 0000000000..ade69cfe2f
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1996.run();
+ }
+}
diff --git a/test/1996-final-override-virtual-structural/src/art/Redefinition.java b/test/1996-final-override-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000000..81eaf31bbb
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java \ No newline at end of file
diff --git a/test/1996-final-override-virtual-structural/src/art/Test1996.java b/test/1996-final-override-virtual-structural/src/art/Test1996.java
new file mode 100644
index 0000000000..c2b1125528
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/art/Test1996.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1996 {
+
+ public static class SuperTransform {
+ public String hiValue = "Hi";
+ public String sayHi() {
+ return this.hiValue;
+ }
+ }
+ public static final class Transform extends SuperTransform {
+ public void PostTransform() { }
+ public String sayHiTwice(Runnable run) {
+ run.run();
+ return "super: " + super.sayHi() + " this: " + sayHi();
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * public static final class Transform extends SuperTransform {
+ * public String myGreeting;
+ * public void PostTransform() {
+ * myGreeting = "SALUTATIONS";
+ * }
+ * public String sayHiTwice(Runnable run) {
+ * run.run();
+ * return "super: " + super.sayHi() + " and then this: " + sayHi();
+ * }
+ * public String sayHi() {
+ * return myGreeting;
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQAO4Dwurw97RcUtfH7np7S5RR8gsJYOfmeABQAAcAAAAHhWNBIAAAAAAAAAALwEAAAc" +
+"AAAAcAAAAAkAAADgAAAABAAAAAQBAAABAAAANAEAAAoAAAA8AQAAAQAAAIwBAADUAwAArAEAAHYC" +
+"AACIAgAAkAIAAJMCAACXAgAAtgIAANACAADgAgAABAMAACQDAAA6AwAATgMAAGkDAAB4AwAAhQMA" +
+"AJQDAACfAwAAogMAAK8DAAC3AwAAwwMAAMkDAADOAwAA1QMAAOEDAADqAwAA9AMAAPsDAAAEAAAA" +
+"BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAABAAAAACAAAABgAAAAAAAAADAAAABgAAAGgCAAAD" +
+"AAAABwAAAHACAAAQAAAACAAAAAAAAAABAAYAEwAAAAAAAwABAAAAAAAAABYAAAABAAMAAQAAAAEA" +
+"AwAMAAAAAQAAABYAAAABAAEAFwAAAAUAAwAVAAAABwADAAEAAAAHAAIAEgAAAAcAAAAZAAAAAQAA" +
+"ABEAAAAAAAAAAAAAAA4AAACsBAAAggQAAAAAAAACAAEAAAAAAFsCAAADAAAAVBAAABEAAAAFAAIA" +
+"AgAAAF8CAAAlAAAAchAGAAQAbxABAAMADARuEAQAAwAMACIBBwBwEAcAAQAaAhgAbiAIACEAbiAI" +
+"AEEAGgQAAG4gCABBAG4gCAABAG4QCQABAAwEEQQAAAEAAQABAAAAUgIAAAQAAABwEAAAAAAOAAIA" +
+"AQAAAAAAVgIAAAUAAAAaAA0AWxAAAA4ACgAOAA0ADksAFAAOABABAA48AAAAAAEAAAAFAAAAAQAA" +
+"AAYAECBhbmQgdGhlbiB0aGlzOiAABjxpbml0PgABTAACTEwAHUxhcnQvVGVzdDE5OTYkU3VwZXJU" +
+"cmFuc2Zvcm07ABhMYXJ0L1Rlc3QxOTk2JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5OTY7ACJMZGFs" +
+"dmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJD" +
+"bGFzczsAFExqYXZhL2xhbmcvUnVubmFibGU7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xh" +
+"bmcvU3RyaW5nQnVpbGRlcjsADVBvc3RUcmFuc2Zvcm0AC1NBTFVUQVRJT05TAA1UZXN0MTk5Ni5q" +
+"YXZhAAlUcmFuc2Zvcm0AAVYAC2FjY2Vzc0ZsYWdzAAZhcHBlbmQACm15R3JlZXRpbmcABG5hbWUA" +
+"A3J1bgAFc2F5SGkACnNheUhpVHdpY2UAB3N1cGVyOiAACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsi" +
+"Y29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiI2MGRhNGQ2N2Iz" +
+"ODFjNDI0Njc3NTdjNDlmYjZlNTU3NTZkODhhMmYzIiwidmVyc2lvbiI6IjEuNy4xMi1kZXYifQAC" +
+"AwEaGAICBAIRBBkUFw8AAQEDAAECgYAEoAQDAbgEAQGsAwEBxAMAAAAAAAACAAAAcwQAAHkEAACg" +
+"BAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAcAAAAcAAAAAIAAAAJAAAA4AAAAAMA" +
+"AAAEAAAABAEAAAQAAAABAAAANAEAAAUAAAAKAAAAPAEAAAYAAAABAAAAjAEAAAEgAAAEAAAArAEA" +
+"AAMgAAAEAAAAUgIAAAEQAAACAAAAaAIAAAIgAAAcAAAAdgIAAAQgAAACAAAAcwQAAAAgAAABAAAA" +
+"ggQAAAMQAAACAAAAnAQAAAYgAAABAAAArAQAAAAQAAABAAAAvAQAAA==");
+
+ public static void run() {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ doTest(new Transform());
+ }
+
+ public static void doTest(final Transform t) {
+ System.out.println(t.sayHiTwice(() -> { System.out.println("Not doing anything"); }));
+ System.out.println(t.sayHiTwice(
+ () -> {
+ System.out.println("Redefining calling class");
+ Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+ t.PostTransform();
+ }));
+ System.out.println(t.sayHiTwice(() -> { System.out.println("Not doing anything"); }));
+ }
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index cd66472aa8..069cecb27c 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1146,6 +1146,9 @@
"1991-hello-structural-retransform",
"1992-retransform-no-such-field",
"1993-fallback-non-structural",
+ "1994-final-virtual-structural",
+ "1995-final-virtual-structural-multithread",
+ "1996-final-override-virtual-structural",
"1997-structural-shadow-method",
"1998-structural-shadow-field"
],