diff options
author | 2019-03-12 15:46:40 +0000 | |
---|---|---|
committer | 2019-03-14 22:36:28 +0000 | |
commit | b214694e13890559ae587263f93eb3cfdd63eaa1 (patch) | |
tree | b74af3a4ef6b90f1e627ef97956caf2dcc502cd5 /openjdkjvmti/deopt_manager.cc | |
parent | a5c3a808020d81447bc19d07a99288e9d28a6e6c (diff) |
Revert^4 "Add extension and agent for dumping internal jvmti plugin data."
This reverts commit a55e8b5c9827cc4dc4e7f7c7ee07ef1fdafc35e0.
Reason for revert: Fixed underlying issue causing libjdwp test
failure.
Test: ./test.py --host
Change-Id: Ibe831884192db42eb54b25364d31fa21f11fab26
Diffstat (limited to 'openjdkjvmti/deopt_manager.cc')
-rw-r--r-- | openjdkjvmti/deopt_manager.cc | 52 |
1 file changed, 50 insertions, 2 deletions
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc index ee77b7bb77..ec29f2cdda 100644 --- a/openjdkjvmti/deopt_manager.cc +++ b/openjdkjvmti/deopt_manager.cc @@ -30,6 +30,8 @@ */ #include <functional> +#include <iosfwd> +#include <mutex> #include "deopt_manager.h" @@ -109,6 +111,53 @@ void DeoptManager::Shutdown() { callbacks->RemoveMethodInspectionCallback(&inspection_callback_); } +void DeoptManager::DumpDeoptInfo(art::Thread* self, std::ostream& stream) { + art::ScopedObjectAccess soa(self); + art::MutexLock mutll(self, *art::Locks::thread_list_lock_); + art::MutexLock mudsl(self, deoptimization_status_lock_); + art::MutexLock mubsl(self, breakpoint_status_lock_); + stream << "Deoptimizer count: " << deopter_count_ << "\n"; + stream << "Global deopt count: " << global_deopt_count_ << "\n"; + stream << "Can perform OSR: " << !set_local_variable_called_.load() << "\n"; + for (const auto& [bp, loc] : this->breakpoint_status_) { + stream << "Breakpoint: " << bp->PrettyMethod() << " @ 0x" << std::hex << loc << "\n"; + } + struct DumpThreadDeoptCount : public art::Closure { + public: + DumpThreadDeoptCount(std::ostream& stream, std::mutex& mu) + : cnt_(0), stream_(stream), mu_(mu) {} + void Run(art::Thread* self) override { + { + std::lock_guard<std::mutex> lg(mu_); + std::string name; + self->GetThreadName(name); + stream_ << "Thread " << name << " (id: " << std::dec << self->GetThreadId() + << ") force interpreter count " << self->ForceInterpreterCount() << "\n"; + } + // Increment this after unlocking the mutex so we won't race its destructor. 
+ cnt_++; + } + + void WaitForCount(size_t threads) { + while (cnt_.load() != threads) { + sched_yield(); + } + } + + private: + std::atomic<size_t> cnt_; + std::ostream& stream_; + std::mutex& mu_; + }; + + std::mutex mu; + DumpThreadDeoptCount dtdc(stream, mu); + auto func = [](art::Thread* thread, void* ctx) { + reinterpret_cast<DumpThreadDeoptCount*>(ctx)->Run(thread); + }; + art::Runtime::Current()->GetThreadList()->ForEach(func, &dtdc); +} + void DeoptManager::FinishSetup() { art::Thread* self = art::Thread::Current(); art::MutexLock mu(self, deoptimization_status_lock_); @@ -366,8 +415,7 @@ jvmtiError DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnche return err; } // We don't need additional locking here because we hold the Thread_list_lock_. - target->SetForceInterpreterCount(target->ForceInterpreterCount() + 1); - if (target->ForceInterpreterCount() == 1) { + if (target->IncrementForceInterpreterCount() == 1) { struct DeoptClosure : public art::Closure { public: explicit DeoptClosure(DeoptManager* man) : man_(man) {} |