Diffstat (limited to 'runtime/stack.h')
 -rw-r--r--  runtime/stack.h  107
 1 file changed, 93 insertions, 14 deletions
diff --git a/runtime/stack.h b/runtime/stack.h
index 31acf0eb64..aa7b6160fe 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -21,9 +21,12 @@
 #include <string>
 
 #include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
 #include "dex_file.h"
 #include "gc_root.h"
 #include "mirror/object_reference.h"
+#include "quick/quick_method_frame_info.h"
 #include "read_barrier.h"
 #include "verify_object.h"
 
@@ -37,6 +40,7 @@ class ArtMethod;
 class Context;
 class HandleScope;
 class InlineInfo;
+class OatQuickMethodHeader;
 class ScopedObjectAccess;
 class ShadowFrame;
 class StackVisitor;
@@ -66,6 +70,72 @@ class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
 struct ShadowFrameDeleter;
 using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
 
+// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
+// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
+// thread roots).
+// Note: implementation is split so that the call sites may be optimized to no-ops in case no
+//       lock counting is necessary. The actual implementation is in the cc file to avoid
+//       dependencies.
+class LockCountData {
+ public:
+  // Add the given object to the list of monitors, that is, objects that have been locked. This
+  // will not throw (but be skipped if there is an exception pending on entry).
+  template <bool kLockCounting>
+  void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(self != nullptr);
+    if (!kLockCounting) {
+      return;
+    }
+    AddMonitorInternal(self, obj);
+  }
+
+  // Try to remove the given object from the monitor list, indicating an unlock operation.
+  // This will throw an IllegalMonitorStateException (clearing any already pending exception), in
+  // case that there wasn't a lock recorded for the object.
+  template <bool kLockCounting>
+  void RemoveMonitorOrThrow(Thread* self,
+                            const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(self != nullptr);
+    if (!kLockCounting) {
+      return;
+    }
+    RemoveMonitorInternal(self, obj);
+  }
+
+  // Check whether all acquired monitors have been released. This will potentially throw an
+  // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
+  // check shows that everything is OK wrt/ lock counting, false otherwise.
+  template <bool kLockCounting>
+  bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(self != nullptr);
+    if (!kLockCounting) {
+      return true;
+    }
+    return CheckAllMonitorsReleasedInternal(self);
+  }
+
+  template <typename T, typename... Args>
+  void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (monitors_ != nullptr) {
+      // Visitors may change the Object*. Be careful with the foreach loop.
+      for (mirror::Object*& obj : *monitors_) {
+        visitor(/* inout */ &obj, std::forward<Args>(args)...);
+      }
+    }
+  }
+
+ private:
+  // Internal implementations.
+  void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+  void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Stores references to the locked-on objects. As noted, this should be visited during thread
+  // marking.
+  std::unique_ptr<std::vector<mirror::Object*>> monitors_;
+};
+
 // ShadowFrame has 2 possible layouts:
 //  - interpreter - separate VRegs and reference arrays. References are in the reference array.
 //  - JNI - just VRegs, but where every VReg holds a reference.
@@ -272,6 +342,10 @@ class ShadowFrame {
     }
   }
 
+  LockCountData& GetLockCountData() {
+    return lock_count_data_;
+  }
+
   static size_t LinkOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, link_);
   }
@@ -330,6 +404,7 @@ class ShadowFrame {
   ShadowFrame* link_;
   ArtMethod* method_;
   uint32_t dex_pc_;
+  LockCountData lock_count_data_;  // This may contain GC roots when lock counting is active.
 
   // This is a two-part array:
   //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
@@ -458,6 +533,9 @@ class StackVisitor {
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
      SHARED_REQUIRES(Locks::mutator_lock_);
 
+  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
  public:
   virtual ~StackVisitor() {}
 
@@ -487,18 +565,6 @@ class StackVisitor {
 
   size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    // Callee saves are held at the top of the frame
-    DCHECK(GetMethod() != nullptr);
-    uint8_t* save_addr =
-        reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
-#if defined(__i386__) || defined(__x86_64__)
-    save_addr -= sizeof(void*);  // account for return address
-#endif
-    return reinterpret_cast<uintptr_t*>(save_addr);
-  }
-
   // Returns the height of the stack in the managed stack frames, including transitions.
   size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetNumFrames() - cur_depth_ - 1;
@@ -632,6 +698,10 @@ class StackVisitor {
     return current_inlining_depth_ != 0;
   }
 
+  size_t GetCurrentInliningDepth() const {
+    return current_inlining_depth_;
+  }
+
   uintptr_t GetCurrentQuickFramePc() const {
     return cur_quick_frame_pc_;
   }
@@ -644,6 +714,10 @@ class StackVisitor {
     return cur_shadow_frame_;
   }
 
+  bool IsCurrentFrameInInterpreter() const {
+    return cur_shadow_frame_ != nullptr;
+  }
+
   HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
     ArtMethod** sp = GetCurrentQuickFrame();
     // Skip ArtMethod*; handle scope comes next;
@@ -657,6 +731,12 @@ class StackVisitor {
 
   static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
 
+  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
+    return cur_oat_quick_method_header_;
+  }
+
+  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
+
  private:
   // Private constructor known in the case that num_frames_ has already been computed.
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
@@ -694,8 +774,6 @@ class StackVisitor {
   bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                           uint64_t* val) const
@@ -733,6 +811,7 @@ class StackVisitor {
   ShadowFrame* cur_shadow_frame_;
   ArtMethod** cur_quick_frame_;
   uintptr_t cur_quick_frame_pc_;
+  const OatQuickMethodHeader* cur_oat_quick_method_header_;
   // Lazily computed, number of frames in the stack.
   size_t num_frames_;
   // Depth of the frame we're currently at.
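
For context, the LockCountData methods added above are meant to be driven at monitor-enter, monitor-exit, and method-return time, with the kLockCounting template argument letting call sites compile down to no-ops when counting is disabled. Below is a minimal sketch of that call pattern; the hook names (HandleMonitorEnter and friends) and the kMonitorCounting flag are illustrative, not part of this change, and the usual ART types (Thread, ShadowFrame, mirror::Object) are assumed to be in scope.

  static constexpr bool kMonitorCounting = true;  // Illustrative compile-time switch.

  void HandleMonitorEnter(Thread* self, ShadowFrame& frame, mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    obj->MonitorEnter(self);  // Acquire the real monitor first.
    // Record the lock; a duplicate entry marks a recursive lock.
    frame.GetLockCountData().AddMonitor<kMonitorCounting>(self, obj);
  }

  void HandleMonitorExit(Thread* self, ShadowFrame& frame, mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    obj->MonitorExit(self);
    // Throws IllegalMonitorStateException if no lock was recorded for obj.
    frame.GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, obj);
  }

  bool CheckStructuredLockingOnReturn(Thread* self, ShadowFrame& frame)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // False (with a pending exception) if any recorded monitor is still held.
    return frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<kMonitorCounting>(self);
  }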
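
Since monitors_ stores raw mirror::Object pointers, the shadow frame's owner has to report them as GC roots, and VisitMonitors hands each entry out as an in/out pointer so a moving collector can write a relocated address back into the vector. A sketch of such a visitor follows, assuming a simple callback-style root updater; the actual root-visiting plumbing lives outside this header and is not part of this hunk.

  // Forward every recorded monitor to a callback that may move the object and
  // update the stored entry in place through the Object** it is given.
  void VisitLockedObjects(ShadowFrame* frame,
                          void (*callback)(mirror::Object** root, void* arg),
                          void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    frame->GetLockCountData().VisitMonitors(
        [callback, arg](mirror::Object** entry) {
          // VisitMonitors passes the address of the vector element, so writing
          // through 'entry' updates the root after the collector moves the object.
          callback(entry, arg);
        });
  }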
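
The new StackVisitor accessors (IsCurrentFrameInInterpreter(), GetCurrentOatQuickMethodHeader(), GetCurrentQuickFrameInfo()) are read while walking a thread's stack. A rough sketch of a consumer is shown below; it assumes the VisitFrame()/WalkStack() protocol and the PrettyMethod() helper from elsewhere in the tree, none of which appear in this diff, and FrameKindVisitor itself is a hypothetical example class.

  class FrameKindVisitor FINAL : public StackVisitor {
   public:
    FrameKindVisitor(Thread* thread, Context* context)
        SHARED_REQUIRES(Locks::mutator_lock_)
        : StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      if (IsCurrentFrameInInterpreter()) {
        // A shadow frame means the method is currently interpreted.
        LOG(INFO) << "interpreted: " << PrettyMethod(GetMethod());
      } else {
        // Compiled code: the method header and frame info describe the quick frame.
        const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        LOG(INFO) << "compiled: " << PrettyMethod(GetMethod())
                  << " frame_size=" << frame_info.FrameSizeInBytes()
                  << " has_method_header=" << (header != nullptr);
      }
      return true;  // Continue to the caller's frame.
    }
  };

A caller would typically construct this for a suspended thread and then drive it with WalkStack(), the same way the other visitors declared in this header are used.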