Move to newer clang thread safety annotations
Also enable -Wthread-safety-negative.
Changes:
Switch the locking annotations to clang's capability-based attributes,
including negative capabilities; e.g. SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
becomes SHARED_REQUIRES(Locks::mutator_lock_).
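
For reference, a minimal sketch of what the capability-style annotations
look like. The attribute spellings are clang's thread safety attributes;
the macro definitions and the Mutex/Read/LockAndWrite names below are
illustrative only, not the actual definitions in the tree:

  // Illustrative macros wrapping clang's capability attributes.
  #define CAPABILITY(x)        __attribute__((capability(x)))
  #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
  #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

  struct CAPABILITY("mutex") Mutex {
    void Lock() __attribute__((acquire_capability()));
    void Unlock() __attribute__((release_capability()));
  };

  Mutex mu;

  // Reader: a shared hold on mu is enough.
  int Read() SHARED_REQUIRES(mu);

  // Negative capability: with -Wthread-safety-negative the caller must be
  // able to prove it does NOT already hold mu, so this cannot self-deadlock.
  void LockAndWrite() REQUIRES(!mu);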
Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
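
As a hypothetical sketch of that future work (reusing the illustrative
macros above; kUninterruptible and the function names are placeholders,
not a final API), "uninterruptible" could itself be modeled as a
capability so the analysis can check no-suspension regions:

  struct CAPABILITY("role") Uninterruptible {};
  extern Uninterruptible kUninterruptible;

  // Entering a no-suspension region acquires the capability...
  void StartAssertNoThreadSuspension()
      __attribute__((acquire_capability(kUninterruptible)));
  void EndAssertNoThreadSuspension()
      __attribute__((release_capability(kUninterruptible)));

  // ...so anything that may suspend the thread can require its negation,
  // turning a suspension point inside such a region into a compile error.
  void AllocObjectWithSuspension() REQUIRES(!kUninterruptible);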
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/stack.h b/runtime/stack.h
index d60714f..8023de1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -155,7 +155,7 @@
// If this returns non-null then this does not mean the vreg is currently a reference
// on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
mirror::Object* ref;
if (HasReferenceArray()) {
@@ -229,7 +229,7 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(val);
@@ -244,14 +244,14 @@
}
}
- ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
@@ -333,7 +333,7 @@
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
virtual void Describe(std::ostream& os) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
const StackVisitor* const stack_visitor_;
@@ -410,7 +410,7 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
@@ -431,31 +431,31 @@
protected:
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
public:
virtual ~StackVisitor() {}
// Return 'true' if we should continue to visit more frames, 'false' to stop.
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
void WalkStack(bool include_transitions = false)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
bool IsShadowFrame() const {
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Callee saves are held at the top of the frame
DCHECK(GetMethod() != nullptr);
uint8_t* save_addr =
@@ -467,46 +467,46 @@
}
// Returns the height of the stack in the managed stack frames, including transitions.
- size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
}
// Returns a frame ID for JDWP use, starting from 1.
- size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFrameHeight() + 1;
}
- size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
if (num_frames_ == 0) {
num_frames_ = ComputeNumFrames(thread_, walk_kind_);
}
return num_frames_;
}
- size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
return cur_depth_;
}
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
@@ -522,9 +522,9 @@
return reinterpret_cast<uint32_t*>(vreg_addr);
}
- uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);
- void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Return sp-relative offset for a Dalvik virtual register, compiler
@@ -606,17 +606,17 @@
return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
- std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
@@ -644,40 +644,40 @@
bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
bool is_float)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);
- InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
Thread* const thread_;
const StackWalkKind walk_kind_;