Move to newer clang thread-safety annotations
Also enable -Wthread-safety-negative.
Changes:
Replace the old SHARED_LOCKS_REQUIRED / EXCLUSIVE_LOCKS_REQUIRED /
LOCKS_EXCLUDED macros with capability-based annotations (SHARED_REQUIRES,
REQUIRES) and express "must not hold" constraints as negative capabilities,
e.g. REQUIRES(!Locks::breakpoint_lock_). A sketch of the new macro shapes
follows.
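The new macros are thin wrappers over clang's capability attributes. A
minimal sketch of the intended shape, for orientation only; the actual
definitions live in ART's macro headers and their exact spellings may differ:

  // Illustrative only -- not the authoritative ART definitions.
  #define REQUIRES(...) \
      __attribute__((requires_capability(__VA_ARGS__)))
  #define SHARED_REQUIRES(...) \
      __attribute__((requires_shared_capability(__VA_ARGS__)))

  // With -Wthread-safety-negative, "caller must NOT hold the lock" is a
  // negative capability instead of the old LOCKS_EXCLUDED macro:
  //   old:  LOCKS_EXCLUDED(Locks::breakpoint_lock_)
  //   new:  REQUIRES(!Locks::breakpoint_lock_)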
Future work:
Use capabilities to implement uninterruptible annotations that interoperate
with AssertNoThreadSuspension (see the illustrative sketch below).
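Purely illustrative sketch of that idea; nothing below exists in this change,
and the class/variable names are hypothetical:

  // A fake "role" capability that stands for "this thread must not suspend".
  class __attribute__((capability("role"))) UninterruptibleRole {};
  extern UninterruptibleRole uninterruptible_;  // hypothetical global role

  // A function that can be a suspension point would then declare:
  //   void AllocObject() REQUIRES(!uninterruptible_);
  // Code inside an AssertNoThreadSuspension scope would acquire
  // uninterruptible_, so calling AllocObject() there becomes a
  // compile-time thread-safety error rather than a runtime check.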
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index eccebf1..287a50b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -72,7 +72,7 @@
public:
Breakpoint(ArtMethod* method, uint32_t dex_pc,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
@@ -81,14 +81,14 @@
method_ = soa.EncodeMethod(method);
}
- Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(other.dex_pc_),
deoptimization_kind_(other.deoptimization_kind_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(other.Method());
}
- ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
return soa.DecodeMethod(method_);
}
@@ -111,7 +111,7 @@
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
}
@@ -123,7 +123,7 @@
void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -149,7 +149,7 @@
void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -166,7 +166,7 @@
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method, uint32_t dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
@@ -174,7 +174,7 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -195,47 +195,47 @@
void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostException(exception_object);
}
// We only care about how many backward branches were executed in the Jit.
void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
<< " " << dex_pc_offset;
}
private:
static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
return instruction->IsReturn();
}
- static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
}
- static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
}
static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return (Dbg::GetInstrumentationEvents() & event) != 0;
}
@@ -298,8 +298,8 @@
}
static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::breakpoint_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
@@ -311,7 +311,7 @@
}
static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
+ REQUIRES(!Locks::thread_suspend_count_lock_) {
MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
// A thread may be suspended for GC; in this code, we really want to know whether
// there's a debugger suspension active.
@@ -319,7 +319,7 @@
}
static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -334,7 +334,7 @@
}
static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -350,8 +350,8 @@
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
if (thread_peer == nullptr) {
// This isn't even an object.
@@ -381,14 +381,14 @@
}
static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string temp;
const char* descriptor = klass->GetDescriptor(&temp);
return BasicTagFromDescriptor(descriptor);
}
static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(c != nullptr);
if (c->IsArrayClass()) {
return JDWP::JT_ARRAY;
@@ -764,7 +764,7 @@
OwnedMonitorVisitor(Thread* thread, Context* context,
std::vector<JDWP::ObjectId>* monitor_vector,
std::vector<uint32_t>* stack_depth_vector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
current_stack_depth(0),
monitors(monitor_vector),
@@ -781,7 +781,7 @@
}
static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
visitor->monitors->push_back(gRegistry->Add(owned_monitor));
visitor->stack_depths->push_back(visitor->current_stack_depth);
@@ -1270,17 +1270,17 @@
}
static JDWP::MethodId ToMethodId(const ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}
static ArtField* FromFieldId(JDWP::FieldId fid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
static ArtMethod* FromMethodId(JDWP::MethodId mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
@@ -1326,10 +1326,7 @@
return modifier_instance == event_instance;
}
-void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_) {
+void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
if (m == nullptr) {
memset(location, 0, sizeof(*location));
} else {
@@ -1376,7 +1373,7 @@
* the end.
*/
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1398,7 +1395,7 @@
* slots to dex style argument placement.
*/
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1424,7 +1421,8 @@
return DexFile::kDexNoIndex16;
}
-JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
+JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
+ JDWP::ExpandBuf* pReply) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -1437,7 +1435,8 @@
expandBufAdd4BE(pReply, instance_field_count + static_field_count);
for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
- ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
+ ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
+ c->GetStaticField(i - instance_field_count);
expandBufAddFieldId(pReply, ToFieldId(f));
expandBufAddUtf8String(pReply, f->GetName());
expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
@@ -1553,7 +1552,7 @@
static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
const char* name, const char* descriptor, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
@@ -1641,7 +1640,7 @@
}
static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
JValue field_value;
switch (fieldType) {
@@ -1688,7 +1687,7 @@
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(ref_type_id, &error);
if (ref_type_id != 0 && c == nullptr) {
@@ -1744,7 +1743,7 @@
}
static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
// Debugging only happens at runtime so we know we are not running in a transaction.
static constexpr bool kNoTransactionMode = false;
@@ -1815,7 +1814,7 @@
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
@@ -1945,7 +1944,7 @@
static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
error);
if (*error != JDWP::ERR_NONE) {
@@ -2004,7 +2003,7 @@
static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
std::vector<JDWP::ObjectId>* child_thread_group_ids)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
// Get the ArrayList<ThreadGroup> "groups" out of this thread group...
@@ -2158,7 +2157,7 @@
static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
mirror::Object* desired_thread_group, mirror::Object* peer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Do we want threads from all thread groups?
if (desired_thread_group == nullptr) {
return true;
@@ -2202,7 +2201,7 @@
}
}
-static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -2245,7 +2244,7 @@
public:
GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
JDWP::ExpandBuf* buf_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0),
start_frame_(start_frame_in),
@@ -2254,7 +2253,7 @@
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2366,7 +2365,7 @@
struct GetThisVisitor : public StackVisitor {
GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object(nullptr),
frame_id(frame_id_in) {}
@@ -2408,7 +2407,7 @@
class FindFrameVisitor FINAL : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_id_(frame_id),
error_(JDWP::ERR_INVALID_FRAMEID) {}
@@ -2482,14 +2481,14 @@
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
static std::string GetStackContextAsString(const StackVisitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
PrettyMethod(visitor.GetMethod()).c_str());
}
static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
<< GetStackContextAsString(visitor);
return kStackFrameLocalAccessError;
@@ -2651,7 +2650,7 @@
template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag, T value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to write " << tag << " local " << value
<< " (0x" << std::hex << value << ") into register v" << vreg
<< GetStackContextAsString(visitor);
@@ -2736,7 +2735,7 @@
}
static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(location != nullptr);
if (m == nullptr) {
memset(location, 0, sizeof(*location));
@@ -2814,7 +2813,7 @@
class CatchLocationFinder : public StackVisitor {
public:
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_(exception),
@@ -2826,7 +2825,7 @@
throw_dex_pc_(DexFile::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -2860,15 +2859,15 @@
return true; // Continue stack walk.
}
- ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
return catch_method_;
}
- ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
return throw_method_;
}
- mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
return this_at_throw_.Get();
}
@@ -3170,7 +3169,7 @@
}
static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// TODO We should not be asked to watch location in a native or abstract method so the code item
@@ -3191,7 +3190,7 @@
}
static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
return &breakpoint;
@@ -3208,7 +3207,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
@@ -3237,7 +3236,7 @@
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
ArtMethod* m,
const Breakpoint** existing_brkpt)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!Dbg::RequiresDeoptimization()) {
// We already run in interpreter-only mode so we don't need to deoptimize anything.
VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
@@ -3498,8 +3497,8 @@
class ScopedThreadSuspension {
public:
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ REQUIRES(!Locks::thread_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) :
thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
@@ -3560,7 +3559,7 @@
// Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
// is for step-out.
struct SingleStepStackVisitor : public StackVisitor {
- explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_depth(0),
method(nullptr),
@@ -4419,7 +4418,7 @@
needHeader_ = false;
}
- void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
if (pieceLenField_ == nullptr) {
// Flush immediately post Reset (maybe back-to-back Flush). Ignore.
CHECK(needHeader_);
@@ -4435,13 +4434,13 @@
}
static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
}
static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
}
@@ -4461,7 +4460,7 @@
}
// Returns true if the object is not an empty chunk.
- bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
// Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken
// in the following code not to allocate memory, by ensuring buf_ is of the correct size
if (used_bytes == 0) {
@@ -4498,7 +4497,7 @@
}
void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
@@ -4507,7 +4506,7 @@
}
void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -4519,7 +4518,7 @@
}
void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Make sure there's enough room left in the buffer.
// We need to use two bytes for every fractional 256 allocation units used by the chunk plus
// 17 bytes for any header.
@@ -4552,12 +4551,12 @@
*p_++ = length - 1;
}
- uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
uint8_t ExamineJavaObject(mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == nullptr) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
@@ -4607,7 +4606,7 @@
};
static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
HeapChunkContext::HeapChunkJavaCallback(
obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
@@ -4772,7 +4771,7 @@
};
static const char* GetMethodSourceFile(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const char* source_file = method->GetDeclaringClassSourceFile();
return (source_file != nullptr) ? source_file : "";