ART: SHARED_REQUIRES to REQUIRES_SHARED
This matches the actual attribute name and upstream usage.
Preparation for deferring to libbase.
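For context, a rough sketch of the macro this renames (assuming the definition
lives in a header such as base/mutex.h; the exact location and surrounding
helpers below are illustrative, not quoted from the tree). Both the old and new
names expand to clang's requires_shared_capability thread-safety attribute,
which is also the spelling libbase's REQUIRES_SHARED uses, so the rename only
changes the macro name, not the generated annotation:

  // Hypothetical excerpt; real definitions may differ in detail.
  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op outside clang
  #endif

  // Before: the macro name put "SHARED" first.
  #define SHARED_REQUIRES(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

  // After: matches the attribute name and libbase's thread annotations.
  #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

A declaration annotated with REQUIRES_SHARED(Locks::mutator_lock_) is then
checked by -Wthread-safety to be called only while the mutator lock is held
(shared or exclusive).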
Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 101c9a1..277bda4 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -37,7 +37,7 @@
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
// it should be removed and JNI compiled stubs used instead.
ScopedObjectAccessUnchecked soa(self);
@@ -250,7 +250,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool stay_in_interpreter = false) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
@@ -466,7 +466,7 @@
}
static bool IsStringInit(const Instruction* instr, ArtMethod* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
// Instead of calling ResolveMethod() which has suspend point and can trigger
@@ -499,7 +499,7 @@
ShadowFrame* shadow_frame,
bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index bf4bcff..38ce851 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -38,20 +38,20 @@
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
mirror::Object* receiver, uint32_t* args, JValue* result,
bool stay_in_interpreter = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code.
extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// One-time sanity check.
void CheckInterpreterAsmConstants();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ac146b3..77c3f0f 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -196,7 +196,7 @@
template<Primitive::Type field_type>
static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue field_value;
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -456,7 +456,7 @@
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Uint required, so that sign extension does not make this wrong on 64b systems
uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
@@ -491,7 +491,7 @@
template <bool is_range,
bool do_assignability_check,
size_t kVarArgMax>
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
@@ -505,7 +505,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
@@ -541,7 +541,7 @@
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
uint16_t this_obj_vreg,
JValue result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Object* existing = shadow_frame->GetVRegReference(this_obj_vreg);
if (existing == nullptr) {
// If it's null, we come from compiled code that was deoptimized. Nothing to do,
@@ -854,7 +854,7 @@
return true;
}
-// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+// TODO fix thread analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<typename T>
static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
NO_THREAD_SAFETY_ANALYSIS {
@@ -865,7 +865,7 @@
}
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsActiveTransaction());
DCHECK(array != nullptr);
DCHECK_LE(count, array->GetLength());
@@ -904,7 +904,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -917,7 +917,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7b38473..9d76685 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -66,7 +66,7 @@
namespace interpreter {
void ThrowNullPointerExceptionFromInterpreter()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kMonitorCounting>
static inline void DoMonitorEnter(Thread* self,
@@ -108,13 +108,13 @@
void AbortTransactionF(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AbortTransactionV(Thread* self, const char* fmt, va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
// DoInvokeVirtualQuick functions.
@@ -213,32 +213,32 @@
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -272,7 +272,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -290,7 +290,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -308,7 +308,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -326,7 +326,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -350,7 +350,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -378,7 +378,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -411,18 +411,18 @@
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
__attribute__((cold))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set true if you want TraceExecution invocation before each bytecode execution.
constexpr bool kTraceExecutionEnabled = false;
static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
const uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kTraceExecutionEnabled) {
#define TRACE_LOG std::cerr
std::ostringstream oss;
@@ -465,7 +465,7 @@
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -486,7 +486,7 @@
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
diff --git a/runtime/interpreter/interpreter_goto_table_impl.h b/runtime/interpreter/interpreter_goto_table_impl.h
index bb9be88..c54746d 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.h
+++ b/runtime/interpreter/interpreter_goto_table_impl.h
@@ -33,7 +33,7 @@
JValue ExecuteGotoImpl(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
- JValue result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+ JValue result_register) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 322df4e..90d9f89 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -33,7 +33,7 @@
extern "C" bool ExecuteMterpImpl(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
- JValue* result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+ JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 90ec908..d0c9386 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -34,7 +34,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool interpret_one_instruction) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool interpret_one_instruction) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 20a0753..a8c7d15 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -143,7 +143,7 @@
}
extern "C" size_t MterpShouldSwitchInterpreters()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
@@ -154,7 +154,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, false, false>(
@@ -165,7 +165,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, false, false>(
@@ -176,7 +176,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, false, false>(
@@ -187,7 +187,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, false, false>(
@@ -198,7 +198,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, false, false>(
@@ -209,7 +209,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, true, false>(
@@ -220,7 +220,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, true, false>(
@@ -231,7 +231,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, true, false>(
@@ -242,7 +242,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, true, false>(
@@ -253,7 +253,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, true, false>(
@@ -264,7 +264,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<false>(
@@ -275,7 +275,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<true>(
@@ -290,7 +290,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
String* s = ResolveString(self, *shadow_frame, index);
if (UNLIKELY(s == nullptr)) {
return true;
@@ -303,7 +303,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -316,7 +316,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -334,7 +334,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return false; // Caller will check for pending exception. Return value unimportant.
@@ -345,12 +345,12 @@
}
extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
Object* obj = nullptr;
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
@@ -375,7 +375,7 @@
extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -385,7 +385,7 @@
uint16_t* dex_pc_ptr,
uint32_t inst_data,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -394,7 +394,7 @@
extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}
@@ -402,7 +402,7 @@
extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
@@ -421,7 +421,7 @@
extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -430,7 +430,7 @@
extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -439,7 +439,7 @@
extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
Object* obj = AllocArrayFromCode<false, true>(
@@ -453,7 +453,7 @@
}
extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(self->IsExceptionPending());
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
@@ -469,7 +469,7 @@
}
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t inst_data = inst->Fetch16(0);
if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -488,7 +488,7 @@
}
extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -496,7 +496,7 @@
}
extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -504,7 +504,7 @@
}
extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -512,7 +512,7 @@
}
extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -520,7 +520,7 @@
}
extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -528,7 +528,7 @@
}
extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -536,7 +536,7 @@
}
extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -545,7 +545,7 @@
}
extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -553,7 +553,7 @@
}
extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -565,7 +565,7 @@
}
extern "C" size_t MterpSuspendCheck(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
self->AllowThreadSuspension();
return MterpShouldSwitchInterpreters();
}
@@ -574,7 +574,7 @@
ArtMethod* referrer,
uint64_t* new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -595,7 +595,7 @@
mirror::Object* obj,
uint8_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
@@ -614,7 +614,7 @@
mirror::Object* obj,
uint16_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -634,7 +634,7 @@
mirror::Object* obj,
uint32_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -648,7 +648,7 @@
mirror::Object* obj,
uint64_t* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -662,7 +662,7 @@
mirror::Object* obj,
mirror::Object* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -673,7 +673,7 @@
}
extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(arr == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -687,7 +687,7 @@
}
extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -702,7 +702,7 @@
* and regenerated following batch updates.
*/
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint16_t hotness_count = method->GetCounter();
int32_t countdown_value = jit::kJitHotnessDisabled;
jit::Jit* jit = Runtime::Current()->GetJit();
@@ -742,7 +742,7 @@
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
@@ -753,7 +753,7 @@
// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates.
extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -772,7 +772,7 @@
extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
ShadowFrame* shadow_frame,
int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
index 7e7337e..35f8f1c 100644
--- a/runtime/interpreter/mterp/mterp_stub.cc
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -40,7 +40,7 @@
*/
extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result_register)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
UNIMPLEMENTED(art::FATAL);
return false;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index a0e0e62..c614408 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -57,7 +57,7 @@
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
va_list args;
@@ -81,7 +81,7 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset,
- bool to_lower_case) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool to_lower_case) REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t int_value = static_cast<uint32_t>(shadow_frame->GetVReg(arg_offset));
// Only ASCII (7-bit).
@@ -117,7 +117,7 @@
Handle<mirror::ClassLoader> class_loader, JValue* result,
const std::string& method_name, bool initialize_class,
bool abort_if_not_found)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(className.Get() != nullptr);
std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -147,7 +147,7 @@
// actually the transaction abort exception. This must not be wrapped, as it signals an
// initialization abort.
static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (self->IsExceptionPending()) {
// If it is not the transaction abort exception, wrap it.
std::string type(PrettyTypeOf(self->GetException()));
@@ -159,7 +159,7 @@
}
static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
if (param == nullptr) {
AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
@@ -442,7 +442,7 @@
static void GetResourceAsStream(Thread* self,
ShadowFrame* shadow_frame,
JValue* result,
- size_t arg_offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t arg_offset) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* resource_obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (resource_obj == nullptr) {
AbortTransactionOrFail(self, "null name for getResourceAsStream");
@@ -604,7 +604,7 @@
mirror::Array* src_array, int32_t src_pos,
mirror::Array* dst_array, int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
@@ -748,7 +748,7 @@
JValue* result,
size_t arg_offset,
bool is_default_version)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<4> hs(self);
Handle<mirror::String> h_key(
hs.NewHandle(reinterpret_cast<mirror::String*>(shadow_frame->GetVRegReference(arg_offset))));
@@ -915,7 +915,7 @@
}
static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = dex_cache->GetDexFile();
if (dex_file == nullptr) {
return nullptr;
@@ -1026,7 +1026,7 @@
static void UnstartedMemoryPeekArray(
Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
if (obj == nullptr) {
@@ -1173,7 +1173,7 @@
// This allows getting the char array for new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringToCharArray(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (string == nullptr) {
AbortTransactionOrFail(self, "String.charAt with null object");
@@ -1299,7 +1299,7 @@
void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1313,7 +1313,7 @@
void UnstartedRuntime::UnstartedUnsafePutObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1331,7 +1331,7 @@
void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1352,7 +1352,7 @@
// of correctly handling the corner cases.
void UnstartedRuntime::UnstartedIntegerParseInt(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1396,7 +1396,7 @@
// well.
void UnstartedRuntime::UnstartedLongParseLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1437,7 +1437,7 @@
void UnstartedRuntime::UnstartedMethodInvoke(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
ScopedObjectAccessUnchecked soa(self);
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index 03d7026..3f36a27 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -52,14 +52,14 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void Jni(Thread* self,
ArtMethod* method,
mirror::Object* receiver,
uint32_t* args,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Methods that intercept available libcore implementations.
@@ -68,7 +68,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
@@ -82,7 +82,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index c324600..ba751ec 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -49,7 +49,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
}
#include "unstarted_runtime_list.h"
@@ -65,7 +65,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
}
#include "unstarted_runtime_list.h"
@@ -83,7 +83,7 @@
Thread* self,
mirror::Class* component_type,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
mirror::Class* array_type = runtime->GetClassLinker()->FindArrayClass(self, &component_type);
CHECK(array_type != nullptr);
@@ -99,7 +99,7 @@
static void CheckObjectArray(mirror::ObjectArray<mirror::Object>* array,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK_EQ(array->GetLength(), 3);
CHECK_EQ(data.NumberOfReferences(), 3U);
for (size_t i = 0; i < 3; ++i) {
@@ -115,7 +115,7 @@
mirror::ObjectArray<mirror::Object>* dst,
int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue result;
tmp->SetVRegReference(0, src);
tmp->SetVReg(1, src_pos);
@@ -141,7 +141,7 @@
int32_t dst_pos,
int32_t length,
const StackHandleScope<3>& expected_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<3> hs_misc(self);
Handle<mirror::Class> dst_component_handle(hs_misc.NewHandle(dst_component_class));
@@ -167,7 +167,7 @@
ShadowFrame* tmp,
double const test_pairs[][2],
size_t num_pairs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < num_pairs; ++i) {
tmp->SetVRegDouble(0, test_pairs[i][0]);
@@ -189,7 +189,7 @@
// Prepare for aborts. Aborts assume that the exception class is already resolved, as the
// loading code doesn't work under transactions.
- void PrepareForAborts() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void PrepareForAborts() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* result = Runtime::Current()->GetClassLinker()->FindClass(
Thread::Current(),
Transaction::kAbortExceptionSignature,