Use ATTRIBUTE_UNUSED more.
Use it in lieu of UNUSED(), which had some incorrect uses.
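
The typical rewrite, shown here with the Free() method from
runtime/base/allocator.cc (and with ATTRIBUTE_UNUSED assumed to wrap the
compiler's __attribute__((unused)) annotation, as defined in base/macros.h),
moves the "unused" marking from the function body onto the parameter itself:

    // Before: the body exists only to feed the parameter to the macro.
    void Free(void* p) {
      UNUSED(p);
    }

    // After: the attribute documents the unused parameter at its declaration.
    void Free(void* p ATTRIBUTE_UNUSED) {}

UNUSED() remains only where several values are deliberately ignored at once
behind an #ifdef, as in oat_file.cc.
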
Change-Id: If247dce58b72056f6eea84968e7196f0b5bef4da
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f2fc07..f1d0a5f 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -49,15 +49,13 @@
explicit NoopAllocator() {}
~NoopAllocator() {}
- void* Alloc(size_t size) {
- UNUSED(size);
+ void* Alloc(size_t size ATTRIBUTE_UNUSED) {
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
UNREACHABLE();
}
- void Free(void* p) {
+ void Free(void* p ATTRIBUTE_UNUSED) {
// Noop.
- UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 3422625..f9960ac 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -115,9 +115,7 @@
// Used internally by STL data structures.
template <class U>
- TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) noexcept {
- UNUSED(alloc);
- }
+ TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {}
// Used internally by STL data structures.
TrackingAllocatorImpl() noexcept {
@@ -131,8 +129,7 @@
typedef TrackingAllocatorImpl<U, kTag> other;
};
- pointer allocate(size_type n, const_pointer hint = 0) {
- UNUSED(hint);
+ pointer allocate(size_type n, const_pointer hint ATTRIBUTE_UNUSED = 0) {
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 004895a..4e9282f 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -119,13 +119,13 @@
ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
- void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
+ void Copy(const ArenaAllocatorStatsImpl& other ATTRIBUTE_UNUSED) {}
+ void RecordAlloc(size_t bytes ATTRIBUTE_UNUSED, ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
- void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
- UNUSED(os); UNUSED(first); UNUSED(lost_bytes_adjustment);
- }
+ void Dump(std::ostream& os ATTRIBUTE_UNUSED,
+ const Arena* first ATTRIBUTE_UNUSED,
+ ssize_t lost_bytes_adjustment ATTRIBUTE_UNUSED) const {}
};
template <bool kCount>
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 9174d2d..e2d4c24 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -176,8 +176,8 @@
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index 03f4575..e19aecb 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -54,7 +54,7 @@
template <>
class DebugStackReferenceImpl<false> {
public:
- explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter) { UNUSED(counter); }
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter ATTRIBUTE_UNUSED) {}
DebugStackReferenceImpl(const DebugStackReferenceImpl& other) = default;
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) = default;
void CheckTop() { }
@@ -63,7 +63,7 @@
template <>
class DebugStackIndirectTopRefImpl<false> {
public:
- explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref) { UNUSED(ref); }
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref ATTRIBUTE_UNUSED) {}
DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other) = default;
DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) = default;
void CheckTop() { }
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 4f51370..2554fb0 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -132,7 +132,7 @@
ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Allow a delete-expression to destroy but not deallocate allocators created by Create().
- static void operator delete(void* ptr) { UNUSED(ptr); }
+ static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
ArenaStack* const arena_stack_;
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 7c64449..562c2bf 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -146,8 +146,8 @@
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ScopedArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a4f95b6..b17b76e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1903,8 +1903,7 @@
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
return error;
}
@@ -1931,8 +1930,7 @@
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 28c62a8..4e4f851 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -56,9 +56,8 @@
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -83,9 +82,8 @@
return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 377675e..6035dfe 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1524,9 +1524,9 @@
return sp8;
}
- virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
+ virtual void WalkHeader(
+ BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(sm);
}
void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 30a0983..5b31b3a 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -406,9 +406,8 @@
manager_->AddHandler(this, false);
}
-bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
+bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 277d319..eb0852a 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -41,8 +41,7 @@
explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
: dirty_cards_(dirty_cards) {}
- void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 3d85395..e747f00 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -77,7 +77,8 @@
}
extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
- void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
if (used_bytes == 0) {
return;
@@ -86,10 +87,10 @@
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+extern "C" void DlmallocObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
- UNUSED(start);
- UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 6c32658..bb7e854 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,8 +56,7 @@
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
-void StickyMarkSweep::Sweep(bool swap_bitmaps) {
- UNUSED(swap_bitmaps);
+void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index a5dbad9..c081011 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -48,9 +48,7 @@
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
- UNUSED(ptr);
- }
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9e882a8..bbfcb31 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -31,8 +31,7 @@
explicit CountObjectsAllocated(size_t* objects_allocated)
: objects_allocated_(objects_allocated) {}
- void operator()(mirror::Object* obj) const {
- UNUSED(obj);
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
++*objects_allocated_;
}
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 2c44da2..f1d26d9 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -105,8 +105,7 @@
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread) OVERRIDE {
- UNUSED(thread); // Fix cppling bug.
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 44eb29e..18fb0d8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -40,8 +40,9 @@
uint16_t inst_data) {
const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -234,8 +235,9 @@
bool do_assignability_check = do_access_check;
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -775,7 +777,7 @@
template<bool is_range, bool do_assignability_check>
bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+ const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
const uint4_t num_additional_registers = inst->VRegB_25x();
// Argument word count.
const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
@@ -790,7 +792,6 @@
vregC = inst->VRegC_3rc();
} else {
// TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- UNUSED(inst_data);
inst->GetAllArgs25x(arg);
}
@@ -806,7 +807,8 @@
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
- const uint16_t number_of_inputs = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ const uint16_t number_of_inputs =
+ (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
// TODO: find a cleaner way to separate non-range and range information without duplicating
// code.
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0a4d6e3..5427a58 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1072,9 +1072,8 @@
return WriteTaggedObject(reply, contended_monitor);
}
-static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
+static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 4104d7a..dab1040 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -93,8 +93,7 @@
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
+void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6bc1829..234a733 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1743,8 +1743,9 @@
return static_cast<jchar*>(s->GetValue());
}
- static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
- UNUSED(chars);
+ static void ReleaseStringCritical(JNIEnv* env,
+ jstring java_string,
+ const jchar* chars ATTRIBUTE_UNUSED) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 3d54029..ec7d758 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,9 +100,8 @@
explicit SetLengthVisitor(int32_t length) : length_(length) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d931777..e861921 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -248,10 +248,7 @@
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is similar API on OSX,
// but let's fallback to the custom loading code for the time being.
- UNUSED(elf_filename);
- UNUSED(requested_base);
- UNUSED(abs_dex_location);
- UNUSED(error_msg);
+ UNUSED(elf_filename, requested_base, abs_dex_location, error_msg);
return false;
#else
{
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index cef8702..2c81edd 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -954,9 +954,7 @@
loaded_oat_file_(nullptr)
{}
- void Run(Thread* self) {
- UNUSED(self);
-
+ void Run(Thread* self ATTRIBUTE_UNUSED) {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
std::vector<std::unique_ptr<const DexFile>> dex_files;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 745aa63..ab342aa 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -752,26 +752,31 @@
}
}
-void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc) {
- UNUSED(thread, this_object, method, new_dex_pc);
+void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t new_dex_pc) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
- const JValue& field_value)
+void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED,
+ const JValue& field_value ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -804,9 +809,9 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/utils.h b/runtime/utils.h
index b67f273..457d43f 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -272,18 +272,15 @@
class VoidFunctor {
public:
template <typename A>
- inline void operator() (A a) const {
- UNUSED(a);
+ inline void operator() (A a ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B>
- inline void operator() (A a, B b) const {
- UNUSED(a, b);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B, typename C>
- inline void operator() (A a, B b, C c) const {
- UNUSED(a, b, c);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED, C c ATTRIBUTE_UNUSED) const {
}
};