ART: More warnings
Enable -Wredundant-decls and -Wshadow in general, silence conversion-null
diagnostics via -Wno-conversion-null, and enable -Wunused-but-set-parameter
for GCC builds.
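The -Wredundant-decls cleanups replace the extern "C" morecore hooks with
declarations in the art::gc::allocator namespace (ArtDlMallocMoreCore and
ArtRosAllocMoreCore) and scope the unavoidable redeclarations from the
included dlmalloc sources with diagnostic push/pop. The -Wshadow cleanups
rename nested MutexLock locals in rosalloc.cc so inner guards no longer
shadow outer ones.

A sketch of the push/pop idiom adopted below. Saving and restoring the
diagnostic state is preferable to the old "ignored"/"warning" pairing,
which unconditionally re-enabled warnings that the command line may have
disabled (the include path here is illustrative only):

  #pragma GCC diagnostic push                         // save diagnostic state
  #pragma GCC diagnostic ignored "-Wredundant-decls"  // suppress for the include
  #include "third_party/dlmalloc/malloc.c"            // illustrative path
  #pragma GCC diagnostic pop                          // restore saved state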
Change-Id: I81bbdd762213444673c65d85edae594a523836e5
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index acff52d..8558f96 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -19,8 +19,8 @@
#include "base/logging.h"
// ART specific morecore implementation defined in space.cc.
+static void* art_heap_morecore(void* m, intptr_t increment);
#define MORECORE(x) art_heap_morecore(m, x)
-extern "C" void* art_heap_morecore(void* m, intptr_t increment);
// Custom heap error handling.
#define PROCEED_ON_ERROR 0
@@ -31,12 +31,16 @@
// Ugly inclusion of C file so that ART specific #defines configure dlmalloc for our use for
// mspaces (regular dlmalloc is still declared in bionic).
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
-#pragma GCC diagnostic warning "-Wstrict-aliasing"
-#pragma GCC diagnostic warning "-Wempty-body"
+#pragma GCC diagnostic pop
+static void* art_heap_morecore(void* m, intptr_t increment) {
+ return ::art::gc::allocator::ArtDlMallocMoreCore(m, increment);
+}
static void art_heap_corruption(const char* function) {
LOG(::art::FATAL) << "Corrupt heap detected in: " << function;
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index c7ecbc8..0e91a43 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
+#include <cstdint>
+
// Configure dlmalloc for mspaces.
// Avoid a collision with one used in llvm.
#undef HAVE_MMAP
@@ -28,7 +30,10 @@
#define ONLY_MSPACES 1
#define MALLOC_INSPECT_ALL 1
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+#pragma GCC diagnostic pop
#ifdef HAVE_ANDROID_OS
// Define dlmalloc routines from bionic that cannot be included directly because of redefining
@@ -47,4 +52,16 @@
extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
+namespace art {
+namespace gc {
+namespace allocator {
+
+// Callback from dlmalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently dlmalloc_space.cc).
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment);
+
+} // namespace allocator
+} // namespace gc
+} // namespace art
+
#endif // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
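Note that the header above only declares ArtDlMallocMoreCore; per the
comment, the definition lives in dlmalloc_space.cc. A minimal sketch of
what that side plausibly looks like; the GetHeap(), GetDlMallocSpace(),
GetMspace(), and MoreCore() accessors are assumptions for illustration,
not part of this change:

  // Sketch only: forward dlmalloc's footprint request to the owning space.
  void* art::gc::allocator::ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
    // Assumed accessors; the real lookup is whatever dlmalloc_space.cc does.
    art::gc::space::DlMallocSpace* space =
        art::Runtime::Current()->GetHeap()->GetDlMallocSpace();
    DCHECK(space != nullptr);
    DCHECK_EQ(space->GetMspace(), mspace);
    return space->MoreCore(increment);  // maps or unmaps memory at the space end
  }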
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5e2fed..f9d6a51 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -31,8 +31,6 @@
namespace gc {
namespace allocator {
-extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
-
static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;
@@ -179,7 +177,7 @@
page_map_size_ = new_num_of_pages;
DCHECK_LE(page_map_size_, max_page_map_size_);
free_page_run_size_map_.resize(new_num_of_pages);
- art_heap_rosalloc_morecore(this, increment);
+ ArtRosAllocMoreCore(this, increment);
if (last_free_page_run_size > 0) {
// There was a free page run at the end. Expand its size.
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
@@ -745,7 +743,7 @@
const size_t idx = run->size_bracket_idx_;
const size_t bracket_size = bracketSizes[idx];
bool run_was_full = false;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (kIsDebugBuild) {
run_was_full = run->IsFull();
}
@@ -785,7 +783,7 @@
DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
run->ZeroHeader();
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1243,7 +1241,7 @@
run->to_be_bulk_freed_ = false;
#endif
size_t idx = run->size_bracket_idx_;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (run->IsThreadLocal()) {
DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
@@ -1303,7 +1301,7 @@
}
if (!run_was_current) {
run->ZeroHeader();
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1521,7 +1519,7 @@
page_map_size_ = new_num_of_pages;
free_page_run_size_map_.resize(new_num_of_pages);
DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
- art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
+ ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
<< footprint_ << " to " << new_footprint;
@@ -1737,14 +1735,14 @@
void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
if (kIsDebugBuild) {
Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- MutexLock mu2(self, *Locks::thread_list_lock_);
+ MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
for (Thread* t : thread_list) {
AssertThreadLocalRunsAreRevoked(t);
}
for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
CHECK_EQ(current_runs_[idx], dedicated_full_run_);
}
}
@@ -1873,11 +1871,11 @@
Thread* self = Thread::Current();
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
<< "The mutator locks isn't exclusively locked at " << __PRETTY_FUNCTION__;
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
ReaderMutexLock wmu(self, bulk_free_lock_);
std::vector<Run*> runs;
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
size_t pm_end = page_map_size_;
size_t i = 0;
while (i < pm_end) {
@@ -1968,7 +1966,7 @@
std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
for (Thread* thread : threads) {
for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
CHECK(thread_local_run != nullptr);
CHECK(thread_local_run->IsThreadLocal());
@@ -1977,7 +1975,7 @@
}
}
for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* current_run = current_runs_[i];
CHECK(current_run != nullptr);
if (current_run != dedicated_full_run_) {
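The MutexLock renames above are the standard -Wshadow remedy: give each
nested guard a distinct, descriptive name instead of reusing "mu". A
standalone illustration using std::mutex rather than ART's MutexLock:

  #include <mutex>

  std::mutex outer_lock;
  std::mutex inner_lock;

  void Example() {
    std::lock_guard<std::mutex> outer_mu(outer_lock);    // was "mu"
    {
      std::lock_guard<std::mutex> inner_mu(inner_lock);  // also "mu": -Wshadow fired
      // ... work that requires both locks ...
    }
  }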
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index a2f8342..2a0bf10 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -616,6 +616,10 @@
};
std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently rosalloc_space.cc).
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);
+
} // namespace allocator
} // namespace gc
} // namespace art
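As with the dlmalloc hook, rosalloc.h only declares ArtRosAllocMoreCore;
the definition belongs in rosalloc_space.cc. One subtlety visible in the
diff: Trim() passes a negative increment, so an implementation must both
grow and shrink the footprint. A hedged sketch, with GetRosAllocSpace()
as an assumed lookup helper:

  // Sketch only: negative increments hand pages back to the OS.
  void* art::gc::allocator::ArtRosAllocMoreCore(RosAlloc* rosalloc, intptr_t increment) {
    art::gc::space::RosAllocSpace* space =
        art::Runtime::Current()->GetHeap()->GetRosAllocSpace(rosalloc);  // assumed
    DCHECK(space != nullptr);
    return space->MoreCore(increment);  // handles positive and negative sizes
  }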