author Eric Holk <eholk@google.com> 2020-08-21 15:38:12 -0700
committer Treehugger Robot <treehugger-gerrit@google.com> 2020-08-28 17:00:06 +0000
commit f1e1dd135c086a15c0e683c5778add033f97e4d9 (patch)
tree 3146a9f0c4dd769a741641f1da34dff7df649480
parent ca8343842f9094fd5eb86569d293250e783f582c (diff)
Enable -Wconversion for thread.cc
This should help prevent bugs due to unexpected implicit integer
conversions. Some collateral changes were needed as well to limit the
number of casts that had to be introduced.

Bug: 165843530
Test: m test-art-host-gtests
Change-Id: I091122827001ab335c7e140864f67cdf90fcf8b4
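As a quick illustration of the class of bug this flag targets, here is a
minimal sketch (Narrow is a hypothetical function, not ART code):

    #include <cstddef>
    #include <cstdint>

    uint32_t Narrow(size_t n) {
      // On LP64 targets size_t is 64 bits wide, so an implicit `return n;`
      // would silently truncate; under -Wconversion clang reports:
      //   implicit conversion loses integer precision: 'size_t' to 'uint32_t'
      return static_cast<uint32_t>(n);  // the cast makes the truncation explicit
    }

The casts introduced throughout this change follow the same pattern: each
one marks a spot where a value-changing conversion previously happened
silently.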
-rw-r--r--  dexlayout/dex_visualize.cc   4
-rw-r--r--  libartbase/base/globals.h    2
-rw-r--r--  libartbase/base/utils.cc    14
-rw-r--r--  libartbase/base/utils.h      4
-rw-r--r--  runtime/runtime.cc           2
-rw-r--r--  runtime/stack_map.cc         2
-rw-r--r--  runtime/stack_map.h          2
-rw-r--r--  runtime/thread.cc           62
-rw-r--r--  runtime/thread.h             2
9 files changed, 51 insertions, 43 deletions
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 27cec8d951..382e294d12 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -70,7 +70,7 @@ class Dumper {
if (printed_one) {
fprintf(out_file_, ", ");
}
- fprintf(out_file_, "\"%s\" %d", s.name.c_str(), s.offset / kPageSize);
+ fprintf(out_file_, "\"%s\" %" PRIuPTR, s.name.c_str(), s.offset / kPageSize);
printed_one = true;
}
}
@@ -331,7 +331,7 @@ void ShowDexSectionStatistics(dex_ir::Header* header, size_t dex_file_index) {
bytes = FindNextByteAfterSection(header, sorted_sections, i) - file_section.offset;
}
fprintf(stdout,
- "%-10s %8d %8d %8d %8d %%%02d\n",
+ "%-10s %8d %8d %8d %8" PRIuPTR " %%%02d\n",
file_section.name.c_str(),
file_section.offset,
file_section.size,
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 97eae635d4..700d4e6d0a 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -36,7 +36,7 @@ static constexpr size_t kStackAlignment = 16;
// System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime, but use a simple
// compile-time constant so the compiler can generate better code.
-static constexpr int kPageSize = 4096;
+static constexpr size_t kPageSize = 4096;
// Clion, clang analyzer, etc can falsely believe that "if (kIsDebugBuild)" always
// returns the same value. By wrapping into a call to another constexpr function, we force it
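The type change above is what lets callers drop casts rather than add them:
once kPageSize is size_t, page-size arithmetic stays unsigned end to end,
instead of mixing signed and unsigned operands the way the unwanted_size
computation in thread.cc below used to. A minimal sketch, assuming a
hypothetical helper PagesFor (not ART code):

    #include <cstddef>

    static constexpr size_t kPageSize = 4096;

    constexpr size_t PagesFor(size_t bytes) {
      return (bytes + kPageSize - 1) / kPageSize;  // round up to whole pages
    }

    static_assert(PagesFor(1) == 1);
    static_assert(PagesFor(kPageSize) == 1);
    static_assert(PagesFor(kPageSize + 1) == 2);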
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 19311b3ded..339cf837a3 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -179,7 +179,7 @@ bool CacheOperationsMaySegFault() {
return false;
}
-pid_t GetTid() {
+uint32_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
CHECK_PTHREAD_CALL(pthread_threadid_np, (nullptr, &owner), __FUNCTION__); // Requires Mac OS 10.6
@@ -209,14 +209,14 @@ std::string GetThreadName(pid_t tid) {
return result;
}
-std::string PrettySize(int64_t byte_count) {
+std::string PrettySize(uint64_t byte_count) {
// The byte thresholds at which we display amounts. A byte count is displayed
// in unit U when kUnitThresholds[U] <= bytes < kUnitThresholds[U+1].
- static const int64_t kUnitThresholds[] = {
- 0, // B up to...
- 10*KB, // KB up to...
- 10*MB, // MB up to...
- 10LL*GB // GB from here.
+ static const uint64_t kUnitThresholds[] = {
+ 0, // B up to...
+ 10*KB, // KB up to...
+ 10*MB, // MB up to...
+ 10ULL*GB // GB from here.
};
static const int64_t kBytesPerUnit[] = { 1, KB, MB, GB };
static const char* const kUnitStrings[] = { "B", "KB", "MB", "GB" };
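A byte count is never negative, so uint64_t is the natural parameter type,
and it matches what call sites actually pass. A caller-side sketch (the stub
body and Report are hypothetical; the real implementation is above):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Stub with the new unsigned signature.
    std::string PrettySize(uint64_t byte_count) {
      return std::to_string(byte_count) + "B";  // placeholder formatting
    }

    void Report(const std::vector<uint8_t>& buf) {
      // buf.size() is size_t (unsigned), so it converts cleanly here; with
      // the old int64_t parameter, clang's -Wconversion (which includes
      // -Wsign-conversion) would require static_cast<int64_t>(buf.size()).
      std::string s = PrettySize(buf.size());
      (void)s;
    }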
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 4bcb9151b7..66a5699519 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -40,14 +40,14 @@ static inline uint32_t PointerToLowMemUInt32(const void* p) {
}
// Returns a human-readable size string such as "1MB".
-std::string PrettySize(int64_t size_in_bytes);
+std::string PrettySize(uint64_t size_in_bytes);
// Splits a string using the given separator character into a vector of
// strings. Empty strings will be omitted.
void Split(const std::string& s, char separator, std::vector<std::string>* result);
// Returns the calling thread's tid. (The C libraries don't expose this.)
-pid_t GetTid();
+uint32_t GetTid();
// Returns the given thread's name.
std::string GetThreadName(pid_t tid);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f6bfd03756..7b907cc265 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1190,7 +1190,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
using Opt = RuntimeArgumentMap;
Opt runtime_options(std::move(runtime_options_in));
ScopedTrace trace(__FUNCTION__);
- CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
+ CHECK_EQ(static_cast<size_t>(sysconf(_SC_PAGE_SIZE)), kPageSize);
// Early override for logging output.
if (runtime_options.Exists(Opt::UseStderrLogger)) {
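sysconf returns a signed long and uses -1 to signal an error, so comparing
it against the now-unsigned kPageSize needs the explicit cast shown above.
A minimal sketch of the same checked-cast pattern (CheckPageSize is
hypothetical, not ART code):

    #include <unistd.h>

    #include <cassert>
    #include <cstddef>

    static constexpr size_t kPageSize = 4096;

    void CheckPageSize() {
      long raw = sysconf(_SC_PAGE_SIZE);              // signed; -1 on error
      assert(raw > 0);                                // rule out the error value
      assert(static_cast<size_t>(raw) == kPageSize);  // now safe to convert
    }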
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index d813fd5487..5cda030285 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -140,7 +140,7 @@ size_t CodeInfo::Deduper::Dedupe(const uint8_t* code_info_data) {
return deduped_offset;
}
-StackMap CodeInfo::GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa) const {
+StackMap CodeInfo::GetStackMapForNativePcOffset(uintptr_t pc, InstructionSet isa) const {
uint32_t packed_pc = StackMap::PackNativePc(pc, isa);
// Binary search. All catch stack maps are stored separately at the end.
auto it = std::partition_point(
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 2065a79dd5..0d289b877b 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -413,7 +413,7 @@ class CodeInfo {
return stack_maps_.GetInvalidRow();
}
- StackMap GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa = kRuntimeISA) const;
+ StackMap GetStackMapForNativePcOffset(uintptr_t pc, InstructionSet isa = kRuntimeISA) const;
// Dump this CodeInfo object on `vios`.
// `code_offset` is the (absolute) native PC of the compiled method.
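Native PCs are derived from pointers, so a pointer-sized parameter avoids a
64-to-32 truncation at call sites. A sketch of the caller-side arithmetic
(VisitPc and Caller are hypothetical, not ART code):

    #include <cstdint>

    void VisitPc(uintptr_t pc) { (void)pc; /* look up the stack map for pc */ }

    void Caller(const void* code_start, const void* return_address) {
      // Pointer-derived offsets are naturally uintptr_t; with a uint32_t
      // parameter this call would be an implicit 64-to-32 conversion on
      // 64-bit targets, which -Wconversion flags.
      uintptr_t pc = reinterpret_cast<uintptr_t>(return_address) -
                     reinterpret_cast<uintptr_t>(code_start);
      VisitPc(pc);
    }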
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 55a7c28e15..55844109d2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -120,6 +120,9 @@
#endif
#endif // ART_USE_FUTEXES
+#pragma clang diagnostic push
+#pragma clang diagnostic error "-Wconversion"
+
namespace art {
using android::base::StringAppendV;
@@ -350,7 +353,7 @@ void Thread::Park(bool is_absolute, int64_t time) {
/* sleep if val = */ kNoPermitWaiterWaiting,
&timespec,
nullptr,
- FUTEX_BITSET_MATCH_ANY);
+ static_cast<int>(FUTEX_BITSET_MATCH_ANY));
} else {
// Time is nanos when scheduled for a relative time
timespec.tv_sec = SaturatedTimeT(time / 1000000000);
@@ -737,7 +740,8 @@ void Thread::InstallImplicitProtection() {
if (ProtectStack(/* fatal_on_error= */ false)) {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
- uint32_t unwanted_size = stack_top - pregion - kPageSize;
+ size_t unwanted_size =
+ reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
madvise(pregion, unwanted_size, MADV_DONTNEED);
return;
}
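The rewrite above is the standard way to keep pointer-distance arithmetic
unsigned: subtracting two raw pointers yields a signed ptrdiff_t, and
storing that in an unsigned type is an implicit sign conversion. A minimal
sketch (Span is hypothetical):

    #include <cstddef>
    #include <cstdint>

    size_t Span(const uint8_t* hi, const uint8_t* lo) {
      // hi - lo would have type ptrdiff_t (signed); routing both operands
      // through uintptr_t keeps the subtraction unsigned and full-width.
      return reinterpret_cast<uintptr_t>(hi) - reinterpret_cast<uintptr_t>(lo);
    }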
@@ -809,7 +813,8 @@ void Thread::InstallImplicitProtection() {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
- uint32_t unwanted_size = stack_top - pregion - kPageSize;
+ size_t unwanted_size =
+ reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
madvise(pregion, unwanted_size, MADV_DONTNEED);
}
@@ -1260,7 +1265,7 @@ static void GetThreadStack(pthread_t thread,
// If we're the main thread, check whether we were run with an unlimited stack. In that case,
// glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
// will be broken because we'll die long before we get close to 2GB.
- bool is_main_thread = (::art::GetTid() == getpid());
+ bool is_main_thread = (::art::GetTid() == static_cast<uint32_t>(getpid()));
if (is_main_thread) {
rlimit stack_limit;
if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
@@ -1379,7 +1384,8 @@ uint64_t Thread::GetCpuMicroTime() const {
pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
timespec now;
clock_gettime(cpu_clock_id, &now);
- return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
+ return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
+ static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
#else // __APPLE__
UNIMPLEMENTED(WARNING);
return -1;
@@ -1891,7 +1897,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
}
os << " | sysTid=" << tid
- << " nice=" << getpriority(PRIO_PROCESS, tid)
+ << " nice=" << getpriority(PRIO_PROCESS, static_cast<id_t>(tid))
<< " cgrp=" << scheduler_group_name;
if (thread != nullptr) {
int policy;
@@ -2543,7 +2549,7 @@ bool Thread::HandleScopeContains(jobject obj) const {
return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
}
-void Thread::HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id) {
+void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootNativeStack, thread_id));
for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
@@ -2706,13 +2712,13 @@ class FetchStackTraceVisitor : public StackVisitor {
class BuildInternalStackTraceVisitor : public StackVisitor {
public:
- BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
+ BuildInternalStackTraceVisitor(Thread* self, Thread* thread, uint32_t skip_depth)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
skip_depth_(skip_depth),
pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
- bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
+ bool Init(uint32_t depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
// Allocate method trace as an object array where the first element is a pointer array that
// contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
// class of the ArtMethod pointers.
@@ -2723,8 +2729,8 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
// The first element is the methods and dex pc array, the other elements are declaring classes
// for the methods to ensure classes in the stack trace don't get unloaded.
Handle<mirror::ObjectArray<mirror::Object>> trace(
- hs.NewHandle(
- mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1)));
+ hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
+ hs.Self(), array_class, static_cast<int32_t>(depth) + 1)));
if (trace == nullptr) {
// Acquire uninterruptible_ in all paths.
self_->StartAssertNoThreadSuspension("Building internal stack trace");
@@ -2771,11 +2777,11 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
count_, method, pointer_size_);
methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
- methods_and_pcs->GetLength() / 2 + count_, dex_pc, pointer_size_);
+ static_cast<uint32_t>(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_);
// Save the declaring class of the method to ensure that the declaring classes of the methods
// do not get unloaded while the stack trace is live.
trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
- count_ + 1, method->GetDeclaringClass());
+ static_cast<int32_t>(count_) + 1, method->GetDeclaringClass());
++count_;
}
@@ -2790,7 +2796,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
private:
Thread* const self_;
// How many more frames to skip.
- int32_t skip_depth_;
+ uint32_t skip_depth_;
// Current position down stack trace.
uint32_t count_ = 0;
// An object array where the first element is a pointer array that contains the ArtMethod
@@ -2882,7 +2888,7 @@ static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement(
// Make the line_number field of StackTraceElement hold the dex pc.
// source_name_object is intentionally left null if we failed to map the dex pc to
// a line number (most probably because there is no debug info). See b/30183883.
- line_number = dex_pc;
+ line_number = static_cast<int32_t>(dex_pc);
} else {
if (source_file != nullptr) {
source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
@@ -2931,7 +2937,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
} else {
// Create java_trace array and place in local reference table
ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> java_traces =
- class_linker->AllocStackTraceElementArray(soa.Self(), depth);
+ class_linker->AllocStackTraceElementArray(soa.Self(), static_cast<size_t>(depth));
if (java_traces == nullptr) {
return nullptr;
}
@@ -2942,7 +2948,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
*stack_depth = depth;
}
- for (int32_t i = 0; i < depth; ++i) {
+ for (uint32_t i = 0; i < static_cast<uint32_t>(depth); ++i) {
ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
// Methods and dex PC trace is element 0.
@@ -2952,13 +2958,14 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
- i + method_trace->GetLength() / 2, kRuntimePointerSize);
+ i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
const ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc);
if (obj == nullptr) {
return nullptr;
}
// We are called from native: use non-transactional mode.
- soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj);
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(
+ static_cast<int32_t>(i), obj);
}
return result;
}
@@ -3097,7 +3104,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
soa.Self(), h_aste_class.Get(), "blockedOn", "Ljava/lang/Object;");
DCHECK(blocked_on_field != nullptr);
- size_t length = dumper.stack_trace_elements_.size();
+ int32_t length = static_cast<int32_t>(dumper.stack_trace_elements_.size());
ObjPtr<mirror::ObjectArray<mirror::Object>> array =
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length);
if (array == nullptr) {
@@ -3110,7 +3117,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
- for (size_t i = 0; i != length; ++i) {
+ for (size_t i = 0; i != static_cast<size_t>(length); ++i) {
handle.Assign(h_aste_class->AllocObject(soa.Self()));
if (handle == nullptr) {
soa.Self()->AssertPendingOOMException();
@@ -3123,9 +3130,8 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
// Create locked-on array.
if (!dumper.lock_objects_[i].empty()) {
- handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
- h_o_array_class.Get(),
- dumper.lock_objects_[i].size()));
+ handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(
+ soa.Self(), h_o_array_class.Get(), static_cast<int32_t>(dumper.lock_objects_[i].size())));
if (handle2 == nullptr) {
soa.Self()->AssertPendingOOMException();
return nullptr;
@@ -3151,7 +3157,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
}
ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
- soa.Env()->SetObjectArrayElement(result.get(), i, elem.get());
+ soa.Env()->SetObjectArrayElement(result.get(), static_cast<jsize>(i), elem.get());
DCHECK(!soa.Self()->IsExceptionPending());
}
@@ -3828,7 +3834,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
// Visit callee-save registers that hold pointers.
uint32_t register_mask = code_info.GetRegisterMaskOf(map);
- for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
+ for (uint32_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
if (kIsDebugBuild && ref_addr == nullptr) {
@@ -3991,7 +3997,7 @@ void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
template <bool kPrecise>
void Thread::VisitRoots(RootVisitor* visitor) {
- const pid_t thread_id = GetThreadId();
+ const uint32_t thread_id = GetThreadId();
visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
@@ -4345,3 +4351,5 @@ ScopedExceptionStorage::~ScopedExceptionStorage() {
}
} // namespace art
+
+#pragma clang diagnostic pop // -Wconversion
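Taken together, the push near the top of the file and this pop scope the
error to thread.cc alone, without touching project-wide flags. A standalone
sketch of the same opt-in pattern for any single translation unit (Sum16 is
hypothetical):

    #include <cstdint>

    #pragma clang diagnostic push
    #pragma clang diagnostic error "-Wconversion"

    uint16_t Sum16(uint16_t a, uint16_t b) {
      // a + b promotes to int; an implicit return would be an error in this
      // region, so the narrowing back to uint16_t is spelled out.
      return static_cast<uint16_t>(a + b);
    }

    #pragma clang diagnostic pop  // back to the project-wide warning level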
diff --git a/runtime/thread.h b/runtime/thread.h
index 619df6fca2..039fdd9762 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -892,7 +892,7 @@ class Thread {
// Is the given obj in this thread's stack indirect reference table?
bool HandleScopeContains(jobject obj) const;
- void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
+ void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
REQUIRES_SHARED(Locks::mutator_lock_);
BaseHandleScope* GetTopHandleScope() {