From 7940e44f4517de5e2634a7e07d58d0fb26160513 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Fri, 12 Jul 2013 13:46:57 -0700 Subject: Create separate Android.mk for main build targets The runtime, compiler, dex2oat, and oatdump now are in separate trees to prevent dependency creep. They can now be individually built without rebuilding the rest of the art projects. dalvikvm and jdwpspy were already this way. Builds in the art directory should behave as before, building everything including tests. Change-Id: Ic6b1151e5ed0f823c3dd301afd2b13eb2d8feb81 --- runtime/native/java_lang_System.cc | 334 +++++++++++++++++++++++++++++++++++++ 1 file changed, 334 insertions(+) create mode 100644 runtime/native/java_lang_System.cc (limited to 'runtime/native/java_lang_System.cc') diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc new file mode 100644 index 0000000000..2462f2fd8e --- /dev/null +++ b/runtime/native/java_lang_System.cc @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_throws.h" +#include "gc/accounting/card_table-inl.h" +#include "jni_internal.h" +#include "mirror/array.h" +#include "mirror/class.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "scoped_thread_state_change.h" + +/* + * We make guarantees about the atomicity of accesses to primitive + * variables. These guarantees also apply to elements of arrays. + * In particular, 8-bit, 16-bit, and 32-bit accesses must be atomic and + * must not cause "word tearing". Accesses to 64-bit array elements must + * either be atomic or treated as two 32-bit operations. References are + * always read and written atomically, regardless of the number of bits + * used to represent them. + * + * We can't rely on standard libc functions like memcpy(3) and memmove(3) + * in our implementation of System.arraycopy, because they may copy + * byte-by-byte (either for the full run or for "unaligned" parts at the + * start or end). We need to use functions that guarantee 16-bit or 32-bit + * atomicity as appropriate. + * + * System.arraycopy() is heavily used, so having an efficient implementation + * is important. The bionic libc provides a platform-optimized memory move + * function that should be used when possible. If it's not available, + * the trivial "reference implementation" versions below can be used until + * a proper version can be written. + * + * For these functions, The caller must guarantee that dst/src are aligned + * appropriately for the element type, and that n is a multiple of the + * element size. + */ + +/* + * Works like memmove(), except: + * - if all arguments are at least 32-bit aligned, we guarantee that we + * will use operations that preserve atomicity of 32-bit values + * - if not, we guarantee atomicity of 16-bit values + * + * If all three arguments are not at least 16-bit aligned, the behavior + * of this function is undefined. 
(We could remove this restriction by + * testing for unaligned values and punting to memmove(), but that's + * not currently useful.) + * + * TODO: add loop for 64-bit alignment + * TODO: use __builtin_prefetch + * TODO: write ARM/MIPS/x86 optimized versions + */ +void MemmoveWords(void* dst, const void* src, size_t n) { + DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x01), 0U); + + char* d = reinterpret_cast(dst); + const char* s = reinterpret_cast(src); + size_t copyCount; + + // If the source and destination pointers are the same, this is + // an expensive no-op. Testing for an empty move now allows us + // to skip a check later. + if (n == 0 || d == s) { + return; + } + + // Determine if the source and destination buffers will overlap if + // we copy data forward (i.e. *dst++ = *src++). + // + // It's okay if the destination buffer starts before the source and + // there is some overlap, because the reader is always ahead of the + // writer. + if (LIKELY((d < s) || ((size_t)(d - s) >= n))) { + // Copy forward. We prefer 32-bit loads and stores even for 16-bit + // data, so sort that out. + if (((reinterpret_cast(d) | reinterpret_cast(s)) & 0x03) != 0) { + // Not 32-bit aligned. Two possibilities: + // (1) Congruent, we can align to 32-bit by copying one 16-bit val + // (2) Non-congruent, we can do one of: + // a. copy whole buffer as a series of 16-bit values + // b. load/store 32 bits, using shifts to ensure alignment + // c. just copy the as 32-bit values and assume the CPU + // will do a reasonable job + // + // We're currently using (a), which is suboptimal. + if (((reinterpret_cast(d) ^ reinterpret_cast(s)) & 0x03) != 0) { + copyCount = n; + } else { + copyCount = 2; + } + n -= copyCount; + copyCount /= sizeof(uint16_t); + + while (copyCount--) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += sizeof(uint16_t); + s += sizeof(uint16_t); + } + } + + // Copy 32-bit aligned words. 
+ copyCount = n / sizeof(uint32_t); + while (copyCount--) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += sizeof(uint32_t); + s += sizeof(uint32_t); + } + + // Check for leftovers. Either we finished exactly, or we have one remaining 16-bit chunk. + if ((n & 0x02) != 0) { + *(uint16_t*)d = *(uint16_t*)s; + } + } else { + // Copy backward, starting at the end. + d += n; + s += n; + + if (((reinterpret_cast(d) | reinterpret_cast(s)) & 0x03) != 0) { + // try for 32-bit alignment. + if (((reinterpret_cast(d) ^ reinterpret_cast(s)) & 0x03) != 0) { + copyCount = n; + } else { + copyCount = 2; + } + n -= copyCount; + copyCount /= sizeof(uint16_t); + + while (copyCount--) { + d -= sizeof(uint16_t); + s -= sizeof(uint16_t); + *reinterpret_cast(d) = *reinterpret_cast(s); + } + } + + // Copy 32-bit aligned words. + copyCount = n / sizeof(uint32_t); + while (copyCount--) { + d -= sizeof(uint32_t); + s -= sizeof(uint32_t); + *reinterpret_cast(d) = *reinterpret_cast(s); + } + + // Copy leftovers. + if ((n & 0x02) != 0) { + d -= sizeof(uint16_t); + s -= sizeof(uint16_t); + *reinterpret_cast(d) = *reinterpret_cast(s); + } + } +} + +#define move16 MemmoveWords +#define move32 MemmoveWords + +namespace art { + +static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + std::string actualType(PrettyTypeOf(array)); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "%s of type %s is not an array", identifier, actualType.c_str()); +} + +static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) { + ScopedObjectAccess soa(env); + + // Null pointer checks. 
+ if (UNLIKELY(javaSrc == NULL)) { + ThrowNullPointerException(NULL, "src == null"); + return; + } + if (UNLIKELY(javaDst == NULL)) { + ThrowNullPointerException(NULL, "dst == null"); + return; + } + + // Make sure source and destination are both arrays. + mirror::Object* srcObject = soa.Decode(javaSrc); + mirror::Object* dstObject = soa.Decode(javaDst); + if (UNLIKELY(!srcObject->IsArrayInstance())) { + ThrowArrayStoreException_NotAnArray("source", srcObject); + return; + } + if (UNLIKELY(!dstObject->IsArrayInstance())) { + ThrowArrayStoreException_NotAnArray("destination", dstObject); + return; + } + mirror::Array* srcArray = srcObject->AsArray(); + mirror::Array* dstArray = dstObject->AsArray(); + mirror::Class* srcComponentType = srcArray->GetClass()->GetComponentType(); + mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType(); + + // Bounds checking. + if (UNLIKELY(srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length)) { + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d", + srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length); + return; + } + + // Handle primitive arrays. + if (srcComponentType->IsPrimitive() || dstComponentType->IsPrimitive()) { + // If one of the arrays holds a primitive type the other array must hold the exact same type. 
+ if (UNLIKELY(srcComponentType != dstComponentType)) { + std::string srcType(PrettyTypeOf(srcArray)); + std::string dstType(PrettyTypeOf(dstArray)); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "Incompatible types: src=%s, dst=%s", + srcType.c_str(), dstType.c_str()); + return; + } + + size_t width = srcArray->GetClass()->GetComponentSize(); + uint8_t* dstBytes = reinterpret_cast(dstArray->GetRawData(width)); + const uint8_t* srcBytes = reinterpret_cast(srcArray->GetRawData(width)); + + switch (width) { + case 1: + memmove(dstBytes + dstPos, srcBytes + srcPos, length); + break; + case 2: + move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2); + break; + case 4: + move32(dstBytes + dstPos * 4, srcBytes + srcPos * 4, length * 4); + break; + case 8: + // We don't need to guarantee atomicity of the entire 64-bit word. + move32(dstBytes + dstPos * 8, srcBytes + srcPos * 8, length * 8); + break; + default: + LOG(FATAL) << "Unknown primitive array type: " << PrettyTypeOf(srcArray); + } + + return; + } + + // Neither class is primitive. Are the types trivially compatible? + const size_t width = sizeof(mirror::Object*); + uint8_t* dstBytes = reinterpret_cast(dstArray->GetRawData(width)); + const uint8_t* srcBytes = reinterpret_cast(srcArray->GetRawData(width)); + if (dstArray == srcArray || dstComponentType->IsAssignableFrom(srcComponentType)) { + // Yes. Bulk copy. + COMPILE_ASSERT(sizeof(width) == sizeof(uint32_t), move32_assumes_Object_references_are_32_bit); + move32(dstBytes + dstPos * width, srcBytes + srcPos * width, length * width); + Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length); + return; + } + + // The arrays are not trivially compatible. 
However, we may still be able to copy some or all of + // the elements if the source objects are compatible (for example, copying an Object[] to + // String[], the Objects being copied might actually be Strings). + // We can't do a bulk move because that would introduce a check-use race condition, so we copy + // elements one by one. + + // We already dealt with overlapping copies, so we don't need to cope with that case below. + CHECK_NE(dstArray, srcArray); + + mirror::Object* const * srcObjects = + reinterpret_cast(srcBytes + srcPos * width); + mirror::Object** dstObjects = reinterpret_cast(dstBytes + dstPos * width); + mirror::Class* dstClass = dstArray->GetClass()->GetComponentType(); + + // We want to avoid redundant IsAssignableFrom checks where possible, so we cache a class that + // we know is assignable to the destination array's component type. + mirror::Class* lastAssignableElementClass = dstClass; + + mirror::Object* o = NULL; + int i = 0; + for (; i < length; ++i) { + o = srcObjects[i]; + if (o != NULL) { + mirror::Class* oClass = o->GetClass(); + if (lastAssignableElementClass == oClass) { + dstObjects[i] = o; + } else if (dstClass->IsAssignableFrom(oClass)) { + lastAssignableElementClass = oClass; + dstObjects[i] = o; + } else { + // Can't put this element into the array. 
+ break; + } + } else { + dstObjects[i] = NULL; + } + } + + Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length); + if (UNLIKELY(i != length)) { + std::string actualSrcType(PrettyTypeOf(o)); + std::string dstType(PrettyTypeOf(dstArray)); + ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow(); + soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;", + "source[%d] of type %s cannot be stored in destination array of type %s", + srcPos + i, actualSrcType.c_str(), dstType.c_str()); + return; + } +} + +static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) { + ScopedObjectAccess soa(env); + mirror::Object* o = soa.Decode(javaObject); + return static_cast(o->IdentityHashCode()); +} + +static JNINativeMethod gMethods[] = { + NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"), + NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"), +}; + +void register_java_lang_System(JNIEnv* env) { + REGISTER_NATIVE_METHODS("java/lang/System"); +} + +} // namespace art -- cgit v1.2.3-59-g8ed1b From 2d88862f0752a7a0e65145b088f49dabd49d4284 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Thu, 18 Jul 2013 17:02:00 -0700 Subject: Fixing cpplint readability/casting issues Change-Id: I6821da0e23737995a9b884a04e9b63fac640cd05 --- Android.mk | 2 +- compiler/dex/quick/codegen_util.cc | 4 ++-- compiler/dex/quick/gen_invoke.cc | 2 +- compiler/llvm/runtime_support_builder.cc | 2 +- runtime/atomic.cc | 2 +- runtime/base/histogram-inl.h | 2 +- runtime/common_test.h | 3 ++- runtime/debugger.cc | 4 ++-- runtime/debugger.h | 2 +- runtime/gc/heap.cc | 2 +- runtime/hprof/hprof.cc | 12 ++++++------ runtime/instrumentation.cc | 5 +++-- runtime/jdwp/jdwp_adb.cc | 4 ++-- runtime/mem_map.cc | 3 ++- runtime/native/java_lang_System.cc | 2 +- runtime/thread_x86.cc | 2 +- 16 files changed, 28 insertions(+), 25 deletions(-) (limited to 'runtime/native/java_lang_System.cc') diff --git 
a/Android.mk b/Android.mk index e660827eab..8331b69c2a 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent,+whitespace/blank_line,+whitespace/end_of_line,+whitespace/labels,+whitespace/semicolon,+legal/copyright \ + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent,+whitespace/blank_line,+whitespace/end_of_line,+whitespace/labels,+whitespace/semicolon,+legal/copyright,+readability/casting \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 8daa397135..630e294b37 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -190,10 +190,10 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { } if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use")); + DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use")); } if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def")); + DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def")); } } diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 8840526ec9..13a59bf69a 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -1113,7 +1113,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) 
{ LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg); } else { CHECK(cu_->instruction_set == kX86); - ((X86Mir2Lir*)this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); + reinterpret_cast(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); } StoreValue(rl_dest, rl_result); return true; diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc index 19ccc3603c..e6479e0fd8 100644 --- a/compiler/llvm/runtime_support_builder.cc +++ b/compiler/llvm/runtime_support_builder.cc @@ -43,7 +43,7 @@ RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context, #define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \ do { \ ::llvm::Function* fn = module_.getFunction(#NAME); \ - DCHECK_NE(fn, (void*)NULL) << "Function not found: " << #NAME; \ + DCHECK(fn != NULL) << "Function not found: " << #NAME; \ runtime_support_func_decls_[runtime_support::ID] = fn; \ } while (0); diff --git a/runtime/atomic.cc b/runtime/atomic.cc index f2a998289c..c91db793ba 100644 --- a/runtime/atomic.cc +++ b/runtime/atomic.cc @@ -34,7 +34,7 @@ static const size_t kSwapMutexCount = 32; static std::vector* gSwapMutexes; static Mutex& GetSwapMutex(const volatile int64_t* addr) { - return *(*gSwapMutexes)[((unsigned)(void*)(addr) >> 3U) % kSwapMutexCount]; + return *(*gSwapMutexes)[(reinterpret_cast(addr) >> 3U) % kSwapMutexCount]; } #endif diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index d572cf9cba..1a63cf477f 100644 --- a/runtime/base/histogram-inl.h +++ b/runtime/base/histogram-inl.h @@ -66,7 +66,7 @@ template inline size_t Histogram::FindBucket(Value val) { // dividing the value by the bucket width. 
DCHECK_GE(val, min_); DCHECK_LE(val, max_); - size_t bucket_idx = static_cast((double)(val - min_) / bucket_width_); + size_t bucket_idx = static_cast(static_cast(val - min_) / bucket_width_); DCHECK_GE(bucket_idx, 0ul); DCHECK_LE(bucket_idx, bucket_count_); return bucket_idx; diff --git a/runtime/common_test.h b/runtime/common_test.h index 778ca63826..03a45aa20b 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -508,7 +508,8 @@ class CommonTest : public testing::Test { void ReserveImageSpace() { // Reserve where the image will be loaded up front so that other parts of test set up don't // accidentally end up colliding with the fixed memory address when we need to load the image. - image_reservation_.reset(MemMap::MapAnonymous("image reservation", (byte*)ART_BASE_ADDRESS, + image_reservation_.reset(MemMap::MapAnonymous("image reservation", + reinterpret_cast(ART_BASE_ADDRESS), (size_t)100 * 1024 * 1024, // 100MB PROT_NONE)); } diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 4fbee51045..9e9dd8736c 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -3281,7 +3281,7 @@ class HeapChunkContext { const size_t kMaxFreeLen = 2 * kPageSize; void* freeStart = startOfNextMemoryChunk_; void* freeEnd = start; - size_t freeLen = (char*)freeEnd - (char*)freeStart; + size_t freeLen = reinterpret_cast(freeEnd) - reinterpret_cast(freeStart); if (!native || freeLen < kMaxFreeLen) { AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen); flush = false; @@ -3302,7 +3302,7 @@ class HeapChunkContext { // allocation then the first sizeof(size_t) may belong to it. 
const size_t dlMallocOverhead = sizeof(size_t); AppendChunk(state, start, used_bytes + dlMallocOverhead); - startOfNextMemoryChunk_ = (char*)start + used_bytes + dlMallocOverhead; + startOfNextMemoryChunk_ = reinterpret_cast(start) + used_bytes + dlMallocOverhead; } void AppendChunk(uint8_t state, void* ptr, size_t length) diff --git a/runtime/debugger.h b/runtime/debugger.h index 28a2c60f8c..9005fda392 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -417,7 +417,7 @@ class Dbg { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 170915d8fc..341b62f48a 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -315,7 +315,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { size_t total_objects_allocated = GetObjectsAllocatedEver(); size_t total_bytes_allocated = GetBytesAllocatedEver(); if (total_duration != 0) { - const double total_seconds = double(total_duration / 1000) / 1000000.0; + const double total_seconds = static_cast(total_duration / 1000) / 1000000.0; os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n"; os << "Mean GC size throughput: " << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n"; diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index d66ec7933b..3c8099a917 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -72,7 +72,7 @@ namespace hprof { #define U2_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast(offset); \ uint16_t value_ = (uint16_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 8); \ buf_[offset_ + 1] = (unsigned char)(value_ ); \ @@ -81,7 
+81,7 @@ namespace hprof { #define U4_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast(offset); \ uint32_t value_ = (uint32_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 24); \ buf_[offset_ + 1] = (unsigned char)(value_ >> 16); \ @@ -92,7 +92,7 @@ namespace hprof { #define U8_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast(offset); \ uint64_t value_ = (uint64_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 56); \ buf_[offset_ + 1] = (unsigned char)(value_ >> 48); \ @@ -222,7 +222,7 @@ class HprofRecord { return UNIQUE_ERROR; } nb = fwrite(body_, 1, length_, fp_); - if (nb != (int)length_) { + if (nb != static_cast(length_)) { return UNIQUE_ERROR; } @@ -984,9 +984,9 @@ int Hprof::DumpHeapObject(mirror::Object* obj) { if (size == 1) { rec->AddU1List((const uint8_t*)aobj->GetRawData(sizeof(uint8_t)), length); } else if (size == 2) { - rec->AddU2List((const uint16_t*)(void*)aobj->GetRawData(sizeof(uint16_t)), length); + rec->AddU2List((const uint16_t*)aobj->GetRawData(sizeof(uint16_t)), length); } else if (size == 4) { - rec->AddU4List((const uint32_t*)(void*)aobj->GetRawData(sizeof(uint32_t)), length); + rec->AddU4List((const uint32_t*)aobj->GetRawData(sizeof(uint32_t)), length); } else if (size == 8) { rec->AddU8List((const uint64_t*)aobj->GetRawData(sizeof(uint64_t)), length); } diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 8598d6d4f8..bbd205227d 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -473,7 +473,7 @@ void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object size_t frame_id = StackVisitor::ComputeNumFrames(self); std::deque* stack = self->GetInstrumentationStack(); if (kVerboseInstrumentation) { - LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " 
<< (void*)lr; + LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << reinterpret_cast(lr); } instrumentation::InstrumentationStackFrame instrumentation_frame(this_object, method, lr, frame_id, interpreter_entry); @@ -530,7 +530,8 @@ uint64_t Instrumentation::PopInstrumentationStackFrame(Thread* self, uintptr_t* (static_cast(*return_pc) << 32); } else { if (kVerboseInstrumentation) { - LOG(INFO) << "Returning from " << PrettyMethod(method) << " to PC " << (void*)(*return_pc); + LOG(INFO) << "Returning from " << PrettyMethod(method) + << " to PC " << reinterpret_cast(*return_pc); } return *return_pc; } diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index 9b9fe4c02b..2bfe63e3d4 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -157,7 +157,7 @@ int JdwpAdbState::ReceiveClientFd() { cmsg->cmsg_len = msg.msg_controllen; cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; - ((int*)(void*)CMSG_DATA(cmsg))[0] = -1; + (reinterpret_cast(CMSG_DATA(cmsg)))[0] = -1; int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0)); @@ -170,7 +170,7 @@ int JdwpAdbState::ReceiveClientFd() { return -1; } - return ((int*)(void*)CMSG_DATA(cmsg))[0]; + return (reinterpret_cast(CMSG_DATA(cmsg)))[0]; } /* diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index c75dffa63c..a0f389cb1e 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -192,7 +192,8 @@ bool MemMap::ProtectRegion(uint8_t* addr, size_t length, int prot) { * (The address must be page-aligned, the length doesn't need to be, * but we do need to ensure we cover the same range.) 
*/ - uint8_t* alignAddr = (uint8_t*) ((uintptr_t) addr & ~(kPageSize-1)); + uint8_t* alignAddr = reinterpret_cast(RoundDown(reinterpret_cast(addr), + kPageSize)); size_t alignLength = length + (addr - alignAddr); if (mprotect(alignAddr, alignLength, prot) == 0) { diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc index 2462f2fd8e..30b4dc7ef5 100644 --- a/runtime/native/java_lang_System.cc +++ b/runtime/native/java_lang_System.cc @@ -123,7 +123,7 @@ void MemmoveWords(void* dst, const void* src, size_t n) { // Check for leftovers. Either we finished exactly, or we have one remaining 16-bit chunk. if ((n & 0x02) != 0) { - *(uint16_t*)d = *(uint16_t*)s; + *reinterpret_cast(d) = *reinterpret_cast(s); } } else { // Copy backward, starting at the end. diff --git a/runtime/thread_x86.cc b/runtime/thread_x86.cc index 959f317471..c398b2877a 100644 --- a/runtime/thread_x86.cc +++ b/runtime/thread_x86.cc @@ -73,7 +73,7 @@ void Thread::InitCpu() { entry.d = seg_32bit; entry.g = limit_in_pages; - entry_number = i386_set_ldt(LDT_AUTO_ALLOC, (ldt_entry*)(void*)(&entry), 1); + entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); if (entry_number == -1) { PLOG(FATAL) << "i386_set_ldt failed"; } -- cgit v1.2.3-59-g8ed1b