Diffstat (limited to 'runtime')
-rw-r--r--  runtime/art_method-inl.h                 |  11
-rw-r--r--  runtime/art_method.h                     |  14
-rw-r--r--  runtime/base/bit_utils.h                 |  26
-rw-r--r--  runtime/class_linker.cc                  |  50
-rw-r--r--  runtime/elf_file.cc                      |   3
-rw-r--r--  runtime/elf_file.h                       |   2
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc  |   2
-rw-r--r--  runtime/fault_handler.cc                 |   2
-rw-r--r--  runtime/gc/collector/semi_space.cc       |   6
-rw-r--r--  runtime/image.cc                         |  12
-rw-r--r--  runtime/image.h                          |   2
-rw-r--r--  runtime/leb128.h                         |   4
-rw-r--r--  runtime/length_prefixed_array.h          |  51
-rw-r--r--  runtime/mirror/class-inl.h               |  26
-rw-r--r--  runtime/native/java_lang_Class.cc        |   2
-rw-r--r--  runtime/thread.cc                        |  20
-rw-r--r--  runtime/utils.cc                         | 372
-rw-r--r--  runtime/utils.h                          |   3
-rw-r--r--  runtime/verifier/method_verifier.cc      |  58
19 files changed, 575 insertions(+), 91 deletions(-)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index bb3c72c433..40bb9e1d9b 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -63,6 +63,15 @@ inline void ArtMethod::SetDeclaringClass(mirror::Class* new_declaring_class) {
declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
+inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class,
+ mirror::Class* desired_class) {
+ GcRoot<mirror::Class> expected_root(expected_class);
+ GcRoot<mirror::Class> desired_root(desired_class);
+ return reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_)->
+ CompareExchangeStrongSequentiallyConsistent(
+ expected_root, desired_root);
+}
+
inline uint32_t ArtMethod::GetAccessFlags() {
DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
GetDeclaringClass()->IsErroneous());
@@ -497,7 +506,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor) {
inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
- ObjectSize(image_pointer_size));
+ Size(image_pointer_size));
declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>(
const_cast<ArtMethod*>(src)->GetDexCacheResolvedMethods());
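
The new CASDeclaringClass exists because several GC root visitors (one per stack frame, possibly on several threads) can reference the same ArtMethod and race to install a forwarded declaring class; a compare-and-swap lets exactly one writer win without clobbering a newer value. A minimal sketch of that semantics, assuming std::atomic in place of ART's Atomic<GcRoot<...>> wrapper (Class and Method below are illustrative stand-ins, not ART types):

    #include <atomic>
    #include <cstdio>

    struct Class {};  // stand-in for mirror::Class

    // Illustrative stand-in for ArtMethod's declaring-class slot.
    struct Method {
      std::atomic<Class*> declaring_class_;

      // Mirrors the semantics of CASDeclaringClass: succeed only if the field
      // still holds `expected`, so concurrent updaters cannot clobber each other.
      bool CASDeclaringClass(Class* expected, Class* desired) {
        return declaring_class_.compare_exchange_strong(expected, desired);
      }
    };

    int main() {
      Class from, to;
      Method m{&from};
      printf("first CAS:  %d\n", m.CASDeclaringClass(&from, &to));  // 1: this caller wins
      printf("second CAS: %d\n", m.CASDeclaringClass(&from, &to));  // 0: field already updated
      return 0;
    }
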
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 90352b7c08..1afd056655 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -67,6 +67,9 @@ class ArtMethod FINAL {
void SetDeclaringClass(mirror::Class *new_declaring_class)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
@@ -504,12 +507,19 @@ class ArtMethod FINAL {
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Size of an instance of this object.
- static size_t ObjectSize(size_t pointer_size) {
+ // Size of an instance of this native class.
+ static size_t Size(size_t pointer_size) {
return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) +
(sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
}
+ // Alignment of an instance of this native class.
+ static size_t Alignment(size_t pointer_size) {
+ // The ArtMethod alignment is the same as image pointer size. This differs from
+ // alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*).
+ return pointer_size;
+ }
+
void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
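
The rename from ObjectSize to Size and the new Alignment helper both stress that ArtMethod layout is driven by the target pointer size: a 64-bit dex2oat producing a 32-bit boot image must use 4-byte strides and alignment even though alignof(ArtMethod) on the host is 8. A quick sketch of the size formula under an assumed, purely hypothetical field layout:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    // Illustrative: round the fixed-field offset up to the pointer size, then
    // add the pointer-sized trailing fields, as ArtMethod::Size does.
    constexpr size_t MethodSize(size_t offset_of_ptr_fields,
                                size_t num_ptr_fields,
                                size_t pointer_size) {
      return RoundUp(offset_of_ptr_fields, pointer_size) +
             num_ptr_fields * pointer_size;
    }

    int main() {
      // Hypothetical layout: 36 bytes of fixed fields, 3 pointer-sized entries.
      printf("32-bit target: %zu\n", MethodSize(36, 3, 4));  // 36 + 12 = 48
      printf("64-bit target: %zu\n", MethodSize(36, 3, 8));  // 40 + 24 = 64
      return 0;
    }
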
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 6f45dc8209..1b0d774419 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -29,21 +29,28 @@ namespace art {
template<typename T>
static constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
- // TODO: assert unsigned. There is currently many uses with signed values.
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_clz(x) // TODO: __builtin_clz[ll] has undefined behavior for x=0
- : __builtin_clzll(x);
+ return
+ DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
+ (sizeof(T) == sizeof(uint32_t))
+ ? __builtin_clz(x)
+ : __builtin_clzll(x);
}
template<typename T>
static constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
- // TODO: assert unsigned. There is currently many uses with signed values.
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_ctz(x)
- : __builtin_ctzll(x);
+ // It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
+ // that T is an unsigned type.
+  static_assert(sizeof(T) <= sizeof(long long),  // NOLINT [runtime/int] [4]
+                "T too large, must not be larger than long long");
+ return
+ DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
+ (sizeof(T) == sizeof(uint32_t))
+ ? __builtin_ctz(x)
+ : __builtin_ctzll(x);
}
template<typename T>
@@ -158,6 +165,9 @@ static inline bool IsAlignedParam(T x, int n) {
#define DCHECK_ALIGNED(value, alignment) \
DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
+#define CHECK_ALIGNED_PARAM(value, alignment) \
+ CHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
+
#define DCHECK_ALIGNED_PARAM(value, alignment) \
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
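
The DCHECK_CONSTEXPR guards matter because __builtin_clz(0) and __builtin_ctz(0) are undefined behavior; callers that may see zero must mask first, as leb128.h does further down with `data | 1U`. A hedged sketch of the same guard using a plain assert instead of ART's DCHECK_CONSTEXPR macro (GCC/Clang builtins assumed):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    // Sketch of the patched CLZ: unsigned-only, zero rejected at runtime.
    template <typename T>
    int Clz(T x) {
      static_assert(std::is_unsigned<T>::value, "T must be unsigned");
      static_assert(sizeof(T) <= sizeof(unsigned long long), "T too large");
      assert(x != 0 && "__builtin_clz(0) is undefined behavior");
      return sizeof(T) == sizeof(uint32_t) ? __builtin_clz(x) : __builtin_clzll(x);
    }

    int main() {
      printf("Clz(1u) = %d\n", Clz(1u));  // 31 leading zeros in a 32-bit one
      return 0;
    }
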
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f19263d757..c179c64491 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1213,9 +1213,8 @@ void ClassLinker::InitFromImage() {
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& methods = header.GetMethodsSection();
- const size_t art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), art_method_size);
+ methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
}
// reinit class_roots_
@@ -2294,9 +2293,11 @@ LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self, siz
if (length == 0) {
return nullptr;
}
- auto* ret = new(Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtField>::ComputeSize(length))) LengthPrefixedArray<ArtField>(
- length);
+ // If the ArtField alignment changes, review all uses of LengthPrefixedArray<ArtField>.
+ static_assert(alignof(ArtField) == 4, "ArtField alignment is expected to be 4.");
+ size_t storage_size = LengthPrefixedArray<ArtField>::ComputeSize(length);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new(array_storage) LengthPrefixedArray<ArtField>(length);
CHECK(ret != nullptr);
std::uninitialized_fill_n(&ret->At(0), length, ArtField());
return ret;
@@ -2306,13 +2307,15 @@ LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, s
if (length == 0) {
return nullptr;
}
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- auto* ret = new (Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size)))
- LengthPrefixedArray<ArtMethod>(length);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t storage_size =
+ LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size, method_alignment);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new (array_storage) LengthPrefixedArray<ArtMethod>(length);
CHECK(ret != nullptr);
for (size_t i = 0; i < length; ++i) {
- new(reinterpret_cast<void*>(&ret->At(i, method_size))) ArtMethod;
+ new(reinterpret_cast<void*>(&ret->At(i, method_size, method_alignment))) ArtMethod;
}
return ret;
}
@@ -4689,7 +4692,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
const bool have_interfaces = interfaces.Get() != nullptr;
const size_t num_interfaces =
have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
if (num_interfaces == 0) {
if (super_ifcount == 0) {
// Class implements no interfaces.
@@ -4914,7 +4918,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// matter which direction we go. We walk it backward anyway.)
for (k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
- &input_virtual_methods->At(k, method_size) :
+ &input_virtual_methods->At(k, method_size, method_alignment) :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
ArtMethod* vtable_method_for_name_comparison =
vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -4975,10 +4979,14 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the
// realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since
// CopyFrom has internal read barriers.
- const size_t old_size = old_virtuals != nullptr ?
- LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count, method_size) : 0u;
+ const size_t old_size = old_virtuals != nullptr
+ ? LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count,
+ method_size,
+ method_alignment)
+ : 0u;
const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count,
- method_size);
+ method_size,
+ method_alignment);
auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size));
if (UNLIKELY(virtuals == nullptr)) {
@@ -4989,7 +4997,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
if (virtuals != old_virtuals) {
// Maps from heap allocated miranda method to linear alloc miranda method.
- StrideIterator<ArtMethod> out = virtuals->Begin(method_size);
+ StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment);
// Copy over the old methods + miranda methods.
for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
move_table.emplace(&m, &*out);
@@ -4999,7 +5007,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size) + old_method_count);
+ StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
@@ -5022,7 +5030,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
self->AssertPendingOOMException();
return false;
}
- out = StrideIterator<ArtMethod>(virtuals->Begin(method_size) + old_method_count);
+ out = virtuals->Begin(method_size, method_alignment) + old_method_count;
size_t vtable_pos = old_vtable_count;
for (size_t i = old_method_count; i < new_method_count; ++i) {
// Leave the declaring class alone as type indices are relative to it
@@ -5893,8 +5901,10 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
}
ArtMethod* ClassLinker::CreateRuntimeMethod() {
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- ArtMethod* method = &AllocArtMethodArray(Thread::Current(), 1)->At(0, method_size);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(Thread::Current(), 1);
+ ArtMethod* method = &method_array->At(0, method_size, method_alignment);
CHECK(method != nullptr);
method->SetDexMethodIndex(DexFile::kDexNoIndex);
CHECK(method->IsRuntimeMethod());
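
AllocArtMethodArray now makes the three steps explicit: compute the storage size from the length, the per-target method size, and the alignment; allocate raw bytes; then placement-new the length prefix and each strided slot. A freestanding sketch of that allocate-then-construct pattern, with malloc standing in for LinearAlloc and a toy Method type:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Method { uint32_t dex_method_index; };  // stand-in for ArtMethod

    int main() {
      const size_t length = 4;
      const size_t method_size = sizeof(Method);  // derived from the target in ART
      const size_t method_alignment = alignof(Method);
      // Length prefix rounded up to the element alignment, then the strided slots.
      const size_t data_offset =
          (sizeof(uint32_t) + method_alignment - 1) / method_alignment * method_alignment;
      const size_t storage_size = data_offset + length * method_size;  // ComputeSize()

      void* storage = std::malloc(storage_size);              // LinearAlloc::Alloc in ART
      new (storage) uint32_t{static_cast<uint32_t>(length)};  // construct the length prefix
      for (size_t i = 0; i < length; ++i) {
        // Placement-new each element at its strided offset, as AllocArtMethodArray does.
        void* slot = static_cast<char*>(storage) + data_offset + i * method_size;
        new (slot) Method{static_cast<uint32_t>(i)};
      }
      auto* third = reinterpret_cast<Method*>(
          static_cast<char*>(storage) + data_offset + 2 * method_size);
      printf("method[2].dex_method_index = %u\n", third->dex_method_index);
      std::free(storage);
      return 0;
    }
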
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 9fd8c87435..723ee74eb6 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1868,7 +1868,8 @@ const File& ElfFile::GetFile() const {
DELEGATE_TO_IMPL(GetFile);
}
-bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) {
+bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset,
+ uint64_t* size) const {
if (elf32_.get() == nullptr) {
CHECK(elf64_.get() != nullptr);
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 48cb4b8b2e..1188c97658 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -60,7 +60,7 @@ class ElfFile {
const File& GetFile() const;
- bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size);
+ bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const;
uint64_t FindSymbolAddress(unsigned section_type,
const std::string& symbol_name,
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index eaf26bc462..eaf33f6b7f 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -298,7 +298,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
interface_method->GetArtMethod(), sizeof(void*));
auto* virtual_methods = proxy_class->GetVirtualMethodsPtr();
size_t num_virtuals = proxy_class->NumVirtualMethods();
- size_t method_size = ArtMethod::ObjectSize(sizeof(void*));
+ size_t method_size = ArtMethod::Size(sizeof(void*));
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
reinterpret_cast<uintptr_t>(virtual_methods)) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 47f9b1b88e..c3a962737f 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -331,7 +331,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK_ALIGNED(ArtMethod::ObjectSize(sizeof(void*)), sizeof(void*))
+ DCHECK_ALIGNED(ArtMethod::Size(sizeof(void*)), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c11c134326..fc2a801b7f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -614,7 +614,9 @@ void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
for (size_t i = 0; i < count; ++i) {
auto* root = roots[i];
auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
- MarkObject(&ref);
+ // The root can be in the to-space since we may visit the declaring class of an ArtMethod
+ // multiple times if it is on the call stack.
+ MarkObjectIfNotInToSpace(&ref);
if (*root != ref.AsMirrorPtr()) {
*root = ref.AsMirrorPtr();
}
@@ -624,7 +626,7 @@ void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
- MarkObject(roots[i]);
+ MarkObjectIfNotInToSpace(roots[i]);
}
}
diff --git a/runtime/image.cc b/runtime/image.cc
index ba1e58bb2c..2586959e55 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '8', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -153,19 +153,21 @@ void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base)
for (size_t i = 0; i < array->Length(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
- pos += array->ComputeSize(array->Length(), sizeof(ArtField));
+ pos += array->ComputeSize(array->Length());
}
}
void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
uint8_t* base,
- size_t method_size) const {
+ size_t pointer_size) const {
+ const size_t method_alignment = ArtMethod::Alignment(pointer_size);
+ const size_t method_size = ArtMethod::Size(pointer_size);
for (size_t pos = 0; pos < Size(); ) {
auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
for (size_t i = 0; i < array->Length(); ++i) {
- visitor->Visit(&array->At(i, method_size));
+ visitor->Visit(&array->At(i, method_size, method_alignment));
}
- pos += array->ComputeSize(array->Length(), method_size);
+ pos += array->ComputeSize(array->Length(), method_size, method_alignment);
}
}
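
VisitPackedArtMethods now derives both stride and alignment from pointer_size itself, so callers cannot pass mismatched values; it then walks back-to-back length-prefixed arrays by bumping pos by each array's ComputeSize. A simplified sketch of that walking pattern with fixed, illustrative sizes:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch of walking back-to-back length-prefixed arrays in a flat section,
    // as VisitPackedArtMethods does. Sizes are fixed here for brevity.
    constexpr size_t kElemSize = 8;
    constexpr size_t kHeader = 8;  // uint32_t length rounded up to 8

    int main() {
      // Two arrays packed contiguously: lengths 2 and 3.
      uint8_t section[kHeader + 2 * kElemSize + kHeader + 3 * kElemSize] = {};
      uint32_t len0 = 2, len1 = 3;
      memcpy(section, &len0, sizeof(len0));
      memcpy(section + kHeader + 2 * kElemSize, &len1, sizeof(len1));

      for (size_t pos = 0; pos < sizeof(section); ) {
        uint32_t length;
        memcpy(&length, section + pos, sizeof(length));
        printf("array at offset %zu with %u elements\n", pos, length);
        pos += kHeader + length * kElemSize;  // ComputeSize(length, ...)
      }
      return 0;
    }
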
diff --git a/runtime/image.h b/runtime/image.h
index eb26f7f9b6..1a0d8fd92f 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -65,7 +65,7 @@ class PACKED(4) ImageSection {
}
// Visit ArtMethods in the section starting at base.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t method_size) const;
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
// Visit ArtMethods in the section starting at base.
void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 14683d4063..976936d639 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -101,7 +101,7 @@ static inline int32_t DecodeSignedLeb128(const uint8_t** data) {
static inline uint32_t UnsignedLeb128Size(uint32_t data) {
// bits_to_encode = (data != 0) ? 32 - CLZ(x) : 1 // 32 - CLZ(data | 1)
// bytes = ceil(bits_to_encode / 7.0); // (6 + bits_to_encode) / 7
- uint32_t x = 6 + 32 - CLZ(data | 1);
+ uint32_t x = 6 + 32 - CLZ(data | 1U);
// Division by 7 is done by (x * 37) >> 8 where 37 = ceil(256 / 7).
// This works for 0 <= x < 256 / (7 * 37 - 256), i.e. 0 <= x <= 85.
return (x * 37) >> 8;
@@ -111,7 +111,7 @@ static inline uint32_t UnsignedLeb128Size(uint32_t data) {
static inline uint32_t SignedLeb128Size(int32_t data) {
// Like UnsignedLeb128Size(), but we need one bit beyond the highest bit that differs from sign.
data = data ^ (data >> 31);
- uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1);
+ uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1U);
return (x * 37) >> 8;
}
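
The only change here is `1` to `1U`, which keeps the CLZ argument unsigned now that CLZ static_asserts unsignedness; the `| 1U` also guarantees a nonzero argument. The `(x * 37) >> 8` line is the standard multiply-shift substitute for dividing by 7, valid for x <= 85 as the comment notes. A sketch cross-checking the closed form against a naive encoder loop (GCC/Clang __builtin_clz assumed):

    #include <cstdint>
    #include <cstdio>

    static uint32_t Leb128SizeFast(uint32_t data) {
      uint32_t x = 6 + 32 - __builtin_clz(data | 1U);  // bits to encode, at least 1
      return (x * 37) >> 8;                            // x / 7 for 0 <= x <= 85
    }

    static uint32_t Leb128SizeNaive(uint32_t data) {
      uint32_t bytes = 1;
      while (data >>= 7) ++bytes;  // one byte per 7 payload bits
      return bytes;
    }

    int main() {
      for (uint64_t v = 0; v <= 0xFFFFFFFFull; v = v ? v * 3 + 1 : 1) {
        uint32_t d = static_cast<uint32_t>(v);
        if (Leb128SizeFast(d) != Leb128SizeNaive(d)) {
          printf("mismatch at %u\n", d);
          return 1;
        }
      }
      printf("fast and naive LEB128 sizes agree on sampled values\n");
      return 0;
    }
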
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 2b2e8d34d2..d9bc656673 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -21,6 +21,8 @@
#include "linear_alloc.h"
#include "stride_iterator.h"
+#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/iteration_range.h"
namespace art {
@@ -28,29 +30,35 @@ namespace art {
template<typename T>
class LengthPrefixedArray {
public:
- explicit LengthPrefixedArray(uint64_t length) : length_(length) {}
+ explicit LengthPrefixedArray(size_t length)
+ : length_(dchecked_integral_cast<uint32_t>(length)) {}
- T& At(size_t index, size_t element_size = sizeof(T)) {
+ T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
DCHECK_LT(index, length_);
- return *reinterpret_cast<T*>(&data_[0] + index * element_size);
+ return AtUnchecked(index, element_size, alignment);
}
- StrideIterator<T> Begin(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0]), element_size);
+ StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size);
}
- StrideIterator<T> End(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0] + element_size * length_),
- element_size);
+ StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size);
}
- static size_t OffsetOfElement(size_t index, size_t element_size = sizeof(T)) {
- return offsetof(LengthPrefixedArray<T>, data_) + index * element_size;
+ static size_t OffsetOfElement(size_t index,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ DCHECK_ALIGNED_PARAM(element_size, alignment);
+ return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size;
}
- // Alignment is the caller's responsibility.
- static size_t ComputeSize(size_t num_elements, size_t element_size = sizeof(T)) {
- return OffsetOfElement(num_elements, element_size);
+ static size_t ComputeSize(size_t num_elements,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ size_t result = OffsetOfElement(num_elements, element_size, alignment);
+ DCHECK_ALIGNED_PARAM(result, alignment);
+ return result;
}
uint64_t Length() const {
@@ -58,21 +66,26 @@ class LengthPrefixedArray {
}
// Updates the length but does not reallocate storage.
- void SetLength(uint64_t length) {
- length_ = length;
+ void SetLength(size_t length) {
+ length_ = dchecked_integral_cast<uint32_t>(length);
}
private:
- uint64_t length_; // 64 bits for 8 byte alignment of data_.
- uint8_t data_[0];
+ T& AtUnchecked(size_t index, size_t element_size, size_t alignment) {
+ return *reinterpret_cast<T*>(
+ reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
+ }
+
+ uint32_t length_;
+ uint8_t data[0];
};
// Returns empty iteration range if the array is null.
template<typename T>
IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray(
- LengthPrefixedArray<T>* arr, size_t element_size) {
+ LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
return arr != nullptr ?
- MakeIterationRange(arr->Begin(element_size), arr->End(element_size)) :
+ MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) :
MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
}
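
Two layout changes land here: the length prefix shrinks from uint64_t to uint32_t, and the data offset is rounded up to the element alignment rather than being fixed at 8 bytes. A 4-byte-aligned ArtField array therefore pays only 4 header bytes now, while 8-byte-aligned elements still get a padded header. A compact sketch of just the offset arithmetic under those assumptions:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    // Sketch of the patched layout: 32-bit length, data aligned per element type.
    constexpr size_t OffsetOfElement(size_t index, size_t element_size, size_t alignment) {
      return RoundUp(sizeof(uint32_t), alignment) + index * element_size;
    }

    int main() {
      // 4-byte-aligned elements (ArtField-like): header costs only 4 bytes now.
      printf("field[0] offset:  %zu\n", OffsetOfElement(0, 16, 4));  // 4
      // 8-byte-aligned elements (64-bit ArtMethod): header padded to 8.
      printf("method[0] offset: %zu\n", OffsetOfElement(0, 64, 8));  // 8
      printf("method[3] offset: %zu\n", OffsetOfElement(3, 64, 8));  // 8 + 192 = 200
      return 0;
    }
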
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 887e204a44..ac9cb09731 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -92,14 +92,18 @@ inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size)
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtr();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
template<VerifyObjectFlags kVerifyFlags>
@@ -133,7 +137,9 @@ inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size
CheckPointerSize(pointer_size);
LengthPrefixedArray<ArtMethod>* methods = GetVirtualMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline PointerArray* Class::GetVTable() {
@@ -837,29 +843,31 @@ void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetDirectMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetVirtualMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1ca98e50d8..c337e91cf8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -208,7 +208,7 @@ ALWAYS_INLINE static inline ArtField* FindFieldByName(
}
}
if (kIsDebugBuild) {
- for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields, sizeof(ArtField))) {
+ for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
CHECK_NE(field.GetName(), name->ToModifiedUtf8());
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d54a7a6aa8..a33e150b93 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2419,6 +2419,7 @@ class ReferenceMapVisitor : public StackVisitor {
void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = shadow_frame->GetMethod();
+ VisitDeclaringClass(m);
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
@@ -2459,10 +2460,25 @@ class ReferenceMapVisitor : public StackVisitor {
}
private:
+ // Visiting the declaring class is necessary so that we don't unload the class of a method that
+ // is executing. We need to ensure that the code stays mapped.
+ void VisitDeclaringClass(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* klass = method->GetDeclaringClassNoBarrier();
+ // klass can be null for runtime methods.
+ if (klass != nullptr) {
+ mirror::Object* new_ref = klass;
+ visitor_(&new_ref, -1, this);
+ if (new_ref != klass) {
+ method->CASDeclaringClass(klass, new_ref->AsClass());
+ }
+ }
+ }
+
void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
- auto* cur_quick_frame = GetCurrentQuickFrame();
+ ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
- auto* m = *cur_quick_frame;
+ ArtMethod* m = *cur_quick_frame;
+ VisitDeclaringClass(m);
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
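
VisitDeclaringClass reads the class without a read barrier, hands the visitor a local copy, and only CASes the field back if the visitor forwarded the pointer; a plain store could overwrite a newer value installed by a racing frame walker. A sketch of that visit-then-CAS shape with generic names (Obj and VisitSlot are illustrative, not ART's API):

    #include <atomic>
    #include <cstdio>
    #include <functional>

    struct Obj {};

    // Let a visitor substitute a forwarded pointer, then CAS it back so racing
    // visitors cannot regress the field to a stale value.
    void VisitSlot(std::atomic<Obj*>& slot, const std::function<void(Obj**)>& visitor) {
      Obj* old_ref = slot.load();
      if (old_ref == nullptr) return;  // e.g. runtime methods have no declaring class
      Obj* new_ref = old_ref;
      visitor(&new_ref);               // a moving GC may rewrite new_ref
      if (new_ref != old_ref) {
        slot.compare_exchange_strong(old_ref, new_ref);  // lose gracefully if raced
      }
    }

    int main() {
      Obj from, to;
      std::atomic<Obj*> slot{&from};
      VisitSlot(slot, [&](Obj** ref) { if (*ref == &from) *ref = &to; });
      printf("forwarded: %d\n", slot.load() == &to);
      return 0;
    }
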
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 20512f9765..8aa1189a95 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -30,6 +30,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
+#include "dex_instruction.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -1452,4 +1453,375 @@ std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
+static void DumpMethodCFGImpl(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item,
+ std::ostream& os) {
+ os << "digraph {\n";
+ os << " # /* " << PrettyMethod(dex_method_idx, *dex_file, true) << " */\n";
+
+ std::set<uint32_t> dex_pc_is_branch_target;
+ {
+ // Go and populate.
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (inst->IsBranch()) {
+ dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
+ } else if (inst->IsSwitch()) {
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ dex_pc_is_branch_target.insert(dex_pc + offset);
+ }
+ }
+ }
+ }
+
+ // Create nodes for "basic blocks."
+ std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
+ std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
+
+ {
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ bool first_in_block = true;
+ bool force_new_block = false;
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (dex_pc == 0 ||
+ (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
+ force_new_block) {
+ uint32_t id = dex_pc_to_node_id.size();
+ if (id > 0) {
+ // End last node.
+ os << "}\"];\n";
+ }
+ // Start next node.
+ os << " node" << id << " [shape=record,label=\"{";
+ dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
+ first_in_block = true;
+ force_new_block = false;
+ }
+
+ // Register instruction.
+ dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
+
+ // Print instruction.
+ if (!first_in_block) {
+ os << " | ";
+ } else {
+ first_in_block = false;
+ }
+
+ // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
+ os << "<" << "p" << dex_pc << ">";
+ os << " 0x" << std::hex << dex_pc << std::dec << ": ";
+ std::string inst_str = inst->DumpString(dex_file);
+ size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
+ // we need to escape.
+ while (cur_start != std::string::npos) {
+ size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
+ if (next_escape == std::string::npos) {
+ os << inst_str.substr(cur_start, inst_str.size() - cur_start);
+ break;
+ } else {
+ os << inst_str.substr(cur_start, next_escape - cur_start);
+ // Escape all necessary characters.
+ while (next_escape < inst_str.size()) {
+ char c = inst_str.at(next_escape);
+ if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
+ os << '\\' << c;
+ } else {
+ break;
+ }
+ next_escape++;
+ }
+ if (next_escape >= inst_str.size()) {
+ next_escape = std::string::npos;
+ }
+ cur_start = next_escape;
+ }
+ }
+
+ // Force a new block for some fall-throughs and some instructions that terminate the "local"
+ // control flow.
+ force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
+ }
+ // Close last node.
+ if (dex_pc_to_node_id.size() > 0) {
+ os << "}\"];\n";
+ }
+ }
+
+ // Create edges between them.
+ {
+ std::ostringstream regular_edges;
+ std::ostringstream taken_edges;
+ std::ostringstream exception_edges;
+
+ // Common set of exception edges.
+ std::set<uint32_t> exception_targets;
+
+    // These blocks (given by the first dex pc) need per-dex-pc exception handling in a second
+    // pass. In the first pass we check whether a common set of exception edges suffices.
+ std::set<uint32_t> blocks_with_detailed_exceptions;
+
+ {
+ uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
+ uint32_t old_dex_pc = 0;
+ uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ {
+ auto it = dex_pc_to_node_id.find(dex_pc);
+ if (it != dex_pc_to_node_id.end()) {
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+
+ block_start_dex_pc = dex_pc;
+
+          // Seems to be a fall-through, connect to last_node_id. This may create spurious
+          // edges for things like switch data.
+ uint32_t old_last = last_node_id;
+ last_node_id = it->second;
+ if (old_last != std::numeric_limits<uint32_t>::max()) {
+ regular_edges << " node" << old_last << ":p" << old_dex_pc
+ << " -> node" << last_node_id << ":p" << dex_pc
+ << ";\n";
+ }
+ }
+
+ // Look at the exceptions of the first entry.
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ exception_targets.insert(catch_it.GetHandlerAddress());
+ }
+ }
+
+ // Handle instruction.
+
+ // Branch: something with at most two targets.
+ if (inst->IsBranch()) {
+ const int32_t offset = inst->GetTargetOffset();
+ const bool conditional = !inst->IsUnconditional();
+
+ auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (dex_pc + offset)
+ << ";\n";
+ }
+ if (!conditional) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ } else if (inst->IsSwitch()) {
+ // TODO: Iterate through all switch targets.
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+          /* the verifier already checked that the switch payload is in range */
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ /* offset to switch table is a relative branch-style offset */
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+          /* the verifier already checked the payload bounds */
+          /* emit an edge for each switch target */
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t abs_offset = dex_pc + offset;
+ auto target_it = dex_pc_to_node_id.find(abs_offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ // TODO: value label.
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (abs_offset)
+ << ";\n";
+ }
+ }
+ }
+
+      // Exception edges: check whether a non-leading instruction's handlers match the block's.
+ if (block_start_dex_pc != dex_pc) {
+ std::set<uint32_t> current_handler_pcs;
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ current_handler_pcs.insert(catch_it.GetHandlerAddress());
+ }
+ if (current_handler_pcs != exception_targets) {
+ exception_targets.clear(); // Clear so we don't do something at the end.
+ blocks_with_detailed_exceptions.insert(block_start_dex_pc);
+ }
+ }
+
+ if (inst->IsReturn() ||
+ (inst->Opcode() == Instruction::THROW) ||
+ (inst->IsBranch() && inst->IsUnconditional())) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ }
+ // Finish up the last block, if it had common exceptions.
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+ }
+
+ // Second pass for detailed exception blocks.
+ // TODO
+  // Walk each such block and emit its exception edges instruction by instruction.
+ for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
+ const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
+ uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
+ while (true) {
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ if (catch_it.HasNext()) {
+ std::set<uint32_t> handled_targets;
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ uint32_t handler_pc = catch_it.GetHandlerAddress();
+ auto it = handled_targets.find(handler_pc);
+ if (it == handled_targets.end()) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << this_node_id << ":p" << dex_pc
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+
+ // Mark as done.
+ handled_targets.insert(handler_pc);
+ }
+ }
+ }
+ if (inst->IsBasicBlockEnd()) {
+ break;
+ }
+
+      // Loop update. Break out if the next instruction is a branch target and thus in
+      // another block.
+ dex_pc += inst->SizeInCodeUnits();
+ if (dex_pc >= code_item->insns_size_in_code_units_) {
+ break;
+ }
+ if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
+ break;
+ }
+ inst = inst->Next();
+ }
+ }
+
+ // Write out the sub-graphs to make edges styled.
+ os << "\n";
+ os << " subgraph regular_edges {\n";
+ os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
+ os << " " << regular_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph taken_edges {\n";
+ os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
+ os << " " << taken_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph exception_edges {\n";
+ os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
+ os << " " << exception_edges.str() << "\n";
+ os << " }\n\n";
+ }
+
+ os << "}\n";
+}
+
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) {
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+
+ DumpMethodCFGImpl(dex_file, method->GetDexMethodIndex(), code_item, os);
+}
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
+  // This is painful: we need to find the code item, which means finding the class and then
+  // iterating its class data.
+ if (dex_method_idx >= dex_file->NumMethodIds()) {
+ os << "Could not find method-idx.";
+ return;
+ }
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
+ if (class_def == nullptr) {
+ os << "Could not find class-def.";
+ return;
+ }
+
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ if (class_data == nullptr) {
+ os << "No class data.";
+ return;
+ }
+
+ ClassDataItemIterator it(*dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ // Find method, and dump it.
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == dex_method_idx) {
+ DumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
+ return;
+ }
+ it.Next();
+ }
+
+ // Otherwise complain.
+ os << "Something went wrong, didn't find the method in the class data.";
+}
+
} // namespace art
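
DumpMethodCFGImpl decodes dex switch payloads by hand in two places: insns[1] and insns[2] hold a 32-bit relative offset to the payload, whose layout is signature, count, then (for packed switches) the first key followed by count 32-bit targets, each split across two little-endian 16-bit code units. A self-contained sketch of the packed case (the 0x0100 signature is the dex-format constant; the sample payload is made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Packed-switch payload signature from the dex file format.
    constexpr uint16_t kPackedSwitchSignature = 0x0100;

    // Decode target offsets from a packed-switch payload, mirroring the
    // pointer arithmetic in DumpMethodCFGImpl.
    std::vector<int32_t> DecodePackedSwitch(const uint16_t* payload) {
      const uint32_t count = payload[1];
      // Layout: 0 = signature, 1 = count, 2/3 = first key, then `count` targets.
      const size_t targets_offset = 4;
      std::vector<int32_t> targets;
      for (uint32_t i = 0; i < count; ++i) {
        uint32_t lo = payload[targets_offset + i * 2];
        uint32_t hi = payload[targets_offset + i * 2 + 1];
        // Reassemble the 32-bit target; two's-complement narrowing for negatives.
        targets.push_back(static_cast<int32_t>(lo | (hi << 16)));
      }
      return targets;
    }

    int main() {
      // signature, count = 2, first key = 10 (lo, hi), targets 0x20 and -4.
      const uint16_t payload[] = {kPackedSwitchSignature, 2, 10, 0,
                                  0x0020, 0x0000, 0xFFFC, 0xFFFF};
      for (int32_t t : DecodePackedSwitch(payload)) {
        printf("target offset %d\n", t);
      }
      return 0;
    }
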
diff --git a/runtime/utils.h b/runtime/utils.h
index 4fa5f5a539..d1be51aff7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -324,6 +324,9 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) {
return pointer_size == 4 || pointer_size == 8;
}
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 16615340bd..1828b91e2a 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -53,6 +53,9 @@ static constexpr bool kTimeVerifyMethod = !kIsDebugBuild;
static constexpr bool gDebugVerify = false;
// TODO: Add a constant to method_verifier to turn on verbose logging?
+// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
+static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
@@ -638,6 +641,12 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
have_pending_hard_failure_ = true;
+ if (VLOG_IS_ON(verifier) && kDumpRegLinesOnHardFailureIfVLOG) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::ostringstream oss;
+ Dump(oss);
+ LOG(ERROR) << oss.str();
+ }
break;
}
}
@@ -1034,8 +1043,8 @@ bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
DCHECK_LT(cur_offset, insn_count);
/* make sure the start of the array data table is in range */
- array_data_offset = insns[1] | (((int32_t) insns[2]) << 16);
- if ((int32_t) cur_offset + array_data_offset < 0 ||
+ array_data_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ if (static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
cur_offset + array_data_offset + 2 >= insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
<< ", data offset " << array_data_offset
@@ -1147,8 +1156,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
DCHECK_LT(cur_offset, insn_count);
const uint16_t* insns = code_item_->insns_ + cur_offset;
/* make sure the start of the switch is in range */
- int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
- if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 > insn_count) {
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ if (static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
+ cur_offset + switch_offset + 2 > insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", count " << insn_count;
@@ -1204,8 +1214,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
if (keys_offset > 0 && switch_count > 1) {
int32_t last_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
for (uint32_t targ = 1; targ < switch_count; targ++) {
- int32_t key = (int32_t) switch_insns[keys_offset + targ * 2] |
- (int32_t) (switch_insns[keys_offset + targ * 2 + 1] << 16);
+ int32_t key =
+ static_cast<int32_t>(switch_insns[keys_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[keys_offset + targ * 2 + 1] << 16);
if (key <= last_key) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: last key=" << last_key
<< ", this=" << key;
@@ -1216,11 +1227,11 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
}
/* verify each switch target */
for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] |
- (int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t offset = static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
- abs_offset >= (int32_t) insn_count ||
+ abs_offset >= static_cast<int32_t>(insn_count) ||
!insn_flags_[abs_offset].IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
@@ -1319,7 +1330,7 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
ScopedIndentation indent1(vios);
const Instruction* inst = Instruction::At(code_item_->insns_);
for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits()) {
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
if (reg_line != nullptr) {
vios->Stream() << reg_line->Dump(this) << "\n";
@@ -1331,7 +1342,6 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
vios->Stream() << inst->DumpHex(5) << " ";
}
vios->Stream() << inst->DumpString(dex_file_) << "\n";
- inst = inst->Next();
}
}
@@ -2139,7 +2149,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
// Now verify if the element width in the table matches the element width declared in
// the array
- const uint16_t* array_data = insns + (insns[1] | (((int32_t) insns[2]) << 16));
+ const uint16_t* array_data =
+ insns + (insns[1] | (static_cast<int32_t>(insns[2]) << 16));
if (array_data[0] != Instruction::kArrayDataSignature) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid magic for array-data";
} else {
@@ -3077,7 +3088,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* just need to walk through and tag the targets.
*/
if ((opcode_flags & Instruction::kSwitch) != 0) {
- int offset_to_switch = insns[1] | (((int32_t) insns[2]) << 16);
+ int offset_to_switch = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
const uint16_t* switch_insns = insns + offset_to_switch;
int switch_count = switch_insns[1];
int offset_to_targets, targ;
@@ -3098,7 +3109,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* offsets are 32-bit, and only partly endian-swapped */
offset = switch_insns[offset_to_targets + targ * 2] |
- (((int32_t) switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
+ (static_cast<int32_t>(switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
abs_offset = work_insn_idx_ + offset;
DCHECK_LT(abs_offset, code_item_->insns_size_in_code_units_);
if (!CheckNotMoveExceptionOrMoveResult(code_item_->insns_, abs_offset)) {
@@ -3938,7 +3949,24 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime.
// Still check that the given value matches the instruction's type.
- work_line_->VerifyRegisterType(this, inst->VRegA_23x(), insn_type);
+      // Note: this is, as usual, complicated by the fact that the instruction isn't fully typed
+ // and fits multiple register types.
+ const RegType* modified_reg_type = &insn_type;
+ if ((modified_reg_type == &reg_types_.Integer()) ||
+ (modified_reg_type == &reg_types_.LongLo())) {
+        // May be integer/float or long/double. Overwrite insn_type accordingly.
+ const RegType& value_type = work_line_->GetRegisterType(this, inst->VRegA_23x());
+ if (modified_reg_type == &reg_types_.Integer()) {
+ if (&value_type == &reg_types_.Float()) {
+ modified_reg_type = &value_type;
+ }
+ } else {
+ if (&value_type == &reg_types_.DoubleLo()) {
+ modified_reg_type = &value_type;
+ }
+ }
+ }
+ work_line_->VerifyRegisterType(this, inst->VRegA_23x(), *modified_reg_type);
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
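
The MethodVerifier::Dump fix above moves `inst = inst->Next()` into the for-update clause so dex_pc and the instruction pointer always advance together, instead of relying on a statement at the bottom of the body that a later `continue` could skip. A toy illustration of the same lockstep idiom over a variable-width instruction stream:

    #include <cstdio>
    #include <vector>

    // Variable-width instruction stream: each entry's width in code units.
    struct Inst { int width; };

    int main() {
      std::vector<Inst> stream = {{1}, {3}, {2}, {1}};
      // Keep pc and the index in lockstep in the update clause, as the patched
      // Dump loop does, so a future `continue` cannot skip the advance.
      size_t idx = 0;
      for (int pc = 0; idx < stream.size(); pc += stream[idx].width, ++idx) {
        printf("pc %d: width %d\n", pc, stream[idx].width);
      }
      return 0;
    }
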