34 files changed, 268 insertions, 68 deletions
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 5ff8a53de9..22fcf87524 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -76,8 +76,8 @@ namespace art {
 
 static constexpr bool kTimeCompileMethod = !kIsDebugBuild;
 
-// Whether to produce 64-bit ELF files for 64-bit targets. Leave this off for now.
-static constexpr bool kProduce64BitELFFiles = false;
+// Whether to produce 64-bit ELF files for 64-bit targets.
+static constexpr bool kProduce64BitELFFiles = true;
 
 // Whether classes-to-compile and methods-to-compile are only applied to the boot image, or, when
 // given, too all compilations.
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index dbdd3504d3..772cc80146 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -105,6 +105,7 @@ TEST_F(ImageTest, WriteRead) {
         << oat_file.GetFilename();
   }
 
+  uint64_t image_file_size;
   {
     std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
     ASSERT_TRUE(file.get() != nullptr);
@@ -121,7 +122,8 @@ TEST_F(ImageTest, WriteRead) {
     ASSERT_FALSE(space->IsImageSpace());
     ASSERT_TRUE(space != nullptr);
     ASSERT_TRUE(space->IsMallocSpace());
-    ASSERT_LE(space->Size(), static_cast<size_t>(file->GetLength()));
+
+    image_file_size = file->GetLength();
   }
 
   ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
@@ -167,6 +169,9 @@ TEST_F(ImageTest, WriteRead) {
   ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
 
   gc::space::ImageSpace* image_space = heap->GetImageSpace();
+  ASSERT_TRUE(image_space != nullptr);
+  ASSERT_LE(image_space->Size(), image_file_size);
+
   image_space->VerifyImageAllocations();
   uint8_t* image_begin = image_space->Begin();
   uint8_t* image_end = image_space->End();
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index d0a926f334..a1d8226f36 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -243,6 +243,9 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
       compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
       vmap_table_ptr, gc_map_ptr);
 
+  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
+                          reinterpret_cast<char*>(code_ptr + quick_code->size()));
+
   const size_t thumb_offset = compiled_method->CodeDelta();
   const uint32_t code_offset = code_ptr - base + thumb_offset;
   *out_method = OatFile::OatMethod(base, code_offset);
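The __builtin___clear_cache call added in jit_compiler.cc flushes the instruction cache for the range the JIT just wrote, which architectures with incoherent instruction and data caches (notably ARM) require before the new code may be executed. A minimal standalone sketch of the same pattern, with a hypothetical helper name and buffer:

#include <cstddef>
#include <cstdint>

// Hypothetical helper: after emitting machine code into an executable
// mapping, flush the instruction cache for that range before jumping to it.
// On x86 the builtin is effectively a no-op; on ARM/ARM64 it is required
// because data-side writes are not automatically visible to instruction fetch.
void CommitGeneratedCode(uint8_t* code, size_t size) {
  __builtin___clear_cache(reinterpret_cast<char*>(code),
                          reinterpret_cast<char*>(code + size));
}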
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7f0be05732..987a6c4ff7 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -55,7 +55,7 @@ static constexpr SRegister kFpuCalleeSaves[] =
 // S registers. Therefore there is no need to block it.
 static constexpr DRegister DTMP = D31;
 
-#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
+#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
 
 class NullCheckSlowPathARM : public SlowPathCodeARM {
@@ -318,7 +318,7 @@ class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 #undef __
 
 #undef __
-#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
+#define __ down_cast<ArmAssembler*>(GetAssembler())->
 
 inline Condition ARMCondition(IfCondition cond) {
   switch (cond) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8678428bf3..f63a5d20c0 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -44,7 +44,7 @@ static constexpr int kC2ConditionMask = 0x400;
 
 static constexpr int kFakeReturnRegister = Register(8);
 
-#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
+#define __ down_cast<X86Assembler*>(codegen->GetAssembler())->
 
 class NullCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
@@ -324,7 +324,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCodeX86 {
 };
 
 #undef __
-#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
+#define __ down_cast<X86Assembler*>(GetAssembler())->
 
 inline Condition X86Condition(IfCondition cond) {
   switch (cond) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 78d585115a..ca9a154717 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -46,7 +46,7 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15
 
 static constexpr int kC2ConditionMask = 0x400;
 
-#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
+#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->
 
 class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
@@ -343,7 +343,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCodeX86_64 {
 };
 
 #undef __
-#define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())->
+#define __ down_cast<X86_64Assembler*>(GetAssembler())->
 
 inline Condition X86_64Condition(IfCondition cond) {
   switch (cond) {
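Switching the assembler macros from reinterpret_cast to down_cast keeps the conversion inside the Assembler class hierarchy instead of accepting arbitrary pointer reinterpretation. A simplified sketch of the checked down-cast idiom (not ART's exact base/casts.h implementation):

#include <type_traits>

// Simplified sketch: refuses to compile unless the destination type is
// actually derived from the source type, unlike reinterpret_cast, which
// accepts any pointer type at all.
template <typename To, typename From>  // `To` is expected to be a pointer type.
inline To down_cast(From* f) {
  static_assert(
      std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
      "down_cast can only cast towards a derived type");
  return static_cast<To>(f);
}

With this shape, down_cast<ArmAssembler*>(codegen->GetAssembler()) compiles only because ArmAssembler derives from the Assembler type that GetAssembler() returns.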
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index b31de98e25..17a006cc3a 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -122,7 +122,12 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
     if (!inst->HasSideEffects()
         && !inst->CanThrow()
         && !inst->IsSuspendCheck()
-        && !inst->IsMemoryBarrier()  // If we added an explicit barrier then we should keep it.
+        // The current method needs to stay in the graph in case of inlining.
+        // It is always passed anyway, and keeping it in the graph does not
+        // affect the generated code.
+        && !inst->IsCurrentMethod()
+        // If we added an explicit barrier then we should keep it.
+        && !inst->IsMemoryBarrier()
         && !inst->HasUses()) {
       block->RemoveInstruction(inst);
       MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5e784d7312..a381315bac 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -802,9 +802,9 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
   current->SetRegister(reg);
   if (!current->IsDeadAt(free_until[reg])) {
     // If the register is only available for a subset of live ranges
-    // covered by `current`, split `current` at the position where
+    // covered by `current`, split `current` before the position where
     // the register is not available anymore.
-    LiveInterval* split = Split(current, free_until[reg]);
+    LiveInterval* split = SplitBetween(current, current->GetStart(), free_until[reg]);
     DCHECK(split != nullptr);
     AddSorted(unhandled_, split);
   }
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index d07a14a3e6..b7da36299d 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -426,6 +426,13 @@ TEST(RegisterAllocatorTest, FreeUntil) {
   // Add an artifical range to cover the temps that will be put in the unhandled list.
   LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
   unhandled->AddLoopRange(0, 60);
+
+  // Populate the instructions in the liveness object, to please the register allocator.
+  for (size_t i = 0; i < 60; ++i) {
+    liveness.instructions_from_lifetime_position_.Add(
+        graph->GetEntryBlock()->GetFirstInstruction());
+  }
+
   // For SSA value intervals, only an interval resulted from a split may intersect
   // with inactive intervals.
   unhandled = register_allocator.Split(unhandled, 5);
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 250eb04a1c..d5f977feec 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -341,6 +341,7 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
     // starts at. If one location is a register we return it as a hint. This
     // will avoid a move between the two blocks.
     HBasicBlock* block = liveness.GetBlockFromPosition(GetStart() / 2);
+    size_t next_register_use = FirstRegisterUse();
     for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) {
       size_t position = block->GetPredecessors().Get(i)->GetLifetimeEnd() - 1;
       // We know positions above GetStart() do not have a location yet.
@@ -348,7 +349,9 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
       LiveInterval* existing = GetParent()->GetSiblingAt(position);
       if (existing != nullptr
           && existing->HasRegister()
-          && (free_until[existing->GetRegister()] > GetStart())) {
+          // It's worth using that register if it is available until
+          // the next use.
+          && (free_until[existing->GetRegister()] >= next_register_use)) {
         return existing->GetRegister();
       }
     }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 4cbe29ae42..4667825a62 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1219,6 +1219,7 @@ class SsaLivenessAnalysis : public ValueObject {
   size_t number_of_ssa_values_;
 
   ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
+  ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
 
   DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
 };
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 98e14ea9a6..666fb604c0 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -136,7 +136,7 @@ TEST(StackMapTest, Test2) {
 
   ArenaBitVector sp_mask2(&arena, 0, true);
   sp_mask2.SetBit(3);
-  sp_mask1.SetBit(8);
+  sp_mask2.SetBit(8);
   stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kInRegister, 18);    // Short location.
   stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3);  // Short location.
@@ -148,7 +148,7 @@ TEST(StackMapTest, Test2) {
   stream.FillIn(region);
 
   CodeInfo code_info(region);
-  ASSERT_EQ(1u, code_info.GetStackMaskSize());
+  ASSERT_EQ(2u, code_info.GetStackMaskSize());
   ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_location_catalog_entries =
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 17835f5610..afa8dc187e 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -21,6 +21,7 @@
 #include <iterator>
 
 #include "base/bit_utils.h"
+#include "globals.h"
 
 namespace art {
 
@@ -229,6 +230,19 @@ class BitVector {
   // Number of bits set in range [0, end) in storage. (No range check.)
   static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
 
+  // Fill given memory region with the contents of the vector and zero padding.
+  void CopyTo(void* dst, size_t len) const {
+    DCHECK_LE(static_cast<size_t>(GetHighestBitSet() + 1), len * kBitsPerByte);
+    size_t vec_len = GetSizeOf();
+    if (vec_len < len) {
+      void* dst_padding = reinterpret_cast<uint8_t*>(dst) + vec_len;
+      memcpy(dst, storage_, vec_len);
+      memset(dst_padding, 0, len - vec_len);
+    } else {
+      memcpy(dst, storage_, len);
+    }
+  }
+
   void Dump(std::ostream& os, const char* prefix) const;
 
  private:
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index c51b9b0570..19c01f20e7 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -211,4 +211,62 @@ TEST(BitVector, Subset) {
   }
 }
 
+TEST(BitVector, CopyTo) {
+  {
+    // Test copying an empty BitVector. Padding should fill `buf` with zeroes.
+    BitVector bv(0, true, Allocator::GetMallocAllocator());
+    uint32_t buf;
+
+    bv.CopyTo(&buf, sizeof(buf));
+    EXPECT_EQ(0u, bv.GetSizeOf());
+    EXPECT_EQ(0u, buf);
+  }
+
+  {
+    // Test copying when `bv.storage_` and `buf` are of equal lengths.
+    BitVector bv(0, true, Allocator::GetMallocAllocator());
+    uint32_t buf;
+
+    bv.SetBit(0);
+    bv.SetBit(17);
+    bv.SetBit(26);
+    EXPECT_EQ(sizeof(buf), bv.GetSizeOf());
+
+    bv.CopyTo(&buf, sizeof(buf));
+    EXPECT_EQ(0x04020001u, buf);
+  }
+
+  {
+    // Test copying when the `bv.storage_` is longer than `buf`. As long as
+    // `buf` is long enough to hold all set bits, copying should succeed.
+    BitVector bv(0, true, Allocator::GetMallocAllocator());
+    uint8_t buf[5];
+
+    bv.SetBit(18);
+    bv.SetBit(39);
+    EXPECT_LT(sizeof(buf), bv.GetSizeOf());
+
+    bv.CopyTo(buf, sizeof(buf));
+    EXPECT_EQ(0x00u, buf[0]);
+    EXPECT_EQ(0x00u, buf[1]);
+    EXPECT_EQ(0x04u, buf[2]);
+    EXPECT_EQ(0x00u, buf[3]);
+    EXPECT_EQ(0x80u, buf[4]);
+  }
+
+  {
+    // Test zero padding when `bv.storage_` is shorter than `buf`.
+    BitVector bv(0, true, Allocator::GetMallocAllocator());
+    uint32_t buf[2];
+
+    bv.SetBit(18);
+    bv.SetBit(31);
+    EXPECT_GT(sizeof(buf), bv.GetSizeOf());
+
+    bv.CopyTo(buf, sizeof(buf));
+    EXPECT_EQ(0x80040000U, buf[0]);
+    EXPECT_EQ(0x00000000U, buf[1]);
+  }
+}
+
 }  // namespace art
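The byte-level expectations in the uint8_t buffer case assume the uint32_t storage words are laid out little-endian in memory, so bit k of the vector lands in byte k / 8 of the destination at bit position k % 8. A small illustration of that mapping (hypothetical helper, not part of the BitVector API):

#include <cstddef>
#include <cstdint>

// Where a given bit index ends up in the destination of BitVector::CopyTo,
// assuming little-endian storage words as the expectations above imply.
struct BitLocation {
  size_t byte_index;
  uint8_t mask;
};

inline BitLocation LocateBit(uint32_t bit) {
  return BitLocation{bit / 8u, static_cast<uint8_t>(1u << (bit % 8u))};
}

// LocateBit(18) -> byte 2, mask 0x04; LocateBit(39) -> byte 4, mask 0x80,
// matching the expected buf[2] and buf[4] values above.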
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 84da475e7d..4266c4ab9e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1264,12 +1264,18 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
     // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
     for (GcRoot<mirror::Class>& root : class_table_) {
       buffered_visitor.VisitRoot(root);
-      root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+      if ((flags & kVisitRootFlagNonMoving) == 0) {
+        // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since
+        // these roots are all reachable from the class or dex cache.
+        root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+      }
     }
     // PreZygote classes can't move so we won't need to update fields' declaring classes.
     for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
       buffered_visitor.VisitRoot(root);
-      root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+      if ((flags & kVisitRootFlagNonMoving) == 0) {
+        root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+      }
     }
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
     for (auto& root : new_class_roots_) {
@@ -4800,11 +4806,11 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
         }
         ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
         uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
-        auto*& imt_ref = out_imt[imt_index];
-        if (imt_ref == unimplemented_method) {
-          imt_ref = method;
-        } else if (imt_ref != conflict_method) {
-          imt_ref = conflict_method;
+        auto** imt_ref = &out_imt[imt_index];
+        if (*imt_ref == unimplemented_method) {
+          *imt_ref = method;
+        } else if (*imt_ref != conflict_method) {
+          *imt_ref = conflict_method;
         }
       }
     }
@@ -4959,45 +4965,54 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
       ++out;
     }
   }
 
-  UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count);
-  // Done copying methods, they are all reachable from the class now, so we can end the no thread
-  // suspension assert.
-  self->EndAssertNoThreadSuspension(old_cause);
-
-  size_t old_vtable_count = vtable->GetLength();
-  const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
-  vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
-  if (UNLIKELY(vtable.Get() == nullptr)) {
-    self->AssertPendingOOMException();
-    return false;
-  }
   StrideIterator<ArtMethod> out(
       reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size);
+  // Copy the miranda methods before making a copy of the vtable so that moving GC doesn't miss
+  // any roots. This is necessary since these miranda methods won't get their roots visited from
+  // the class table root visiting until they are copied to the new virtuals array.
+  const size_t old_vtable_count = vtable->GetLength();
+  const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
+  size_t method_idx = old_vtable_count;
   for (auto* mir_method : miranda_methods) {
     ArtMethod* out_method = &*out;
-    out->CopyFrom(mir_method, image_pointer_size_);
     // Leave the declaring class alone as type indices are relative to it
+    out_method->CopyFrom(mir_method, image_pointer_size_);
     out_method->SetAccessFlags(out_method->GetAccessFlags() | kAccMiranda);
-    out_method->SetMethodIndex(0xFFFF & old_vtable_count);
-    vtable->SetElementPtrSize(old_vtable_count, out_method, image_pointer_size_);
+    out_method->SetMethodIndex(0xFFFF & method_idx);
     move_table.emplace(mir_method, out_method);
     ++out;
-    ++old_vtable_count;
+    ++method_idx;
+  }
+  DCHECK_EQ(new_vtable_count, method_idx);
+  UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count);
+  // Done copying methods, they are all reachable from the class now, so we can end the no thread
+  // suspension assert.
+  self->EndAssertNoThreadSuspension(old_cause);
+  vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
+  if (UNLIKELY(vtable.Get() == nullptr)) {
+    self->AssertPendingOOMException();
+    return false;
   }
-  // Update old vtable methods.
-  for (size_t i = 0; i < old_vtable_count - miranda_methods.size(); ++i) {
-    auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
+  for (method_idx = 0; method_idx < old_vtable_count; ++method_idx) {
+    auto* m = vtable->GetElementPtrSize<ArtMethod*>(method_idx, image_pointer_size_);
     DCHECK(m != nullptr) << PrettyClass(klass.Get());
     auto it = move_table.find(m);
     if (it != move_table.end()) {
       auto* new_m = it->second;
       DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
-      vtable->SetElementPtrSize(i, new_m, image_pointer_size_);
+      vtable->SetElementPtrSize(method_idx, new_m, image_pointer_size_);
     }
   }
+  // Update miranda methods.
+  out = StrideIterator<ArtMethod>(
+      reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size);
+  for (; method_idx < new_vtable_count; ++method_idx) {
+    vtable->SetElementPtrSize(method_idx, &*out, image_pointer_size_);
+    ++out;
+  }
+
   klass->SetVTable(vtable.Get());
-  CHECK_EQ(old_vtable_count, new_vtable_count);
   // Go fix up all the stale miranda pointers.
   for (size_t i = 0; i < ifcount; ++i) {
     for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7999559aaf..24615e2a66 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1467,7 +1467,7 @@ std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
   if (m == nullptr) {
     return "null";
   }
-  return m->GetName();
+  return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
 }
 
 std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
@@ -1590,8 +1590,9 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g
     ArtMethod* m = i < direct_method_count ?
         c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size);
     expandBufAddMethodId(pReply, ToMethodId(m));
-    expandBufAddUtf8String(pReply, m->GetName());
-    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
+    expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
+    expandBufAddUtf8String(pReply,
+                           m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
     if (with_generic) {
       const char* generic_signature = "";
       expandBufAddUtf8String(pReply, generic_signature);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7c92b18e60..042c33f7ff 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2083,7 +2083,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx,
       return GetTwoWordFailureValue();  // Failure.
     }
   } else {
-    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
+    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
    if (kIsDebugBuild) {
       uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
       const DexFile::CodeItem* code = caller_method->GetCodeItem();
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 91a0e8a471..1c9c41204a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -590,7 +590,8 @@ void MarkSweep::MarkNonThreadRoots() {
 void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   // Visit all runtime roots and clear dirty flags.
-  Runtime::Current()->VisitConcurrentRoots(this, flags);
+  Runtime::Current()->VisitConcurrentRoots(
+      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
 }
 
 class ScanObjectVisitor {
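The MarkConcurrentRoots change ORs kVisitRootFlagNonMoving into the incoming flags. Because VisitRootFlags is an unscoped enum, the bitwise OR is performed on the promoted integer values and yields an int, so the result must be cast back before it can be passed on as a VisitRootFlags; that is all the static_cast does. A minimal sketch with stand-in names:

#include <cstdint>

// Stand-in for runtime.h's VisitRootFlags (an unscoped enum used as a bit set).
enum DemoRootFlags : uint8_t {
  kDemoFlagAllRoots = 0x1,
  kDemoFlagNonMoving = 0x20,
};

void VisitDemoRoots(DemoRootFlags) { /* hypothetical consumer */ }

void MarkDemoRoots(DemoRootFlags flags) {
  // `flags | kDemoFlagNonMoving` has type int, so cast back to the enum type.
  VisitDemoRoots(static_cast<DemoRootFlags>(flags | kDemoFlagNonMoving));
}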
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fbde4947c0..59d0259f2b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1630,7 +1630,12 @@ size_t Heap::GetObjectsAllocated() const {
 }
 
 uint64_t Heap::GetObjectsAllocatedEver() const {
-  return GetObjectsFreedEver() + GetObjectsAllocated();
+  uint64_t total = GetObjectsFreedEver();
+  // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
+  if (Thread::Current() != nullptr) {
+    total += GetObjectsAllocated();
+  }
+  return total;
 }
 
 uint64_t Heap::GetBytesAllocatedEver() const {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 482640b51a..f0b7bfddea 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -806,7 +806,7 @@ void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize],
   for (size_t i = 0; i < kImtSize; i++) {
     auto method = methods[i];
     DCHECK(method != nullptr);
-    SetEmbeddedImTableEntry(i, Runtime::Current()->GetImtConflictMethod(), pointer_size);
+    SetEmbeddedImTableEntry(i, method, pointer_size);
   }
   PointerArray* table = GetVTableDuringLinking();
   CHECK(table != nullptr) << PrettyClass(this);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 744e7b5b79..f1c96b5007 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -435,7 +435,7 @@ class MANAGED LOCKABLE Object {
           field_offset, static_cast<int32_t>(ptr));
     } else {
       SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
-          field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+          field_offset, static_cast<int64_t>(reinterpret_cast<uintptr_t>(new_value)));
     }
   }
   // TODO fix thread safety analysis broken by the use of template. This should be
@@ -469,8 +469,8 @@ class MANAGED LOCKABLE Object {
     } else {
       int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
       // Check that we dont lose any non 0 bits.
-      DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
-      return reinterpret_cast<T>(v);
+      DCHECK_EQ(static_cast<int64_t>(static_cast<uintptr_t>(v)), v);
+      return reinterpret_cast<T>(static_cast<uintptr_t>(v));
     }
   }
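The mirror::Object change matters on 32-bit targets: converting a pointer in the upper half of the address space to intptr_t produces a negative value, and widening that to int64_t sign-extends it, so the stored 64-bit field no longer equals the raw address; going through uintptr_t zero-extends instead. A small sketch of the difference, using plain integers in place of pointers (the address value is hypothetical):

#include <cstdint>

// Models intptr_t/uintptr_t on a 32-bit target with int32_t/uint32_t.
int64_t WidenViaSigned(uint32_t raw_address) {
  // Sign-extends: 0x80000000 becomes the 64-bit pattern 0xFFFFFFFF80000000.
  return static_cast<int64_t>(static_cast<int32_t>(raw_address));
}

int64_t WidenViaUnsigned(uint32_t raw_address) {
  // Zero-extends: 0x80000000 stays 0x0000000080000000, matching the address.
  return static_cast<int64_t>(raw_address);
}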
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 224d266a15..1c21edbc42 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -71,9 +71,18 @@ bool Throwable::IsCheckedException() {
 
 int32_t Throwable::GetStackDepth() {
   Object* stack_state = GetStackState();
-  if (stack_state == nullptr || !stack_state->IsObjectArray()) return -1;
-  ObjectArray<Object>* method_trace = down_cast<ObjectArray<Object>*>(stack_state);
-  return method_trace->GetLength() - 1;
+  if (stack_state == nullptr) {
+    return -1;
+  }
+  if (!stack_state->IsIntArray() && !stack_state->IsLongArray()) {
+    return -1;
+  }
+  mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray());
+  int32_t array_len = method_trace->GetLength();
+  // The format is [method pointers][pcs] so the depth is half the length (see method
+  // BuildInternalStackTraceVisitor::Init).
+  CHECK_EQ(array_len % 2, 0);
+  return array_len / 2;
 }
 
 std::string Throwable::Dump() {
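With the new representation, the stored backtrace is a single pointer array whose first half holds the method pointers and whose second half holds the dex pcs, so the depth is simply half the array length. A sketch of the indexing arithmetic over a plain array (names are illustrative, not ART's API; the real data lives in a mirror::PointerArray):

#include <cstdint>
#include <vector>

// [method pointers][pcs] layout, as described in BuildInternalStackTraceVisitor::Init.
struct FlatStackTrace {
  std::vector<uintptr_t> data;  // Length is always 2 * depth.

  int32_t Depth() const { return static_cast<int32_t>(data.size() / 2); }
  uintptr_t MethodAt(int32_t frame) const { return data[frame]; }
  uintptr_t DexPcAt(int32_t frame) const { return data[Depth() + frame]; }
};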
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f6f9725a0a..e569333bf0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -98,6 +98,9 @@ enum VisitRootFlags : uint8_t {
   kVisitRootFlagStartLoggingNewRoots = 0x4,
   kVisitRootFlagStopLoggingNewRoots = 0x8,
   kVisitRootFlagClearRootLog = 0x10,
+  // Non moving means we can have optimizations where we don't visit some roots if they are
+  // definitely reachable from another location. E.g. ArtMethod and ArtField roots.
+  kVisitRootFlagNonMoving = 0x20,
 };
 
 class Runtime {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index eefdaa7391..ba0b6d6265 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -671,9 +671,7 @@ class StackMap {
 
   void SetStackMask(const CodeInfo& info, const BitVector& sp_map) {
     MemoryRegion region = GetStackMask(info);
-    for (size_t i = 0; i < region.size_in_bits(); i++) {
-      region.StoreBit(i, sp_map.IsBitSet(i));
-    }
+    sp_map.CopyTo(region.start(), region.size());
   }
 
   bool HasDexRegisterMap(const CodeInfo& info) const {
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index 5971524d81..a68030287b 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -62,7 +62,8 @@ class StrideIterator : public std::iterator<std::random_access_iterator_tag, T>
 
  private:
   uintptr_t ptr_;
-  const size_t stride_;
+  // Not const for operator=.
+  size_t stride_;
 };
 
 }  // namespace art
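The stride_iterator.h change is needed because a const non-static data member makes the implicitly-declared copy-assignment operator deleted, and an iterator must be assignable (plain `it = other`, std::sort, and so on). A minimal illustration:

#include <cstddef>
#include <cstdint>

struct ConstStride {
  uintptr_t ptr_;
  const size_t stride_;  // Implicit operator= is deleted because of this member.
};

struct MutableStride {
  uintptr_t ptr_;
  size_t stride_;  // Non-const, so the defaulted operator= works.
};

void Demo(MutableStride a, MutableStride b) {
  a = b;  // OK.
  // ConstStride x{0, 1}, y{0, 2}; x = y;  // Would not compile.
}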
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 89e34674d5..65999f77b5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1583,7 +1583,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
 
   bool Init(int depth) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // Allocate method trace with an extra slot that will hold the PC trace
+    // Allocate method trace with format [method pointers][pcs].
     auto* cl = Runtime::Current()->GetClassLinker();
     trace_ = cl->AllocPointerArray(self_, depth * 2);
     if (trace_ == nullptr) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f25e4ee706..9faaa4a57e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2860,7 +2860,16 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
     if (have_pending_hard_failure_) {
       if (Runtime::Current()->IsAotCompiler()) {
         /* When AOT compiling, check that the last failure is a hard failure */
-        CHECK_EQ(failures_[failures_.size() - 1], VERIFY_ERROR_BAD_CLASS_HARD);
+        if (failures_[failures_.size() - 1] != VERIFY_ERROR_BAD_CLASS_HARD) {
+          LOG(ERROR) << "Pending failures:";
+          for (auto& error : failures_) {
+            LOG(ERROR) << error;
+          }
+          for (auto& error_msg : failure_messages_) {
+            LOG(ERROR) << error_msg->str();
+          }
+          LOG(FATAL) << "Pending hard failure, but last failure not hard.";
+        }
       }
       /* immediate failure, reject class */
       info_messages_ << "Rejecting opcode " << inst->DumpString(dex_file_);
@@ -3358,13 +3367,13 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
       if (!src_type.IsIntegralTypes()) {
        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type "
                                          << src_type << " but expected " << reg_type;
-        return res_method;
+        return nullptr;
      }
    } else if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
      // Continue on soft failures. We need to find possible hard failures to avoid problems in the
      // compiler.
      if (have_pending_hard_failure_) {
-        return res_method;
+        return nullptr;
      }
    }
    sig_registers += reg_type.IsLongOrDoubleTypes() ? 2 : 1;
diff --git a/test/489-current-method-regression/expected.txt b/test/489-current-method-regression/expected.txt
new file mode 100644
index 0000000000..cced94c787
--- /dev/null
+++ b/test/489-current-method-regression/expected.txt
@@ -0,0 +1 @@
+In bar
diff --git a/test/489-current-method-regression/info.txt b/test/489-current-method-regression/info.txt
new file mode 100644
index 0000000000..da03a4fa8c
--- /dev/null
+++ b/test/489-current-method-regression/info.txt
@@ -0,0 +1,2 @@
+Regression test for a crash due to the removal
+of HCurrentMethod from the optimizing graph.
diff --git a/test/489-current-method-regression/src/Main.java b/test/489-current-method-regression/src/Main.java
new file mode 100644
index 0000000000..7d102f5490
--- /dev/null
+++ b/test/489-current-method-regression/src/Main.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    System.out.println(foo(1, 0));
+  }
+
+  public static String foo(int a, int b) {
+    if (a == 42) {
+      // The class loading will be seen as dead code by
+      // the optimizer.
+      Class c = Main.class;
+    }
+    return new Main().bar();
+  }
+
+  public String bar() {
+    return "In bar";
+  }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 5922257d01..a6b216bf3a 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -15,4 +15,5 @@ b/18800943 (2)
 MoveExc
 MoveExceptionOnEntry
 EmptySparseSwitch
+b/20224106
 Done!
diff --git a/test/800-smali/smali/b_20224106.smali b/test/800-smali/smali/b_20224106.smali
new file mode 100644
index 0000000000..78009dbc0a
--- /dev/null
+++ b/test/800-smali/smali/b_20224106.smali
@@ -0,0 +1,16 @@
+.class public LB20224106;
+
+# Test that a hard + soft verifier failure in invoke-interface does not lead to
+# an order abort (the last failure must be hard).
+
+.super Ljava/lang/Object;
+
+.method public static run(LB20224106;Ljava/lang/Object;)V
+    .registers 4
+    # Two failure points here:
+    # 1) There is a parameter type mismatch. The formal type is integral (int), but the actual
+    #    type is reference.
+    # 2) The receiver is not an interface or Object
+    invoke-interface {v2, v3}, Ljava/net/DatagramSocket;->checkPort(I)V
+    return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 3e0b1f99ed..3e88364089 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -79,6 +79,8 @@ public class Main {
                 "moveExceptionOnEntry", new Object[]{0}, new VerifyError(), null));
         testCases.add(new TestCase("EmptySparseSwitch", "EmptySparseSwitch", "run", null, null,
                 null));
+        testCases.add(new TestCase("b/20224106", "B20224106", "run", null, new VerifyError(),
+                0));
     }
 
     public void runTests() {
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index a19fd15559..344d2dedb3 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -19,11 +19,16 @@ if [ ! -d libcore ]; then
   exit 1
 fi
 
-# Jar containing all the tests.
+# Jar containing jsr166 tests.
+jsr166_test_jar=out/target/common/obj/JAVA_LIBRARIES/jsr166-tests_intermediates/javalib.jar
+
+# Jar containing all the other tests.
 test_jar=out/target/common/obj/JAVA_LIBRARIES/core-tests_intermediates/javalib.jar
+
 if [ ! -f $test_jar ]; then
-  echo "Before running, you must build core-tests and vogar: make core-tests vogar vogar.jar"
+  echo "Before running, you must build core-tests, jsr166-tests and vogar: \
+       make core-tests jsr166-tests vogar vogar.jar"
   exit 1
 fi
 
@@ -54,9 +59,10 @@ working_packages=("libcore.icu"
                   "org.apache.harmony.tests.java.util"
                   "org.apache.harmony.tests.java.text"
                   "org.apache.harmony.tests.javax.security"
-                  "tests.java.lang.String")
+                  "tests.java.lang.String"
+                  "jsr166")
 
 # Run the tests using vogar.
 echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $@ --expectations art/tools/libcore_failures.txt --classpath $test_jar ${working_packages[@]}
+vogar $@ --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}