-rw-r--r--  cmdline/cmdline_types.h                     |    5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc   |   42
-rw-r--r--  compiler/optimizing/code_generator_arm.h    |    1
-rw-r--r--  runtime/base/mutex.h                        |    2
-rw-r--r--  runtime/class_linker.cc                     |   46
-rw-r--r--  runtime/dex_file_verifier.cc                |   37
-rw-r--r--  runtime/gc/heap-inl.h                       |    9
-rw-r--r--  runtime/gc/heap.cc                          |   95
-rw-r--r--  runtime/gc/heap.h                           |   17
-rw-r--r--  runtime/hprof/hprof.cc                      |   48
-rw-r--r--  runtime/mirror/class-inl.h                  |    2
-rw-r--r--  runtime/mirror/class.h                      |    6
-rw-r--r--  runtime/runtime.cc                          |    6
-rw-r--r--  runtime/stride_iterator.h                   |    4
-rw-r--r--  test/514-shifts/expected.txt                |    0
-rw-r--r--  test/514-shifts/info.txt                    |    2
-rw-r--r--  test/514-shifts/src/Main.java               |  106

17 files changed, 347 insertions(+), 81 deletions(-)
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index f38478cdd6..2cb86a6554 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -472,6 +472,7 @@ struct XGcOption {
   bool verify_pre_gc_rosalloc_ = kIsDebugBuild;
   bool verify_pre_sweeping_rosalloc_ = false;
   bool verify_post_gc_rosalloc_ = false;
+  bool gcstress_ = false;
 };
 
 template <>
@@ -509,6 +510,10 @@ struct CmdlineType<XGcOption> : CmdlineTypeParser<XGcOption> {
       xgc.verify_post_gc_rosalloc_ = true;
     } else if (gc_option == "nopostverify_rosalloc") {
       xgc.verify_post_gc_rosalloc_ = false;
+    } else if (gc_option == "gcstress") {
+      xgc.gcstress_ = true;
+    } else if (gc_option == "nogcstress") {
+      xgc.gcstress_ = false;
     } else if ((gc_option == "precise") ||
                (gc_option == "noprecise") ||
                (gc_option == "verifycardtable") ||
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 2b1131d65f..3f28e64b4a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2458,7 +2458,9 @@ void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
     case Primitive::kPrimInt: {
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      // Make the output overlap, as it will be used to hold the masked
+      // second input.
+      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
       break;
     }
     case Primitive::kPrimLong: {
@@ -2489,13 +2491,13 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
       // Arm doesn't mask the shift count so we need to do it ourselves.
       if (second.IsRegister()) {
         Register second_reg = second.AsRegister<Register>();
-        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
+        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
         if (op->IsShl()) {
-          __ Lsl(out_reg, first_reg, second_reg);
+          __ Lsl(out_reg, first_reg, out_reg);
         } else if (op->IsShr()) {
-          __ Asr(out_reg, first_reg, second_reg);
+          __ Asr(out_reg, first_reg, out_reg);
         } else {
-          __ Lsr(out_reg, first_reg, second_reg);
+          __ Lsr(out_reg, first_reg, out_reg);
         }
       } else {
         int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
@@ -2524,44 +2526,44 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
         Register second_reg = second.AsRegister<Register>();
 
         if (op->IsShl()) {
+          __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
           // Shift the high part
-          __ and_(second_reg, second_reg, ShifterOperand(63));
-          __ Lsl(o_h, high, second_reg);
+          __ Lsl(o_h, high, o_l);
           // Shift the low part and `or` what overflew on the high part
-          __ rsb(temp, second_reg, ShifterOperand(32));
+          __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
           __ Lsr(temp, low, temp);
           __ orr(o_h, o_h, ShifterOperand(temp));
           // If the shift is > 32 bits, override the high part
-          __ subs(temp, second_reg, ShifterOperand(32));
+          __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
           __ it(PL);
           __ Lsl(o_h, low, temp, false, PL);
           // Shift the low part
-          __ Lsl(o_l, low, second_reg);
+          __ Lsl(o_l, low, o_l);
         } else if (op->IsShr()) {
+          __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
           // Shift the low part
-          __ and_(second_reg, second_reg, ShifterOperand(63));
-          __ Lsr(o_l, low, second_reg);
+          __ Lsr(o_l, low, o_h);
           // Shift the high part and `or` what underflew on the low part
-          __ rsb(temp, second_reg, ShifterOperand(32));
+          __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ orr(o_l, o_l, ShifterOperand(temp));
           // If the shift is > 32 bits, override the low part
-          __ subs(temp, second_reg, ShifterOperand(32));
+          __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
           __ it(PL);
           __ Asr(o_l, high, temp, false, PL);
           // Shift the high part
-          __ Asr(o_h, high, second_reg);
+          __ Asr(o_h, high, o_h);
         } else {
+          __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
           // same as Shr except we use `Lsr`s and not `Asr`s
-          __ and_(second_reg, second_reg, ShifterOperand(63));
-          __ Lsr(o_l, low, second_reg);
-          __ rsb(temp, second_reg, ShifterOperand(32));
+          __ Lsr(o_l, low, o_h);
+          __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ orr(o_l, o_l, ShifterOperand(temp));
-          __ subs(temp, second_reg, ShifterOperand(32));
+          __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
           __ it(PL);
           __ Lsr(o_l, high, temp, false, PL);
-          __ Lsr(o_h, high, second_reg);
+          __ Lsr(o_h, high, o_h);
         }
         break;
       }
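The bug fixed above: the old code masked the shift count with `__ and_(second_reg, second_reg, ...)`, clobbering an input register that the register allocator assumes is left intact, and with kNoOutputOverlap the output could even alias an input. The fix masks into the output register instead (kOutputOverlap for the int case, o_l/o_h for the long case), so no input is ever written. The masking itself implements Java shift semantics: only the low 5 bits of an int shift count and the low 6 bits of a long shift count are used. A minimal standalone sketch of those semantics (plain C++, not ART code; the unsigned cast sidesteps signed-shift pitfalls):

// Sketch of Java shift semantics, assuming kMaxIntShiftValue == 31 and
// kMaxLongShiftValue == 63 as in the patch above.
#include <cassert>
#include <cstdint>

int32_t JavaShlInt(int32_t value, int32_t shift) {
  // Java masks the count to 5 bits for int shifts.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (shift & 31));
}

int64_t JavaShrLong(int64_t value, int32_t shift) {
  // Java masks the count to 6 bits for long shifts; >> stays arithmetic.
  return value >> (shift & 63);
}

int main() {
  assert(JavaShlInt(1, 0xFFF) == INT32_MIN);       // 0xFFF & 31 == 31.
  assert(JavaShrLong(0x123456789abcdefLL, 0x41) == // 0x41 & 63 == 1.
         0x91a2b3c4d5e6f7LL);
  return 0;
}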
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c410fa80ba..2fe464daf3 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -32,6 +32,7 @@ class SlowPathCodeARM;
 
 // Use a local definition to prevent copying mistakes.
 static constexpr size_t kArmWordSize = kArmPointerSize;
+static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
 
 static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
 static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 0ab148e37e..aa91ca161d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -61,6 +61,7 @@ enum LockLevel {
   kAbortLock,
   kJdwpSocketLock,
   kRegionSpaceRegionLock,
+  kTransactionLogLock,
   kReferenceQueueSoftReferencesLock,
   kReferenceQueuePhantomReferencesLock,
   kReferenceQueueFinalizerReferencesLock,
@@ -77,7 +78,6 @@ enum LockLevel {
   kDexFileMethodInlinerLock,
   kDexFileToMethodInlinerMapLock,
   kMarkSweepMarkStackLock,
-  kTransactionLogLock,
   kInternTableLock,
   kOatFileSecondaryLookupLock,
   kDefaultMutexLevel,
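The LockLevel enumerators encode the permitted lock acquisition order, so moving kTransactionLogLock to a lower level allows it to be taken while holding locks that now rank above it, such as the reference-queue locks. A condensed sketch of the invariant (not the ART implementation, which tracks held mutexes per thread):

// Each thread records the levels of locks it holds; a new lock must have a
// strictly lower level than the innermost lock already held.
#include <cassert>
#include <vector>

thread_local std::vector<int> held_lock_levels;  // Innermost lock is last.

void OnLockAcquired(int level) {
  assert((held_lock_levels.empty() || level < held_lock_levels.back()) &&
         "lock level violation");
  held_lock_levels.push_back(level);
}

void OnLockReleased() {
  held_lock_levels.pop_back();
}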
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d2805cdbd6..5240447c75 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1310,7 +1310,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
 void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
-  class_roots_.VisitRoot(visitor, RootInfo(kRootVMInternal));
+  class_roots_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   Thread* const self = Thread::Current();
   {
     ReaderMutexLock mu(self, dex_lock_);
@@ -1333,9 +1333,9 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
     }
   }
   VisitClassRoots(visitor, flags);
-  array_iftable_.VisitRoot(visitor, RootInfo(kRootVMInternal));
-  for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
-    find_array_class_cache_[i].VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+  array_iftable_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+  for (GcRoot<mirror::Class>& root : find_array_class_cache_) {
+    root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   }
 }
@@ -4928,8 +4928,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
         }
       }
       if (miranda_method == nullptr) {
-        size_t size = ArtMethod::ObjectSize(image_pointer_size_);
-        miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(size));
+        miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
         CHECK(miranda_method != nullptr);
         // Point the interface table at a phantom slot.
         new(miranda_method) ArtMethod(*interface_method, image_pointer_size_);
@@ -4968,34 +4967,42 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
         ++out;
       }
     }
+    StrideIterator<ArtMethod> out(
+        reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size);
+    // Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
+    // we want the roots of the miranda methods to get visited.
+    for (ArtMethod* mir_method : miranda_methods) {
+      out->CopyFrom(mir_method, image_pointer_size_);
+      out->SetAccessFlags(out->GetAccessFlags() | kAccMiranda);
+      move_table.emplace(mir_method, &*out);
+      ++out;
+    }
     UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count);
-    // Done copying methods, they are all reachable from the class now, so we can end the no thread
+    // Done copying methods, they are all roots in the class now, so we can end the no thread
     // suspension assert.
     self->EndAssertNoThreadSuspension(old_cause);
-    size_t old_vtable_count = vtable->GetLength();
+    const size_t old_vtable_count = vtable->GetLength();
     const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
+    miranda_methods.clear();
     vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
     if (UNLIKELY(vtable.Get() == nullptr)) {
      self->AssertPendingOOMException();
      return false;
    }
-    StrideIterator<ArtMethod> out(
+    out = StrideIterator<ArtMethod>(
         reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size);
-    for (auto* mir_method : miranda_methods) {
-      ArtMethod* out_method = &*out;
-      out->CopyFrom(mir_method, image_pointer_size_);
+    size_t vtable_pos = old_vtable_count;
+    for (size_t i = old_method_count; i < new_method_count; ++i) {
       // Leave the declaring class alone as type indices are relative to it
-      out_method->SetAccessFlags(out_method->GetAccessFlags() | kAccMiranda);
-      out_method->SetMethodIndex(0xFFFF & old_vtable_count);
-      vtable->SetElementPtrSize(old_vtable_count, out_method, image_pointer_size_);
-      move_table.emplace(mir_method, out_method);
+      out->SetMethodIndex(0xFFFF & vtable_pos);
+      vtable->SetElementPtrSize(vtable_pos, &*out, image_pointer_size_);
       ++out;
-      ++old_vtable_count;
+      ++vtable_pos;
     }
-
+    CHECK_EQ(vtable_pos, new_vtable_count);
     // Update old vtable methods.
-    for (size_t i = 0; i < old_vtable_count - miranda_methods.size(); ++i) {
+    for (size_t i = 0; i < old_vtable_count; ++i) {
       auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
       DCHECK(m != nullptr) << PrettyClass(klass.Get());
       auto it = move_table.find(m);
@@ -5006,7 +5013,6 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
       }
     }
     klass->SetVTable(vtable.Get());
-    CHECK_EQ(old_vtable_count, new_vtable_count);
     // Go fix up all the stale miranda pointers.
     for (size_t i = 0; i < ifcount; ++i) {
       for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
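As the patch's own comment says, the miranda methods are now copied into the class's virtual-method array before vtable->CopyOf(), because CopyOf can allocate and therefore suspend the thread for GC; the methods must already be reachable as roots at that point. The move_table then remaps any stale pointers to the relocated methods. That remap pattern, as a condensed standalone sketch (plain C++ with a hypothetical Method type, not the ART types):

// After relocating objects, stale pointers are fixed up through a map from
// old address to new address, mirroring move_table above.
#include <unordered_map>
#include <vector>

struct Method { int index; };

int main() {
  std::vector<Method> old_storage(3);
  std::vector<Method> new_storage(3);
  std::unordered_map<Method*, Method*> move_table;
  for (size_t i = 0; i < old_storage.size(); ++i) {
    new_storage[i] = old_storage[i];  // CopyFrom equivalent.
    move_table.emplace(&old_storage[i], &new_storage[i]);
  }
  std::vector<Method*> vtable = {&old_storage[1], &old_storage[2]};
  for (Method*& m : vtable) {  // Fix up stale pointers in place.
    auto it = move_table.find(m);
    if (it != move_table.end()) {
      m = it->second;
    }
  }
  return 0;
}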
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a66c38e0fe..5d5a7da20c 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -684,25 +684,52 @@ bool DexFileVerifier::CheckIntraClassDataItem() {
   ClassDataItemIterator it(*dex_file_, ptr_);
 
   // These calls use the raw access flags to check whether the whole dex field is valid.
-
+  uint32_t prev_index = 0;
   for (; it.HasNextStaticField(); it.Next()) {
-    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetRawMemberAccessFlags(), true)) {
+    uint32_t curr_index = it.GetMemberIndex();
+    if (curr_index < prev_index) {
+      ErrorStringPrintf("out-of-order static field indexes %d and %d", prev_index, curr_index);
+      return false;
+    }
+    prev_index = curr_index;
+    if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), true)) {
       return false;
     }
   }
+  prev_index = 0;
   for (; it.HasNextInstanceField(); it.Next()) {
-    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetRawMemberAccessFlags(), false)) {
+    uint32_t curr_index = it.GetMemberIndex();
+    if (curr_index < prev_index) {
+      ErrorStringPrintf("out-of-order instance field indexes %d and %d", prev_index, curr_index);
+      return false;
+    }
+    prev_index = curr_index;
+    if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), false)) {
       return false;
     }
   }
+  prev_index = 0;
   for (; it.HasNextDirectMethod(); it.Next()) {
-    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetRawMemberAccessFlags(),
+    uint32_t curr_index = it.GetMemberIndex();
+    if (curr_index < prev_index) {
+      ErrorStringPrintf("out-of-order direct method indexes %d and %d", prev_index, curr_index);
+      return false;
+    }
+    prev_index = curr_index;
+    if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
         it.GetMethodCodeItemOffset(), true)) {
       return false;
     }
   }
+  prev_index = 0;
   for (; it.HasNextVirtualMethod(); it.Next()) {
-    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetRawMemberAccessFlags(),
+    uint32_t curr_index = it.GetMemberIndex();
+    if (curr_index < prev_index) {
+      ErrorStringPrintf("out-of-order virtual method indexes %d and %d", prev_index, curr_index);
+      return false;
+    }
+    prev_index = curr_index;
+    if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
         it.GetMethodCodeItemOffset(), false)) {
       return false;
     }
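The four loops above share one pattern: member indexes within each group of a class_data_item must be non-decreasing, which the verifier now enforces instead of trusting the dex file. The same check, factored into a hypothetical helper (a sketch, not part of the patch):

// Rejects any sequence of member indexes that goes backwards.
#include <cstdint>
#include <cstdio>

bool CheckAscending(const uint32_t* indexes, size_t count, const char* kind) {
  uint32_t prev_index = 0;
  for (size_t i = 0; i < count; ++i) {
    const uint32_t curr_index = indexes[i];
    if (curr_index < prev_index) {
      fprintf(stderr, "out-of-order %s indexes %u and %u\n",
              kind, prev_index, curr_index);
      return false;
    }
    prev_index = curr_index;
  }
  return true;
}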
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 2d5433032d..2ec9c864e3 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -174,6 +174,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
   } else {
     DCHECK(!Dbg::IsAllocTrackingEnabled());
   }
+  if (kInstrumented) {
+    if (gc_stress_mode_) {
+      CheckGcStressMode(self, &obj);
+    }
+  } else {
+    DCHECK(!gc_stress_mode_);
+  }
   // IsConcurrentGc() isn't known at compile time so we can optimize by not checking it for
   // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
   // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
@@ -391,7 +398,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
-  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
+  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
 
 template <bool kGrow>
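Both heap-inl.h changes touch the allocation fast path: ShouldAllocLargeObject now also routes large strings to the large-object space, and the gc-stress check is guarded by the kInstrumented template parameter, so the uninstrumented entrypoints pay nothing for it. A minimal sketch of that compile-time gating (hypothetical names, not the ART allocator):

// The branch on the template parameter is constant-folded per instantiation,
// so Allocate<false>() contains no trace of the instrumented path.
#include <cstddef>
#include <new>

template <bool kInstrumented>
void* Allocate(size_t bytes) {
  void* obj = ::operator new(bytes);  // Stand-in for the real allocator.
  if (kInstrumented) {
    // Allocation tracking / gc-stress hooks would run here.
  }
  return obj;
}

// Usage: Allocate<true>(16) takes the instrumented path; Allocate<false>(16)
// compiles down to a bare allocation.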
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9a70d69d2e..57557e2fd7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -21,6 +21,7 @@
 
 #include <limits>
 #include <memory>
+#include <unwind.h>  // For GC verification.
 #include <vector>
 
 #include "art_field-inl.h"
@@ -125,7 +126,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
            bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
-           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
+           bool verify_post_gc_rosalloc, bool gc_stress_mode,
+           bool use_homogeneous_space_compaction_for_oom,
            uint64_t min_interval_homogeneous_space_compaction_by_oom)
     : non_moving_space_(nullptr),
       rosalloc_space_(nullptr),
@@ -170,6 +172,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
       verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
       verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
+      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
@@ -209,13 +212,17 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       blocking_gc_count_last_window_(0U),
       gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
       blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
-                                        kGcCountRateMaxBucketCount) {
+                                        kGcCountRateMaxBucketCount),
+      backtrace_lock_(nullptr),
+      seen_backtrace_count_(0u),
+      unique_backtrace_count_(0u) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
+  Runtime* const runtime = Runtime::Current();
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
-  const bool is_zygote = Runtime::Current()->IsZygote();
+  const bool is_zygote = runtime->IsZygote();
   if (!is_zygote) {
     // Background compaction is currently not supported for command line runs.
     if (background_collector_type_ != foreground_collector_type_) {
@@ -507,8 +514,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
     }
   }
-  if (running_on_valgrind_) {
-    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
+  if (gc_stress_mode_) {
+    backtrace_lock_ = new Mutex("GC complete lock");
+  }
+  if (running_on_valgrind_ || gc_stress_mode_) {
+    instrumentation->InstrumentQuickAllocEntryPoints();
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() exiting";
@@ -1072,6 +1083,12 @@ Heap::~Heap() {
   STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
   delete pending_task_lock_;
+  delete backtrace_lock_;
+  if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
+    LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
+              << " total=" << seen_backtrace_count_.LoadRelaxed() +
+                  unique_backtrace_count_.LoadRelaxed();
+  }
   VLOG(heap) << "Finished ~Heap()";
 }
@@ -3675,5 +3692,73 @@ void Heap::ClearMarkedObjects() {
   }
 }
 
+// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+class StackCrawlState {
+ public:
+  StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
+      : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
+  }
+  size_t GetFrameCount() const {
+    return frame_count_;
+  }
+  static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
+    auto* const state = reinterpret_cast<StackCrawlState*>(arg);
+    const uintptr_t ip = _Unwind_GetIP(context);
+    // The first stack frame is get_backtrace itself. Skip it.
+    if (ip != 0 && state->skip_count_ > 0) {
+      --state->skip_count_;
+      return _URC_NO_REASON;
+    }
+    // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
+    state->frames_[state->frame_count_] = ip;
+    state->frame_count_++;
+    return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
+  }
+
+ private:
+  uintptr_t* const frames_;
+  size_t frame_count_;
+  const size_t max_depth_;
+  size_t skip_count_;
+};
+
+static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
+  StackCrawlState state(frames, max_depth, 0u);
+  _Unwind_Backtrace(&StackCrawlState::Callback, &state);
+  return state.GetFrameCount();
+}
+
+void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
+  auto* const runtime = Runtime::Current();
+  if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
+      !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
+    // Check if we should GC.
+    bool new_backtrace = false;
+    {
+      static constexpr size_t kMaxFrames = 16u;
+      uintptr_t backtrace[kMaxFrames];
+      const size_t frames = get_backtrace(backtrace, kMaxFrames);
+      uint64_t hash = 0;
+      for (size_t i = 0; i < frames; ++i) {
+        hash = hash * 2654435761 + backtrace[i];
+        hash += (hash >> 13) ^ (hash << 6);
+      }
+      MutexLock mu(self, *backtrace_lock_);
+      new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
+      if (new_backtrace) {
+        seen_backtraces_.insert(hash);
+      }
+    }
+    if (new_backtrace) {
+      StackHandleScope<1> hs(self);
+      auto h = hs.NewHandleWrapper(obj);
+      CollectGarbage(false);
+      unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+    } else {
+      seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+    }
+  }
+}
+
 }  // namespace gc
 }  // namespace art
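CheckGcStressMode hashes the current allocation backtrace and forces a GC only the first time each hash is seen, so every distinct allocation site gets stressed roughly once instead of on every allocation. The hash-and-dedupe step in isolation (a standalone sketch using the same constants as the patch):

// Returns true only for a backtrace whose hash has not been seen before.
#include <cstddef>
#include <cstdint>
#include <unordered_set>

bool IsNewBacktrace(const uintptr_t* frames, size_t count,
                    std::unordered_set<uint64_t>& seen) {
  uint64_t hash = 0;
  for (size_t i = 0; i < count; ++i) {
    hash = hash * 2654435761 + frames[i];  // Knuth multiplicative constant.
    hash += (hash >> 13) ^ (hash << 6);
  }
  return seen.insert(hash).second;
}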
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index dac747b854..81476a4f8c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -19,6 +19,7 @@
 
 #include <iosfwd>
 #include <string>
+#include <unordered_set>
 #include <vector>
 
 #include "allocator_type.h"
@@ -180,7 +181,8 @@ class Heap {
        bool ignore_max_footprint, bool use_tlab,
        bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
        bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
-       bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
+       bool verify_post_gc_rosalloc, bool gc_stress_mode,
+       bool use_homogeneous_space_compaction,
        uint64_t min_interval_homogeneous_space_compaction_by_oom);
 
   ~Heap();
@@ -887,6 +889,10 @@ class Heap {
   void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
 
+  // GC stress mode attempts to do one GC per unique backtrace.
+  void CheckGcStressMode(Thread* self, mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -1042,6 +1048,7 @@ class Heap {
   bool verify_pre_gc_rosalloc_;
   bool verify_pre_sweeping_rosalloc_;
   bool verify_post_gc_rosalloc_;
+  const bool gc_stress_mode_;
 
   // RAII that temporarily disables the rosalloc verification during
   // the zygote fork.
@@ -1192,6 +1199,14 @@ class Heap {
   // The histogram of the number of blocking GC invocations per window duration.
   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
 
+  // GC stress related data structures.
+  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Debugging variables, seen backtraces vs unique backtraces.
+  Atomic<uint64_t> seen_backtrace_count_;
+  Atomic<uint64_t> unique_backtrace_count_;
+  // Stack trace hashes that we already saw.
+  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
+
   friend class CollectorTransitionTask;
   friend class collector::GarbageCollector;
   friend class collector::MarkCompact;
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 6e0e56e82a..a2a4f0df6f 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1116,15 +1116,16 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
   size_t size_patch_offset = output_->Length();
   __ AddU4(0x77777777);
 
-  // Write the instance data;  fields for this class, followed by super class fields,
-  // and so on. Don't write the klass or monitor fields of Object.class.
-  mirror::Class* orig_klass = klass;
+  // What we will use for the string value if the object is a string.
+  mirror::Object* string_value = nullptr;
+
+  // Write the instance data;  fields for this class, followed by super class fields, and so on.
   do {
-    int ifieldCount = klass->NumInstanceFields();
-    for (int i = 0; i < ifieldCount; ++i) {
+    const size_t instance_fields = klass->NumInstanceFields();
+    for (size_t i = 0; i < instance_fields; ++i) {
       ArtField* f = klass->GetInstanceField(i);
       size_t size;
-      auto t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
+      HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
       switch (t) {
         case hprof_basic_byte:
           __ AddU1(f->GetByte(obj));
@@ -1149,34 +1150,35 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
           break;
       }
     }
+    // Add value field for String if necessary.
+    if (klass->IsStringClass()) {
+      mirror::String* s = obj->AsString();
+      if (s->GetLength() == 0) {
+        // If string is empty, use an object-aligned address within the string for the value.
+        string_value = reinterpret_cast<mirror::Object*>(
+            reinterpret_cast<uintptr_t>(s) + kObjectAlignment);
+      } else {
+        string_value = reinterpret_cast<mirror::Object*>(s->GetValue());
+      }
+      __ AddObjectId(string_value);
+    }
     klass = klass->GetSuperClass();
   } while (klass != nullptr);
 
+  // Patch the instance field length.
+  __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
+
   // Output native value character array for strings.
-  if (orig_klass->IsStringClass()) {
+  CHECK_EQ(obj->IsString(), string_value != nullptr);
+  if (string_value != nullptr) {
     mirror::String* s = obj->AsString();
-    mirror::Object* value;
-    if (s->GetLength() == 0) {
-      // If string is empty, use an object-aligned address within the string for the value.
-      value = reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(s) + kObjectAlignment);
-    } else {
-      value = reinterpret_cast<mirror::Object*>(s->GetValue());
-    }
-    __ AddObjectId(value);
-
-    // Patch the instance field length.
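Moving the synthetic string value into the do/while loop puts it inside the span covered by the size patch, so the recorded instance-field length now includes it and the patch runs exactly once on every path instead of in both branches of the old if/else. The underlying write-placeholder-then-patch pattern, as a sketch over a plain byte buffer (host byte order for brevity; the real code goes through the hprof output helpers):

// Reserve a 4-byte length, write the record body, then patch the length in.
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  std::vector<uint8_t> out;
  size_t size_patch_offset = out.size();
  out.insert(out.end(), {0x77, 0x77, 0x77, 0x77});  // Placeholder U4.
  out.push_back(0x2A);                              // Record body bytes.
  uint32_t len = out.size() - (size_patch_offset + 4);
  memcpy(&out[size_patch_offset], &len, sizeof(len));  // UpdateU4 equivalent.
  return 0;
}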
-    __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
-
     __ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
-    __ AddObjectId(value);
+    __ AddObjectId(string_value);
     __ AddU4(StackTraceSerialNumber(obj));
     __ AddU4(s->GetLength());
     __ AddU1(hprof_basic_char);
     __ AddU2List(s->GetValue(), s->GetLength());
-  } else {
-    // Patch the instance field length.
-    __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
   }
 }
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 835b94ade4..0538f4baa4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -757,7 +757,7 @@ inline bool Class::GetSlowPathEnabled() {
 }
 
 inline void Class::SetSlowPath(bool enabled) {
-  SetFieldBoolean<false>(GetSlowPathFlagOffset(), enabled);
+  SetFieldBoolean<false, false>(GetSlowPathFlagOffset(), enabled);
 }
 
 inline void Class::InitializeClassVisitor::operator()(
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ba8a693bdb..0453906171 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1030,10 +1030,14 @@ class MANAGED Class FINAL : public Object {
   }
 
   static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(!java_lang_Class_.IsNull());
+    DCHECK(HasJavaLangClass());
     return java_lang_Class_.Read();
   }
 
+  static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return !java_lang_Class_.IsNull();
+  }
+
   // Can't call this SetClass or else gets called instead of Object::SetClass in places.
   static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void ResetClass();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4a2a0c9111..6c55129847 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -144,7 +144,10 @@ struct TraceConfig {
 };
 
 Runtime::Runtime()
-    : instruction_set_(kNone),
+    : resolution_method_(nullptr),
+      imt_conflict_method_(nullptr),
+      imt_unimplemented_method_(nullptr),
+      instruction_set_(kNone),
       compiler_callbacks_(nullptr),
       is_zygote_(false),
       must_relocate_(false),
@@ -870,6 +873,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
                        xgc_option.verify_pre_gc_rosalloc_,
                        xgc_option.verify_pre_sweeping_rosalloc_,
                        xgc_option.verify_post_gc_rosalloc_,
+                       xgc_option.gcstress_,
                        runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
                        runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
   ATRACE_END();
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index 5971524d81..bd622f3709 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -22,7 +22,7 @@
 namespace art {
 
 template<typename T>
-class StrideIterator : public std::iterator<std::random_access_iterator_tag, T> {
+class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
  public:
   StrideIterator(const StrideIterator&) = default;
   StrideIterator(StrideIterator&&) = default;
@@ -62,7 +62,7 @@ class StrideIterator : public std::iterator<std::random_access_iterator_tag, T>
 
  private:
   uintptr_t ptr_;
-  const size_t stride_;
+  size_t stride_;
 };
 
 }  // namespace art
diff --git a/test/514-shifts/expected.txt b/test/514-shifts/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/514-shifts/expected.txt
diff --git a/test/514-shifts/info.txt b/test/514-shifts/info.txt
new file mode 100644
index 0000000000..eb93c5f15b
--- /dev/null
+++ b/test/514-shifts/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing that used to miscompile
+shifts on ARM.
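The stride_iterator.h change makes stride_ non-const so StrideIterator is copy-assignable, which the new class_linker.cc code relies on (out = StrideIterator<ArtMethod>(...)); a const data member implicitly deletes the copy-assignment operator. A two-line illustration of the rule:

#include <cstddef>

struct WithConstMember {
  const size_t stride_;  // Implicitly deletes operator=(const WithConstMember&).
};
// WithConstMember a{8}, b{16}; a = b;  // Error: copy assignment is deleted.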
diff --git a/test/514-shifts/src/Main.java b/test/514-shifts/src/Main.java
new file mode 100644
index 0000000000..6c44eaba26
--- /dev/null
+++ b/test/514-shifts/src/Main.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+  public static void main(String[] args) {
+    testIntShiftRight();
+    testIntShiftLeft();
+    testIntUnsignedShiftRight();
+    testLongShiftRight();
+    testLongShiftLeft();
+    testLongUnsignedShiftRight();
+  }
+
+  public static void testIntShiftLeft() {
+    int a = myField;
+    int b = myOtherField << a;
+    if (b != -2147483648) {
+      throw new Error("Expected -2147483648, got " + b);
+    }
+    if (a != 0xFFF) {
+      throw new Error("Expected 0xFFF, got " + a);
+    }
+  }
+
+  public static void testIntShiftRight() {
+    int a = myField;
+    int b = myOtherField >> a;
+    if (b != 0) {
+      throw new Error("Expected 0, got " + b);
+    }
+    if (a != 0xFFF) {
+      throw new Error("Expected 0xFFF, got " + a);
+    }
+  }
+
+  public static void testIntUnsignedShiftRight() {
+    int a = myField;
+    int b = myOtherField >>> a;
+    if (b != 0) {
+      throw new Error("Expected 0, got " + b);
+    }
+    if (a != 0xFFF) {
+      throw new Error("Expected 0xFFF, got " + a);
+    }
+  }
+
+  public static void testLongShiftLeft() {
+    long a = myLongField;
+    long b = myOtherLongField << a;
+    if (b != 0x2468ACF13579BDEL) {
+      throw new Error("Expected 0x2468ACF13579BDEL, got " + b);
+    }
+    // The int conversion will be GVN'ed with the one required
+    // by Java specification of long shift left.
+    if ((int)a != 0x41) {
+      throw new Error("Expected 0x41, got " + a);
+    }
+  }
+
+  public static void testLongShiftRight() {
+    long a = myLongField;
+    long b = myOtherLongField >> a;
+    if (b != 0x91A2B3C4D5E6F7L) {
+      throw new Error("Expected 0x91A2B3C4D5E6F7L, got " + b);
+    }
+    // The int conversion will be GVN'ed with the one required
+    // by Java specification of long shift right.
+    if ((int)a != 0x41) {
+      throw new Error("Expected 0x41, got " + a);
+    }
+  }
+
+  public static void testLongUnsignedShiftRight() {
+    long a = myLongField;
+    long b = myOtherLongField >>> a;
+    if (b != 0x91A2B3C4D5E6F7L) {
+      throw new Error("Expected 0x91A2B3C4D5E6F7L, got " + b);
+    }
+    // The int conversion will be GVN'ed with the one required
+    // by Java specification of long shift right.
+    if ((int)a != 0x41) {
+      throw new Error("Expected 0x41, got " + a);
+    }
+  }
+
+  static int myField = 0xFFF;
+  static int myOtherField = 0x1;
+
+  // Use a value that will need to be masked before doing the shift.
+  // The maximum shift is 0x3F.
+  static long myLongField = 0x41;
+  static long myOtherLongField = 0x123456789abcdefL;
+}
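The test's constants follow directly from the masking rules: 0xFFF & 31 == 31 for the int shifts, and 0x41 & 63 == 1 for the long shifts. A quick standalone check of the expected values (plain C++ mirroring the Java semantics, not part of the test suite):

#include <cassert>
#include <cstdint>

int main() {
  // Int shifts by 0xFFF use an effective count of 31.
  assert((static_cast<uint32_t>(0x1) << (0xFFF & 31)) == 0x80000000u);  // -2147483648
  assert((0x1 >> (0xFFF & 31)) == 0);
  // Long shifts by 0x41 use an effective count of 1.
  assert((0x123456789abcdefLL << (0x41 & 63)) == 0x2468ACF13579BDELL);
  assert((0x123456789abcdefLL >> (0x41 & 63)) == 0x91A2B3C4D5E6F7LL);
  return 0;
}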