-rw-r--r--   compiler/dex/mir_analysis.cc                                |  5
-rw-r--r--   compiler/dex/mir_graph.cc                                   | 17
-rw-r--r--   compiler/dex/mir_graph.h                                    |  6
-rw-r--r--   runtime/dex_instruction.h                                   |  5
-rw-r--r--   runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  | 18
-rw-r--r--   runtime/gc/heap-inl.h                                       |  2
-rw-r--r--   runtime/gc/heap.cc                                          | 21
-rw-r--r--   runtime/gc/heap.h                                           |  5
-rw-r--r--   runtime/mirror/array-inl.h                                  |  1
9 files changed, 62 insertions, 18 deletions
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 7ce8f696be..8ef80fa6bb 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1004,6 +1004,11 @@ bool MIRGraph::SkipCompilation() {
     return false;
   }
 
+  // Contains a pattern we don't want to compile?
+  if (punt_to_interpreter_) {
+    return true;
+  }
+
   if (compiler_filter == CompilerOptions::kInterpretOnly) {
     LOG(WARNING) << "InterpretOnly should ideally be filtered out prior to parsing.";
     return true;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e4550d1e60..2bfc15459a 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,7 +86,8 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
       forward_branches_(0),
       compiler_temps_(arena, 6, kGrowableArrayMisc),
       num_non_special_compiler_temps_(0),
-      max_available_non_special_compiler_temps_(0) {
+      max_available_non_special_compiler_temps_(0),
+      punt_to_interpreter_(false) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
   max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
       - std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -610,6 +611,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
     }
 
     int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
 
     uint64_t df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
 
@@ -676,6 +678,19 @@
     } else if (flags & Instruction::kSwitch) {
       cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
     }
+    if (verify_flags & Instruction::kVerifyVarArgRange) {
+      /*
+       * The Quick backend's runtime model includes a gap between a method's
+       * argument ("in") vregs and the rest of its vregs. Handling a range instruction
+       * which spans the gap is somewhat complicated, and should not happen
+       * in normal usage of dx. Punt to the interpreter.
+       */
+      int first_reg_in_range = insn->dalvikInsn.vC;
+      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
+      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
+        punt_to_interpreter_ = true;
+      }
+    }
     current_offset_ += width;
     BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
                                        false, /* immed_pred_block_p */ NULL);
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d344055656..28e94709e8 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -684,6 +684,11 @@ class MIRGraph {
     return opcode >= static_cast<int>(kMirOpFirst);
   }
 
+  // Is this vreg in the in set?
+  bool IsInVReg(int vreg) {
+    return (vreg >= cu_->num_regs);
+  }
+
   void DumpCheckStats();
   MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
   int SRegToVReg(int ssa_reg) const;
@@ -917,6 +922,7 @@ class MIRGraph {
   size_t num_non_special_compiler_temps_;
   size_t max_available_non_special_compiler_temps_;
   size_t max_available_special_compiler_temps_;
+  bool punt_to_interpreter_;  // Difficult or not worthwhile - just interpret.
 
   friend class LocalValueNumberingTest;
 };
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index c434cdd938..4352c4add0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -422,6 +422,11 @@ class Instruction {
     return kInstructionFlags[opcode];
   }
 
+  // Return the verify flags for the given opcode.
+  static int VerifyFlagsOf(Code opcode) {
+    return kInstructionVerifyFlags[opcode];
+  }
+
   // Returns true if this instruction is a branch.
   bool IsBranch() const {
     return (kInstructionFlags[Opcode()] & kBranch) != 0;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7cbeb2971a..5339b5ea1f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -756,21 +756,25 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
   RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
   visitor.VisitArguments();
   thread->EndAssertNoThreadSuspension(old_cause);
+  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
   // Resolve method filling in dex cache.
   if (called->IsRuntimeMethod()) {
+    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
     called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+    receiver = sirt_receiver.get();
   }
   const void* code = NULL;
   if (LIKELY(!thread->IsExceptionPending())) {
     // Incompatible class change should have been handled in resolve method.
     CHECK(!called->CheckIncompatibleClassChange(invoke_type));
-    // Refine called method based on receiver.
-    if (invoke_type == kVirtual) {
-      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
-    } else if (invoke_type == kInterface) {
-      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
-    }
-    if ((invoke_type == kVirtual) || (invoke_type == kInterface)) {
+    if (virtual_or_interface) {
+      // Refine called method based on receiver.
+      CHECK(receiver != nullptr) << invoke_type;
+      if (invoke_type == kVirtual) {
+        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+      } else {
+        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+      }
       // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
       // of the sharpened method.
       if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e089ef203f..89ded0b27f 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -256,7 +256,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
-  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
+  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
 }
 
 template <bool kGrow>
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8d8cdd6047..2e6d2c29b6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -98,6 +98,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       long_gc_log_threshold_(long_gc_log_threshold),
       ignore_max_footprint_(ignore_max_footprint),
       have_zygote_space_(false),
+      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
       soft_reference_queue_(this),
       weak_reference_queue_(this),
       finalizer_reference_queue_(this),
@@ -159,11 +160,16 @@
   }
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
-  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
+  if (!Runtime::Current()->IsZygote()) {
     ChangeCollector(post_zygote_collector_type_);
+    large_object_threshold_ = kDefaultLargeObjectThreshold;
   } else {
-    // We are the zygote, use bump pointer allocation + semi space collector.
-    ChangeCollector(kCollectorTypeSS);
+    if (kMovingCollector) {
+      // We are the zygote, use bump pointer allocation + semi space collector.
+      ChangeCollector(kCollectorTypeSS);
+    } else {
+      ChangeCollector(post_zygote_collector_type_);
+    }
   }
 
   live_bitmap_.reset(new accounting::HeapBitmap(this));
@@ -1485,15 +1491,13 @@ void Heap::PreZygoteFork() {
   main_space_->SetFootprintLimit(main_space_->Capacity());
   AddSpace(main_space_);
   have_zygote_space_ = true;
+  // Enable large object space allocations.
+  large_object_threshold_ = kDefaultLargeObjectThreshold;
   // Create the zygote space mod union table.
   accounting::ModUnionTable* mod_union_table =
       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
   AddModUnionTable(mod_union_table);
-  // Reset the cumulative loggers since we now have a few additional timing phases.
-  for (const auto& collector : garbage_collectors_) {
-    collector->ResetCumulativeStatistics();
-  }
   // Can't use RosAlloc for non moving space due to thread local buffers.
   // TODO: Non limited space for non-movable objects?
   MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
@@ -2049,7 +2053,8 @@ void Heap::ProcessCards(TimingLogger& timings) {
       TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
      // were dirty before the GC started.
-      // TODO: Don't need to use atomic.
+      // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
+      // -> clean(cleaning thread).
       // The races are we either end up with: Aged card, unaged card. Since we have the checkpoint
       // roots and then we scan / update mod union tables after. We will always scan either card.
       // If we end up with the non aged card, we scan it it in the pause.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5d3232fa3d..2f227d0d37 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -119,7 +119,7 @@ class Heap {
   // If true, measure the total allocation time.
   static constexpr bool kMeasureAllocationTime = false;
   // Primitive arrays larger than this size are put in the large object space.
-  static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 32 * MB;
 
@@ -743,6 +743,9 @@ class Heap {
   // If we have a zygote space.
   bool have_zygote_space_;
 
+  // Minimum allocation size of large object.
+  size_t large_object_threshold_;
+
   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
   // completes.
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 8158bc5f61..1d37775f93 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -141,6 +141,7 @@ inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_c
                                                              allocator_type, visitor));
   }
   if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
+    array_class = result->GetClass();  // In case the array class moved.
    CHECK_EQ(array_class->GetComponentSize(), component_size);
     if (!fill_usable) {
       CHECK_EQ(result->SizeOf(), size);
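
For reference, a minimal standalone sketch of the range check that the mir_graph.cc hunk above adds, assuming the vreg layout the patch comment describes (argument "in" vregs sit at or above cu_->num_regs, all other vregs below it). The num_regs value and the main() driver below are hypothetical, chosen only to exercise the check; they are not part of the patch.

#include <cstdio>

namespace {

// Mirrors MIRGraph::IsInVReg() from the patch: an "in" (argument) vreg sits at or
// above num_regs, assuming the layout described in the added comment.
bool IsInVReg(int vreg, int num_regs) {
  return vreg >= num_regs;
}

// Returns true when a *-range instruction whose register window is [vC, vC + vA - 1]
// straddles the boundary between locals and ins; the patch sets punt_to_interpreter_
// in that case, since such code should not come out of normal dx usage.
bool RangeSpansInGap(int vC, int vA, int num_regs) {
  int first_reg_in_range = vC;
  int last_reg_in_range = first_reg_in_range + vA - 1;
  return IsInVReg(first_reg_in_range, num_regs) != IsInVReg(last_reg_in_range, num_regs);
}

}  // namespace

int main() {
  const int num_regs = 4;  // hypothetical: 4 non-argument vregs, ins start at v4
  // Entirely within the locals: compilation proceeds.
  std::printf("v0..v2 spans gap: %d\n", RangeSpansInGap(/*vC=*/0, /*vA=*/3, num_regs));
  // Starts in the locals and ends in the ins: the method would be punted to the interpreter.
  std::printf("v3..v5 spans gap: %d\n", RangeSpansInGap(/*vC=*/3, /*vA=*/3, num_regs));
  return 0;
}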