25 files changed, 290 insertions, 199 deletions
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt index 0a349ac9c1..9e02f9f4e5 100644 --- a/build/apex/ld.config.txt +++ b/build/apex/ld.config.txt @@ -32,11 +32,17 @@ namespace.platform.link.default.shared_libs += libnativebridge.so namespace.platform.link.default.shared_libs += libnativehelper.so namespace.platform.link.default.shared_libs += libnativeloader.so +# Note that we don't need to link the default namespace with conscrypt: +# the runtime Java code and binaries do not explicitly load native libraries +# from it. + ############################################################################### # "conscrypt" APEX namespace # # This namespace is for libraries within the conscrypt APEX. ############################################################################### + +# Keep in sync with conscrypt namespace in /system/etc/ld.config.txt. namespace.conscrypt.isolated = true namespace.conscrypt.visible = true diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index 1725154735..478ecdf02d 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -427,6 +427,7 @@ struct XGcOption { gc::CollectorType collector_type_ = gc::kCollectorTypeDefault; bool verify_pre_gc_heap_ = false; bool verify_pre_sweeping_heap_ = kIsDebugBuild; + bool generational_cc = kEnableGenerationalCCByDefault; bool verify_post_gc_heap_ = false; bool verify_pre_gc_rosalloc_ = kIsDebugBuild; bool verify_pre_sweeping_rosalloc_ = false; @@ -455,6 +456,10 @@ struct CmdlineType<XGcOption> : CmdlineTypeParser<XGcOption> { xgc.verify_pre_sweeping_heap_ = true; } else if (gc_option == "nopresweepingverify") { xgc.verify_pre_sweeping_heap_ = false; + } else if (gc_option == "generational_cc") { + xgc.generational_cc = true; + } else if (gc_option == "nogenerational_cc") { + xgc.generational_cc = false; } else if (gc_option == "postverify") { xgc.verify_post_gc_heap_ = true; } else if (gc_option == "nopostverify") { diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 3b34e8d0f6..42dbc77087 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -1460,22 +1460,27 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED const debug::MethodDebugInfo& info) { const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); DCHECK(compiler_options.GenerateAnyDebugInfo()); - - // If both flags are passed, generate full debug info. - const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo(); - - // Create entry for the single method that we just compiled. - std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT( - compiler_options.GetInstructionSet(), - compiler_options.GetInstructionSetFeatures(), - mini_debug_info, - info); - AddNativeDebugInfoForJit(Thread::Current(), - reinterpret_cast<const void*>(info.code_address), - elf_file, - debug::PackElfFileForJIT, - compiler_options.GetInstructionSet(), - compiler_options.GetInstructionSetFeatures()); + TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit)); + { + TimingLogger::ScopedTiming st("Generate JIT debug info", &logger); + + // If both flags are passed, generate full debug info. + const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo(); + + // Create entry for the single method that we just compiled. 
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT( + compiler_options.GetInstructionSet(), + compiler_options.GetInstructionSetFeatures(), + mini_debug_info, + info); + AddNativeDebugInfoForJit(Thread::Current(), + reinterpret_cast<const void*>(info.code_address), + elf_file, + debug::PackElfFileForJIT, + compiler_options.GetInstructionSet(), + compiler_options.GetInstructionSetFeatures()); + } + Runtime::Current()->GetJit()->AddTimingLogger(logger); } } // namespace art diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp index e9147380df..5aa8236dd8 100644 --- a/dexlayout/Android.bp +++ b/dexlayout/Android.bp @@ -205,7 +205,7 @@ art_cc_binary { target: { android: { shared_libs: [ - "libpagemap", + "libmeminfo", ], }, }, diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc index 7a849f2898..28d40486bc 100644 --- a/dexlayout/dexdiag.cc +++ b/dexlayout/dexdiag.cc @@ -34,13 +34,18 @@ #include "dex_ir.h" #include "dex_ir_builder.h" #ifdef ART_TARGET_ANDROID -#include "pagemap/pagemap.h" +#include <meminfo/pageacct.h> +#include <meminfo/procmeminfo.h> #endif #include "vdex_file.h" namespace art { using android::base::StringPrintf; +#ifdef ART_TARGET_ANDROID +using android::meminfo::ProcMemInfo; +using android::meminfo::Vma; +#endif static bool g_verbose = false; @@ -194,7 +199,7 @@ static uint16_t FindSectionTypeForPage(size_t page, return DexFile::kDexTypeHeaderItem; } -static void ProcessPageMap(uint64_t* pagemap, +static void ProcessPageMap(const std::vector<uint64_t>& pagemap, size_t start, size_t end, const std::vector<dex_ir::DexFileSection>& sections, @@ -202,7 +207,7 @@ static void ProcessPageMap(uint64_t* pagemap, static constexpr size_t kLineLength = 32; for (size_t page = start; page < end; ++page) { char type_char = '.'; - if (PM_PAGEMAP_PRESENT(pagemap[page])) { + if (::android::meminfo::page_present(pagemap[page])) { const size_t dex_page_offset = page - start; uint16_t type = FindSectionTypeForPage(dex_page_offset, sections); page_counts->Increment(type); @@ -265,7 +270,7 @@ static void DisplayDexStatistics(size_t start, printer->PrintSkipLine(); } -static void ProcessOneDexMapping(uint64_t* pagemap, +static void ProcessOneDexMapping(const std::vector<uint64_t>& pagemap, uint64_t map_start, const DexFile* dex_file, uint64_t vdex_start, @@ -316,8 +321,8 @@ static bool IsVdexFileMapping(const std::string& mapped_name) { return false; } -static bool DisplayMappingIfFromVdexFile(pm_map_t* map, Printer* printer) { - std::string vdex_name = pm_map_name(map); +static bool DisplayMappingIfFromVdexFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) { + std::string vdex_name = vma.name; // Extract all the dex files from the vdex file. std::string error_msg; std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_name, @@ -344,34 +349,33 @@ static bool DisplayMappingIfFromVdexFile(pm_map_t* map, Printer* printer) { return false; } // Open the page mapping (one uint64_t per page) for the entire vdex mapping. - uint64_t* pagemap; - size_t len; - if (pm_map_pagemap(map, &pagemap, &len) != 0) { + std::vector<uint64_t> pagemap; + if (!proc.PageMap(vma, &pagemap)) { std::cerr << "Error creating pagemap." << std::endl; return false; } // Process the dex files. 
std::cout << "MAPPING " - << pm_map_name(map) - << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map)) + << vma.name + << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end) << std::endl; for (const auto& dex_file : dex_files) { ProcessOneDexMapping(pagemap, - pm_map_start(map), + vma.start, dex_file.get(), reinterpret_cast<uint64_t>(vdex->Begin()), printer); } - free(pagemap); return true; } -static void ProcessOneOatMapping(uint64_t* pagemap, size_t size, Printer* printer) { +static void ProcessOneOatMapping(const std::vector<uint64_t>& pagemap, + Printer* printer) { static constexpr size_t kLineLength = 32; size_t resident_page_count = 0; - for (size_t page = 0; page < size; ++page) { + for (size_t page = 0; page < pagemap.size(); ++page) { char type_char = '.'; - if (PM_PAGEMAP_PRESENT(pagemap[page])) { + if (::android::meminfo::page_present(pagemap[page])) { ++resident_page_count; type_char = '*'; } @@ -383,13 +387,13 @@ static void ProcessOneOatMapping(uint64_t* pagemap, size_t size, Printer* printe } } if (g_verbose) { - if (size % kLineLength != 0) { + if (pagemap.size() % kLineLength != 0) { std::cout << std::endl; } } - double percent_of_total = 100.0 * resident_page_count / size; + double percent_of_total = 100.0 * resident_page_count / pagemap.size(); printer->PrintHeader(); - printer->PrintOne("EXECUTABLE", resident_page_count, size, percent_of_total, percent_of_total); + printer->PrintOne("EXECUTABLE", resident_page_count, pagemap.size(), percent_of_total, percent_of_total); printer->PrintSkipLine(); } @@ -405,21 +409,19 @@ static bool IsOatFileMapping(const std::string& mapped_name) { return false; } -static bool DisplayMappingIfFromOatFile(pm_map_t* map, Printer* printer) { +static bool DisplayMappingIfFromOatFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) { // Open the page mapping (one uint64_t per page) for the entire vdex mapping. - uint64_t* pagemap; - size_t len; - if (pm_map_pagemap(map, &pagemap, &len) != 0) { + std::vector<uint64_t> pagemap; + if (!proc.PageMap(vma, &pagemap) != 0) { std::cerr << "Error creating pagemap." << std::endl; return false; } // Process the dex files. std::cout << "MAPPING " - << pm_map_name(map) - << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map)) + << vma.name + << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end) << std::endl; - ProcessOneOatMapping(pagemap, len, printer); - free(pagemap); + ProcessOneOatMapping(pagemap, printer); return true; } @@ -488,27 +490,11 @@ static int DexDiagMain(int argc, char* argv[]) { return EXIT_FAILURE; } - // get libpagemap kernel information. - pm_kernel_t* ker; - if (pm_kernel_create(&ker) != 0) { - std::cerr << "Error creating kernel interface -- does this kernel have pagemap?" << std::endl; - return EXIT_FAILURE; - } - - // get libpagemap process information. - pm_process_t* proc; - if (pm_process_create(ker, pid, &proc) != 0) { - std::cerr << "Error creating process interface -- does process " - << pid - << " really exist?" - << std::endl; - return EXIT_FAILURE; - } - + // get libmeminfo process information. + ProcMemInfo proc(pid); // Get the set of mappings by the specified process. - pm_map_t** maps; - size_t num_maps; - if (pm_process_maps(proc, &maps, &num_maps) != 0) { + const std::vector<Vma>& maps = proc.Maps(); + if (maps.empty()) { std::cerr << "Error listing maps." 
<< std::endl; return EXIT_FAILURE; } @@ -516,19 +502,19 @@ static int DexDiagMain(int argc, char* argv[]) { bool match_found = false; // Process the mappings that are due to vdex or oat files. Printer printer; - for (size_t i = 0; i < num_maps; ++i) { - std::string mapped_file_name = pm_map_name(maps[i]); + for (auto& vma : maps) { + std::string mapped_file_name = vma.name; // Filter by name contains options (if any). if (!FilterByNameContains(mapped_file_name, name_filters)) { continue; } if (IsVdexFileMapping(mapped_file_name)) { - if (!DisplayMappingIfFromVdexFile(maps[i], &printer)) { + if (!DisplayMappingIfFromVdexFile(proc, vma, &printer)) { return EXIT_FAILURE; } match_found = true; } else if (IsOatFileMapping(mapped_file_name)) { - if (!DisplayMappingIfFromOatFile(maps[i], &printer)) { + if (!DisplayMappingIfFromOatFile(proc, vma, &printer)) { return EXIT_FAILURE; } match_found = true; diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc index 9490798552..4953bab624 100644 --- a/libartbase/base/file_utils.cc +++ b/libartbase/base/file_utils.cc @@ -278,6 +278,17 @@ std::string ReplaceFileExtension(const std::string& filename, const std::string& } } +bool LocationIsOnRuntimeModule(const char* full_path) { + std::string error_msg; + const char* runtime_path = GetAndroidDirSafe("ANDROID_RUNTIME_ROOT", + "/apex/com.android.runtime", + &error_msg); + if (runtime_path == nullptr) { + return false; + } + return android::base::StartsWith(full_path, runtime_path); +} + bool LocationIsOnSystem(const char* path) { #ifdef _WIN32 UNUSED(path); diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h index c249bccc3c..bddfaa1faf 100644 --- a/libartbase/base/file_utils.h +++ b/libartbase/base/file_utils.h @@ -72,6 +72,9 @@ std::string GetVdexFilename(const std::string& oat_filename); // ReplaceFileExtension("foo", "abc") == "foo.abc" std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension); +// Return whether the location is on apex/com.android.runtime +bool LocationIsOnRuntimeModule(const char* location); + // Return whether the location is on system (i.e. android root). bool LocationIsOnSystem(const char* location); diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc index a814b6640c..7e93639a4a 100644 --- a/libdexfile/dex/art_dex_file_loader.cc +++ b/libdexfile/dex/art_dex_file_loader.cc @@ -539,17 +539,17 @@ std::unique_ptr<DexFile> ArtDexFileLoader::OpenCommon(const uint8_t* base, error_msg, std::move(container), verify_result); - - // Check if this dex file is located in the framework directory. - // If it is, set a flag on the dex file. This is used by hidden API - // policy decision logic. - // Location can contain multidex suffix, so fetch its canonical version. Note - // that this will call `realpath`. - std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str()); - if (dex_file != nullptr && LocationIsOnSystemFramework(path.c_str())) { - dex_file->SetHiddenapiDomain(hiddenapi::Domain::kPlatform); + if (dex_file != nullptr) { + // Set hidden API domain based based on location. + // Location can contain multidex suffix, so fetch its canonical version. Note + // that this will call `realpath`. 
+ std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str()); + if (LocationIsOnSystemFramework(path.c_str())) { + dex_file->SetHiddenapiDomain(hiddenapi::Domain::kPlatform); + } else if (LocationIsOnRuntimeModule(path.c_str())) { + dex_file->SetHiddenapiDomain(hiddenapi::Domain::kCorePlatform); + } } - return dex_file; } diff --git a/runtime/Android.bp b/runtime/Android.bp index b89eb02ff0..a3081e9122 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -396,8 +396,10 @@ libart_cc_defaults { "libnativeloader", "libbacktrace", "liblog", - // For atrace, properties, ashmem, set_sched_policy. + // For atrace, properties, ashmem. "libcutils", + // For set_sched_policy. + "libprocessgroup", // For common macros. "libbase", ], diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h index 1014c0e15c..2de79107f4 100644 --- a/runtime/gc/collector/concurrent_copying-inl.h +++ b/runtime/gc/collector/concurrent_copying-inl.h @@ -36,8 +36,7 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion( Thread* const self, mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) { - if (kEnableGenerationalConcurrentCopyingCollection - && !done_scanning_.load(std::memory_order_acquire)) { + if (use_generational_cc_ && !done_scanning_.load(std::memory_order_acquire)) { // Everything in the unevac space should be marked for young generation CC, // except for large objects. DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) @@ -130,7 +129,7 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self, mirror::Object* holder, MemberOffset offset) { // Cannot have `kNoUnEvac` when Generational CC collection is disabled. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac); + DCHECK(!kNoUnEvac || use_generational_cc_); if (from_ref == nullptr) { return nullptr; } @@ -172,9 +171,7 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self, return to_ref; } case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: - if (kEnableGenerationalConcurrentCopyingCollection - && kNoUnEvac - && !region_space_->IsLargeObject(from_ref)) { + if (kNoUnEvac && use_generational_cc_ && !region_space_->IsLargeObject(from_ref)) { if (!kFromGCThread) { DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator"; } @@ -245,8 +242,7 @@ inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_re DCHECK(region_space_->IsInUnevacFromSpace(from_ref)); if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) { return true; - } else if (!kEnableGenerationalConcurrentCopyingCollection - || done_scanning_.load(std::memory_order_acquire)) { + } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) { // If the card table scanning is not finished yet, then only read-barrier // state should be checked. 
Checking the mark bitmap is unreliable as there // may be some objects - whose corresponding card is dirty - which are diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 8f7b76a0c2..642b12e9b7 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -69,15 +69,19 @@ static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild; ConcurrentCopying::ConcurrentCopying(Heap* heap, bool young_gen, + bool use_generational_cc, const std::string& name_prefix, bool measure_read_barrier_slow_path) : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "concurrent copying"), - region_space_(nullptr), gc_barrier_(new Barrier(0)), + region_space_(nullptr), + gc_barrier_(new Barrier(0)), gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", kDefaultGcMarkStackSize, kDefaultGcMarkStackSize)), + use_generational_cc_(use_generational_cc), + young_gen_(young_gen), rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack", kReadBarrierMarkStackSize, kReadBarrierMarkStackSize)), @@ -100,7 +104,6 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, region_space_inter_region_bitmap_(nullptr), non_moving_space_inter_region_bitmap_(nullptr), reclaimed_bytes_ratio_sum_(0.f), - young_gen_(young_gen), skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), measure_read_barrier_slow_path_(measure_read_barrier_slow_path), mark_from_read_barrier_measurements_(false), @@ -119,7 +122,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, num_bytes_allocated_before_gc_(0) { static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, "The region space size and the read barrier table region size must match"); - CHECK(kEnableGenerationalConcurrentCopyingCollection || !young_gen_); + CHECK(use_generational_cc_ || !young_gen_); Thread* self = Thread::Current(); { ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); @@ -138,7 +141,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, pooled_mark_stacks_.push_back(mark_stack); } } - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { // Allocate sweep array free buffer. std::string error_msg; sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous( @@ -194,7 +197,7 @@ void ConcurrentCopying::RunPhases() { InitializePhase(); // In case of forced evacuation, all regions are evacuated and hence no // need to compute live_bytes. 
- if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) { + if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) { MarkingPhase(); } } @@ -290,7 +293,7 @@ void ConcurrentCopying::ActivateReadBarrierEntrypoints() { } void ConcurrentCopying::CreateInterRegionRefBitmaps() { - DCHECK(kEnableGenerationalConcurrentCopyingCollection); + DCHECK(use_generational_cc_); DCHECK(region_space_inter_region_bitmap_ == nullptr); DCHECK(non_moving_space_inter_region_bitmap_ == nullptr); DCHECK(region_space_ != nullptr); @@ -325,7 +328,7 @@ void ConcurrentCopying::BindBitmaps() { CHECK(!space->IsZygoteSpace()); CHECK(!space->IsImageSpace()); CHECK(space == region_space_ || space == heap_->non_moving_space_); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { if (space == region_space_) { region_space_bitmap_ = region_space_->GetMarkBitmap(); } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) { @@ -358,7 +361,7 @@ void ConcurrentCopying::BindBitmaps() { } } } - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (use_generational_cc_ && young_gen_) { for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) { CHECK(space->IsLargeObjectSpace()); space->AsLargeObjectSpace()->CopyLiveToMarked(); @@ -391,7 +394,7 @@ void ConcurrentCopying::InitializePhase() { GcCause gc_cause = GetCurrentIteration()->GetGcCause(); force_evacuate_all_ = false; - if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) { + if (!use_generational_cc_ || !young_gen_) { if (gc_cause == kGcCauseExplicit || gc_cause == kGcCauseCollectorTransition || GetCurrentIteration()->GetClearSoftReferences()) { @@ -407,7 +410,7 @@ void ConcurrentCopying::InitializePhase() { DCHECK(immune_gray_stack_.empty()); } } - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { done_scanning_.store(false, std::memory_order_release); } BindBitmaps(); @@ -421,7 +424,7 @@ void ConcurrentCopying::InitializePhase() { } LOG(INFO) << "GC end of InitializePhase"; } - if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) { + if (use_generational_cc_ && !young_gen_) { region_space_bitmap_->Clear(); } mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed); @@ -533,7 +536,7 @@ class ConcurrentCopying::FlipCallback : public Closure { cc->region_space_->SetFromSpace( cc->rb_table_, evac_mode, - /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection); + /*clear_live_bytes=*/ !cc->use_generational_cc_); } cc->SwapStacks(); if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { @@ -542,7 +545,7 @@ class ConcurrentCopying::FlipCallback : public Closure { cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); } cc->is_marking_ = true; - if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) { + if (kIsDebugBuild && !cc->use_generational_cc_) { cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared(); } if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) { @@ -866,7 +869,7 @@ inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) { DCHECK(obj != nullptr); DCHECK(immune_spaces_.ContainsObject(obj)); // Update the fields without graying it or pushing it onto the mark stack. - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (use_generational_cc_ && young_gen_) { // Young GC does not care about references to unevac space. 
It is safe to not gray these as // long as scan immune objects happens after scanning the dirty cards. Scan<true>(obj); @@ -1394,7 +1397,7 @@ void ConcurrentCopying::CopyingPhase() { if (kUseBakerReadBarrier) { gc_grays_immune_objects_ = false; } - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { if (kVerboseMode) { LOG(INFO) << "GC ScanCardsForSpace"; } @@ -2152,7 +2155,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) { // It may be already marked if we accidentally pushed the same object twice due to the racy // bitmap read in MarkUnevacFromSpaceRegion. - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (use_generational_cc_ && young_gen_) { CHECK(region_space_->IsLargeObject(to_ref)); region_space_->ZeroLiveBytesForLargeObject(to_ref); } @@ -2169,7 +2172,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { } break; case space::RegionSpace::RegionType::kRegionTypeToSpace: - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { // Copied to to-space, set the bit so that the next GC can scan objects. region_space_bitmap_->Set(to_ref); } @@ -2214,7 +2217,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { } } if (perform_scan) { - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (use_generational_cc_ && young_gen_) { Scan<true>(to_ref); } else { Scan<false>(to_ref); @@ -2373,7 +2376,7 @@ void ConcurrentCopying::SweepSystemWeaks(Thread* self) { } void ConcurrentCopying::Sweep(bool swap_bitmaps) { - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (use_generational_cc_ && young_gen_) { // Only sweep objects on the live stack. SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false); } else { @@ -2407,7 +2410,7 @@ void ConcurrentCopying::Sweep(bool swap_bitmaps) { // Copied and adapted from MarkSweep::SweepArray. void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { // This method is only used when Generational CC collection is enabled. - DCHECK(kEnableGenerationalConcurrentCopyingCollection); + DCHECK(use_generational_cc_); CheckEmptyMarkStack(); TimingLogger::ScopedTiming t("SweepArray", GetTimings()); Thread* self = Thread::Current(); @@ -2891,8 +2894,7 @@ bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) { DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref; if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) { return true; - } else if (!kEnableGenerationalConcurrentCopyingCollection - || done_scanning_.load(std::memory_order_acquire)) { + } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) { // Read the comment in IsMarkedInUnevacFromSpace() accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap(); accounting::LargeObjectBitmap* los_bitmap = nullptr; @@ -2954,7 +2956,7 @@ class ConcurrentCopying::RefFieldsVisitor { explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread) : collector_(collector), thread_(thread) { // Cannot have `kNoUnEvac` when Generational CC collection is disabled. 
- DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac); + DCHECK(!kNoUnEvac || collector_->use_generational_cc_); } void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) @@ -2991,7 +2993,7 @@ class ConcurrentCopying::RefFieldsVisitor { template <bool kNoUnEvac> inline void ConcurrentCopying::Scan(mirror::Object* to_ref) { // Cannot have `kNoUnEvac` when Generational CC collection is disabled. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac); + DCHECK(!kNoUnEvac || use_generational_cc_); if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) { // Avoid all read barriers during visit references to help performance. // Don't do this in transaction mode because we may read the old value of an field which may @@ -3012,7 +3014,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) { template <bool kNoUnEvac> inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { // Cannot have `kNoUnEvac` when Generational CC collection is disabled. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac); + DCHECK(!kNoUnEvac || use_generational_cc_); DCHECK_EQ(Thread::Current(), thread_running_gc_); mirror::Object* ref = obj->GetFieldObject< mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); @@ -3386,7 +3388,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self, } else { DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); - if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) { + if (!use_generational_cc_ || !young_gen_) { // Mark it in the live bitmap. CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref)); } @@ -3482,7 +3484,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self, los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap(); DCHECK(los_bitmap->HasAddress(ref)); } - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { // The sticky-bit CC collector is only compatible with Baker-style read barriers. DCHECK(kUseBakerReadBarrier); // Not done scanning, use AtomicSetReadBarrierPointer. @@ -3551,11 +3553,11 @@ void ConcurrentCopying::FinishPhase() { } // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false // positives. - if (!kEnableGenerationalConcurrentCopyingCollection && !kVerifyNoMissingCardMarks) { + if (!kVerifyNoMissingCardMarks && !use_generational_cc_) { TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings()); // We do not currently use the region space cards at all, madvise them away to save ram. heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit()); - } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) { + } else if (use_generational_cc_ && !young_gen_) { region_space_inter_region_bitmap_->Clear(); non_moving_space_inter_region_bitmap_->Clear(); } diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index a41c17a748..124713c17c 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -65,10 +65,11 @@ class ConcurrentCopying : public GarbageCollector { // pages. 
static constexpr bool kGrayDirtyImmuneObjects = true; - explicit ConcurrentCopying(Heap* heap, - bool young_gen, - const std::string& name_prefix = "", - bool measure_read_barrier_slow_path = false); + ConcurrentCopying(Heap* heap, + bool young_gen, + bool use_generational_cc, + const std::string& name_prefix = "", + bool measure_read_barrier_slow_path = false); ~ConcurrentCopying(); void RunPhases() override @@ -90,7 +91,7 @@ class ConcurrentCopying : public GarbageCollector { void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); GcType GetGcType() const override { - return (kEnableGenerationalConcurrentCopyingCollection && young_gen_) + return (use_generational_cc_ && young_gen_) ? kGcTypeSticky : kGcTypePartial; } @@ -323,6 +324,19 @@ class ConcurrentCopying : public GarbageCollector { std::unique_ptr<Barrier> gc_barrier_; std::unique_ptr<accounting::ObjectStack> gc_mark_stack_; + // If true, enable generational collection when using the Concurrent Copying + // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC + // for major collections. Generational CC collection is currently only + // compatible with Baker read barriers. Set in Heap constructor. + const bool use_generational_cc_; + + // Generational "sticky", only trace through dirty objects in region space. + const bool young_gen_; + + // If true, the GC thread is done scanning marked objects on dirty and aged + // card (see ConcurrentCopying::CopyingPhase). + Atomic<bool> done_scanning_; + // The read-barrier mark-bit stack. Stores object references whose // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier, // so that this bit can be reset at the end of the collection in @@ -400,12 +414,6 @@ class ConcurrentCopying : public GarbageCollector { // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle float reclaimed_bytes_ratio_sum_; - // Generational "sticky", only trace through dirty objects in region space. - const bool young_gen_; - // If true, the GC thread is done scanning marked objects on dirty and aged - // card (see ConcurrentCopying::CopyingPhase). - Atomic<bool> done_scanning_; - // The skipped blocks are memory blocks/chucks that were copies of // objects that were unused due to lost races (cas failures) at // object copy/forward pointer install. They are reused. diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index d699da0d16..5f62d758c6 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -107,8 +107,9 @@ static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB; // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator // threads (lower pauses, use less memory bandwidth). -static constexpr double kStickyGcThroughputAdjustment = - kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0; +static double GetStickyGcThroughputAdjustment(bool use_generational_cc) { + return use_generational_cc ? 0.5 : 1.0; +} // Whether or not we compact the zygote in PreZygoteFork. 
static constexpr bool kCompactZygote = kMovingCollector; // How many reserve entries are at the end of the allocation stack, these are only needed if the @@ -201,6 +202,7 @@ Heap::Heap(size_t initial_size, bool gc_stress_mode, bool measure_gc_performance, bool use_homogeneous_space_compaction_for_oom, + bool use_generational_cc, uint64_t min_interval_homogeneous_space_compaction_by_oom, bool dump_region_info_before_gc, bool dump_region_info_after_gc) @@ -288,6 +290,7 @@ Heap::Heap(size_t initial_size, pending_collector_transition_(nullptr), pending_heap_trim_(nullptr), use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom), + use_generational_cc_(use_generational_cc), running_collection_is_blocking_(false), blocking_gc_count_(0U), blocking_gc_time_(0U), @@ -494,7 +497,8 @@ Heap::Heap(size_t initial_size, MemMap region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin); CHECK(region_space_mem_map.IsValid()) << "No region space mem map"; - region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map)); + region_space_ = space::RegionSpace::Create( + kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_); AddSpace(region_space_); } else if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) { @@ -652,26 +656,28 @@ Heap::Heap(size_t initial_size, if (MayUseCollector(kCollectorTypeCC)) { concurrent_copying_collector_ = new collector::ConcurrentCopying(this, /*young_gen=*/false, + use_generational_cc_, "", measure_gc_performance); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { young_concurrent_copying_collector_ = new collector::ConcurrentCopying( this, /*young_gen=*/true, + use_generational_cc_, "young", measure_gc_performance); } active_concurrent_copying_collector_ = concurrent_copying_collector_; DCHECK(region_space_ != nullptr); concurrent_copying_collector_->SetRegionSpace(region_space_); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { young_concurrent_copying_collector_->SetRegionSpace(region_space_); // At this point, non-moving space should be created. DCHECK(non_moving_space_ != nullptr); concurrent_copying_collector_->CreateInterRegionRefBitmaps(); } garbage_collectors_.push_back(concurrent_copying_collector_); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { garbage_collectors_.push_back(young_concurrent_copying_collector_); } } @@ -2262,7 +2268,7 @@ void Heap::ChangeCollector(CollectorType collector_type) { gc_plan_.clear(); switch (collector_type_) { case kCollectorTypeCC: { - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { gc_plan_.push_back(collector::kGcTypeSticky); } gc_plan_.push_back(collector::kGcTypeFull); @@ -2739,7 +2745,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, collector = semi_space_collector_; break; case kCollectorTypeCC: - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { // TODO: Other threads must do the flip checkpoint before they start poking at // active_concurrent_copying_collector_. So we should not concurrency here. active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ? 
@@ -3637,19 +3643,21 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, collector::GcType non_sticky_gc_type = NonStickyGcType(); // Find what the next non sticky collector will be. collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { if (non_sticky_collector == nullptr) { non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial); } CHECK(non_sticky_collector != nullptr); } + double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_); + // If the throughput of the current sticky GC >= throughput of the non sticky collector, then // do another sticky collection next. // We also check that the bytes allocated aren't over the footprint limit in order to prevent a // pathological case where dead objects which aren't reclaimed by sticky could get accumulated // if the sticky GC throughput always remained >= the full/partial throughput. size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); - if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >= + if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >= non_sticky_collector->GetEstimatedMeanThroughput() && non_sticky_collector->NumberOfIterations() > 0 && bytes_allocated <= target_footprint) { diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 52c9386309..4c5d896c9e 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -212,6 +212,7 @@ class Heap { bool gc_stress_mode, bool measure_gc_performance, bool use_homogeneous_space_compaction, + bool use_generational_cc, uint64_t min_interval_homogeneous_space_compaction_by_oom, bool dump_region_info_before_gc, bool dump_region_info_after_gc); @@ -532,6 +533,10 @@ class Heap { return num_bytes_allocated_.load(std::memory_order_relaxed); } + bool GetUseGenerationalCC() const { + return use_generational_cc_; + } + // Returns the number of objects currently allocated. size_t GetObjectsAllocated() const REQUIRES(!Locks::heap_bitmap_lock_); @@ -768,7 +773,7 @@ class Heap { // Returns the active concurrent copying collector. collector::ConcurrentCopying* ConcurrentCopyingCollector() { - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) || (active_concurrent_copying_collector_ == young_concurrent_copying_collector_)); } else { @@ -1477,6 +1482,11 @@ class Heap { // Whether or not we use homogeneous space compaction to avoid OOM errors. bool use_homogeneous_space_compaction_for_oom_; + // If true, enable generational collection when using the Concurrent Copying + // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC + // for major collections. Set in Heap constructor. + const bool use_generational_cc_; + // True if the currently running collection has made some thread wait. bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_); // The number of blocking GC runs. 
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index a5ba1dcd37..5179702916 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -93,11 +93,12 @@ MemMap RegionSpace::CreateMemMap(const std::string& name, return mem_map; } -RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) { - return new RegionSpace(name, std::move(mem_map)); +RegionSpace* RegionSpace::Create( + const std::string& name, MemMap&& mem_map, bool use_generational_cc) { + return new RegionSpace(name, std::move(mem_map), use_generational_cc); } -RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map) +RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc) : ContinuousMemMapAllocSpace(name, std::move(mem_map), mem_map.Begin(), @@ -105,6 +106,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map) mem_map.End(), kGcRetentionPolicyAlwaysCollect), region_lock_("Region lock", kRegionSpaceRegionLock), + use_generational_cc_(use_generational_cc), time_(1U), num_regions_(mem_map_.Size() / kRegionSize), num_non_free_regions_(0U), @@ -179,9 +181,44 @@ size_t RegionSpace::ToSpaceSize() { return num_regions * kRegionSize; } +void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) { + // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections. + DCHECK(GetUseGenerationalCC() || clear_live_bytes); + DCHECK(!IsFree() && IsInToSpace()); + type_ = RegionType::kRegionTypeUnevacFromSpace; + if (IsNewlyAllocated()) { + // A newly allocated region set as unevac from-space must be + // a large or large tail region. + DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_); + // Always clear the live bytes of a newly allocated (large or + // large tail) region. + clear_live_bytes = true; + // Clear the "newly allocated" status here, as we do not want the + // GC to see it when encountering (and processing) references in the + // from-space. + // + // Invariant: There should be no newly-allocated region in the + // from-space (when the from-space exists, which is between the calls + // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace). + is_newly_allocated_ = false; + } + if (clear_live_bytes) { + // Reset the live bytes, as we have made a non-evacuation + // decision (possibly based on the percentage of live bytes). + live_bytes_ = 0; + } +} + +bool RegionSpace::Region::GetUseGenerationalCC() { + // We are retrieving the info from Heap, instead of the cached version in + // RegionSpace, because accessing the Heap from a Region object is easier + // than accessing the RegionSpace. + return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC(); +} + inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) { // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || (evac_mode != kEvacModeNewlyAllocated)); + DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated)); DCHECK((IsAllocated() || IsLarge()) && IsInToSpace()); // The region should be evacuated if: // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or @@ -253,7 +290,7 @@ inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) { void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) { // This method is only used when Generational CC collection is enabled. 
- DCHECK(kEnableGenerationalConcurrentCopyingCollection); + DCHECK(use_generational_cc_); // This code uses a logic similar to the one used in RegionSpace::FreeLarge // to traverse the regions supporting `obj`. @@ -292,7 +329,7 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, EvacMode evac_mode, bool clear_live_bytes) { // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes); + DCHECK(use_generational_cc_ || clear_live_bytes); ++time_; if (kUseTableLookupReadBarrier) { DCHECK(rb_table->IsAllCleared()); @@ -336,9 +373,7 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, // mark-bit otherwise the live_bytes will not be updated in // ConcurrentCopying::ProcessMarkStackRef() and hence will break the // logic. - if (kEnableGenerationalConcurrentCopyingCollection - && !should_evacuate - && is_newly_allocated) { + if (use_generational_cc_ && !should_evacuate && is_newly_allocated) { GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin())); } num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1; @@ -506,7 +541,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes, // bitmap. But they cannot do so before we know the next GC cycle will // be a major one, so this operation happens at the beginning of such a // major collection, before marking starts. - if (!kEnableGenerationalConcurrentCopyingCollection) { + if (!use_generational_cc_) { GetLiveBitmap()->ClearRange( reinterpret_cast<mirror::Object*>(r->Begin()), reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize)); @@ -520,8 +555,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes, // `r` when it has an undefined live bytes count (i.e. when // `r->LiveBytes() == static_cast<size_t>(-1)`) with // Generational CC. - if (!kEnableGenerationalConcurrentCopyingCollection || - (r->LiveBytes() != static_cast<size_t>(-1))) { + if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) { // Only some allocated bytes are live in this unevac region. // This should only happen for an allocated non-large region. DCHECK(r->IsAllocated()) << r->State(); @@ -918,7 +952,7 @@ RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) { Region* r = ®ions_[region_index]; if (r->IsFree()) { r->Unfree(this, time_); - if (kEnableGenerationalConcurrentCopyingCollection) { + if (use_generational_cc_) { // TODO: Add an explanation for this assertion. DCHECK(!for_evac || !r->is_newly_allocated_); } diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index a6f501becb..d8b54e26ed 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -59,7 +59,7 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { // guaranteed to be granted, if it is required, the caller should call Begin on the returned // space to confirm the request was granted. static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin); - static RegionSpace* Create(const std::string& name, MemMap&& mem_map); + static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc); // Allocate `num_bytes`, returns null if the space is full. 
mirror::Object* Alloc(Thread* self, @@ -368,7 +368,7 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { } private: - RegionSpace(const std::string& name, MemMap&& mem_map); + RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc); class Region { public: @@ -523,33 +523,7 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { // collection, RegionSpace::ClearFromSpace will preserve the space // used by this region, and tag it as to-space (see // Region::SetUnevacFromSpaceAsToSpace below). - void SetAsUnevacFromSpace(bool clear_live_bytes) { - // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections. - DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes); - DCHECK(!IsFree() && IsInToSpace()); - type_ = RegionType::kRegionTypeUnevacFromSpace; - if (IsNewlyAllocated()) { - // A newly allocated region set as unevac from-space must be - // a large or large tail region. - DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_); - // Always clear the live bytes of a newly allocated (large or - // large tail) region. - clear_live_bytes = true; - // Clear the "newly allocated" status here, as we do not want the - // GC to see it when encountering (and processing) references in the - // from-space. - // - // Invariant: There should be no newly-allocated region in the - // from-space (when the from-space exists, which is between the calls - // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace). - is_newly_allocated_ = false; - } - if (clear_live_bytes) { - // Reset the live bytes, as we have made a non-evacuation - // decision (possibly based on the percentage of live bytes). - live_bytes_ = 0; - } - } + void SetAsUnevacFromSpace(bool clear_live_bytes); // Set this region as to-space. Used by RegionSpace::ClearFromSpace. // This is only valid if it is currently an unevac from-space region. @@ -562,7 +536,7 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode); void AddLiveBytes(size_t live_bytes) { - DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace()); + DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace()); DCHECK(!IsLargeTail()); DCHECK_NE(live_bytes_, static_cast<size_t>(-1)); // For large allocations, we always consider all bytes in the regions live. @@ -616,6 +590,8 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { uint64_t GetLongestConsecutiveFreeBytes() const; private: + static bool GetUseGenerationalCC(); + size_t idx_; // The region's index in the region space. size_t live_bytes_; // The live bytes. Used to compute the live percent. uint8_t* begin_; // The begin address of the region. @@ -738,6 +714,8 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + // Cached version of Heap::use_generational_cc_. + const bool use_generational_cc_; uint32_t time_; // The time as the number of collections since the startup. size_t num_regions_; // The number of regions in this space. // The number of non-free regions in this space. 
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h index c73a7101da..8bd59ea90a 100644 --- a/runtime/hidden_api.h +++ b/runtime/hidden_api.h @@ -104,8 +104,8 @@ class AccessContext { Domain dex_domain = dex_file->GetHiddenapiDomain(); if (class_loader.IsNull() && dex_domain == Domain::kApplication) { - LOG(WARNING) << "DexFile " << dex_file->GetLocation() << " is in boot classpath " - << "but is assigned untrusted domain"; + // LOG(WARNING) << "DexFile " << dex_file->GetLocation() << " is in boot classpath " + // << "but is assigned untrusted domain"; dex_domain = Domain::kPlatform; } return dex_domain; @@ -415,7 +415,7 @@ inline bool ShouldDenyAccessToMember(T* member, } // Access checks are not disabled, report the violation. - detail::MaybeReportCorePlatformApiViolation(member, caller_context, access_method); + // detail::MaybeReportCorePlatformApiViolation(member, caller_context, access_method); // Deny access if the policy is enabled. return policy == EnforcementPolicy::kEnabled; diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 4a042598ba..6fd691f556 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -703,6 +703,7 @@ void ParsedOptions::Usage(const char* fmt, ...) { UsageMessage(stream, " -Xgc:[no]postsweepingverify_rosalloc\n"); UsageMessage(stream, " -Xgc:[no]postverify_rosalloc\n"); UsageMessage(stream, " -Xgc:[no]presweepingverify\n"); + UsageMessage(stream, " -Xgc:[no]generational_cc\n"); UsageMessage(stream, " -Ximage:filename\n"); UsageMessage(stream, " -Xbootclasspath-locations:bootclasspath\n" " (override the dex locations of the -Xbootclasspath files)\n"); diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc index cbb7b825d3..77d2316502 100644 --- a/runtime/parsed_options_test.cc +++ b/runtime/parsed_options_test.cc @@ -130,6 +130,23 @@ TEST_F(ParsedOptionsTest, ParsedOptionsGc) { EXPECT_EQ(gc::kCollectorTypeSS, xgc.collector_type_); } +TEST_F(ParsedOptionsTest, ParsedOptionsGenerationalCC) { + RuntimeOptions options; + options.push_back(std::make_pair("-Xgc:generational_cc", nullptr)); + + RuntimeArgumentMap map; + bool parsed = ParsedOptions::Parse(options, false, &map); + ASSERT_TRUE(parsed); + ASSERT_NE(0u, map.Size()); + + using Opt = RuntimeArgumentMap; + + EXPECT_TRUE(map.Exists(Opt::GcOption)); + + XGcOption xgc = map.GetOrDefault(Opt::GcOption); + ASSERT_TRUE(xgc.generational_cc); +} + TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) { using Opt = RuntimeArgumentMap; diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 4853187e5c..a86bc9438e 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -1240,6 +1240,10 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { kExtraDefaultHeapGrowthMultiplier; } XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption); + + // Generational CC collection is currently only compatible with Baker read barriers. 
+ bool use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc; + heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize), runtime_options.GetOrDefault(Opt::HeapGrowthLimit), runtime_options.GetOrDefault(Opt::HeapMinFree), @@ -1274,6 +1278,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { xgc_option.gcstress_, xgc_option.measure_, runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM), + use_generational_cc, runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs), runtime_options.Exists(Opt::DumpRegionInfoBeforeGC), runtime_options.Exists(Opt::DumpRegionInfoAfterGC)); diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h index 793291a008..81d350b24e 100644 --- a/runtime/runtime_globals.h +++ b/runtime/runtime_globals.h @@ -40,16 +40,24 @@ static constexpr bool kMovingCollector = true; static constexpr bool kMarkCompactSupport = false && kMovingCollector; // True if we allow moving classes. static constexpr bool kMovingClasses = !kMarkCompactSupport; -// If true, enable generational collection when using the Concurrent Copying -// (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC -// for major collections. +// When using the Concurrent Copying (CC) collector, if +// `ART_USE_GENERATIONAL_CC` is true, enable generational collection by default, +// i.e. use sticky-bit CC for minor collections and (full) CC for major +// collections. +// This default value can be overridden with the runtime option +// `-Xgc:[no]generational_cc`. // -// Generational CC collection is currently only compatible with Baker read -// barriers. -#if defined(ART_USE_GENERATIONAL_CC) && defined(ART_READ_BARRIER_TYPE_IS_BAKER) -static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true; +// TODO(b/67628039): Consider either: +// - renaming this to a better descriptive name (e.g. +// `ART_USE_GENERATIONAL_CC_BY_DEFAULT`); or +// - removing `ART_USE_GENERATIONAL_CC` and having a fixed default value. +// Any of these changes will require adjusting users of this preprocessor +// directive and the corresponding build system environment variable (e.g. in +// ART's continuous testing). +#ifdef ART_USE_GENERATIONAL_CC +static constexpr bool kEnableGenerationalCCByDefault = true; #else -static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false; +static constexpr bool kEnableGenerationalCCByDefault = false; #endif // If true, enable the tlab allocator by default. 
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc index 8ff6c529b0..24864f9542 100644 --- a/runtime/thread_android.cc +++ b/runtime/thread_android.cc @@ -21,7 +21,7 @@ #include <sys/resource.h> #include <sys/time.h> -#include <cutils/sched_policy.h> +#include <processgroup/sched_policy.h> #include <utils/threads.h> #include "base/macros.h" diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc index 8dfb402395..3dc27892aa 100644 --- a/test/674-hiddenapi/hiddenapi.cc +++ b/test/674-hiddenapi/hiddenapi.cc @@ -82,6 +82,14 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_appendToBootClassLoader( return int_index; } +extern "C" JNIEXPORT void JNICALL Java_Main_setWhitelistAll(JNIEnv*, jclass, jboolean value) { + std::vector<std::string> exemptions; + if (value != JNI_FALSE) { + exemptions.push_back("L"); + } + Runtime::Current()->SetHiddenApiExemptions(exemptions); +} + static jobject NewInstance(JNIEnv* env, jclass klass) { jmethodID constructor = env->GetMethodID(klass, "<init>", "()V"); if (constructor == nullptr) { diff --git a/test/674-hiddenapi/src-art/Main.java b/test/674-hiddenapi/src-art/Main.java index 190f4ace65..d6a8c6dbd8 100644 --- a/test/674-hiddenapi/src-art/Main.java +++ b/test/674-hiddenapi/src-art/Main.java @@ -119,9 +119,8 @@ public class Main { // loaded by their parent class loader. String nativeLibCopy = createNativeLibCopy(parentDomain, childDomain, whitelistAllApis); - if (whitelistAllApis) { - VMRuntime.getRuntime().setHiddenApiExemptions(new String[]{"L"}); - } + // Set exemptions to "L" (matches all classes) if we are testing whitelisting. + setWhitelistAll(whitelistAllApis); // Invoke ChildClass.runTest Class<?> childClass = Class.forName("ChildClass", true, childLoader); @@ -129,8 +128,6 @@ public class Main { "runTest", String.class, Integer.TYPE, Integer.TYPE, Boolean.TYPE); runTestMethod.invoke(null, nativeLibCopy, parentDomain.ordinal(), childDomain.ordinal(), whitelistAllApis); - - VMRuntime.getRuntime().setHiddenApiExemptions(new String[0]); } // Routine which tries to figure out the absolute path of our native library. @@ -203,4 +200,5 @@ public class Main { private static native int appendToBootClassLoader(String dexPath, boolean isCorePlatform); private static native void setDexDomain(int index, boolean isCorePlatform); private static native void init(); + private static native void setWhitelistAll(boolean value); } diff --git a/tools/libcore_network_failures.txt b/tools/libcore_network_failures.txt index e7e31dbe67..380f56bce2 100644 --- a/tools/libcore_network_failures.txt +++ b/tools/libcore_network_failures.txt @@ -8,7 +8,7 @@ description: "Ignore failure of network-related tests on new devices running Android O", result: EXEC_FAILED, bug: 74725685, - modes: [device], + modes: [device_testdex], names: ["libcore.libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet", "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection", "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection", |
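The largest change above replaces the compile-time kEnableGenerationalConcurrentCopyingCollection constant with a per-Heap use_generational_cc_ flag controlled by the new -Xgc:[no]generational_cc option. A minimal standalone sketch of the option handling, mirroring the cmdline_types.h and runtime_globals.h hunks; ParseGenerationalCcOption is a hypothetical helper name, not part of the patch, and the hard-coded default stands in for the ART_USE_GENERATIONAL_CC build flag:

    #include <string>

    // Build-time default; in ART this is kEnableGenerationalCCByDefault from
    // runtime_globals.h, driven by ART_USE_GENERATIONAL_CC.
    constexpr bool kEnableGenerationalCCByDefault = true;

    // Mirrors the new "-Xgc:" sub-option handling added in cmdline_types.h.
    bool ParseGenerationalCcOption(const std::string& gc_option) {
      bool generational_cc = kEnableGenerationalCCByDefault;
      if (gc_option == "generational_cc") {
        generational_cc = true;
      } else if (gc_option == "nogenerational_cc") {
        generational_cc = false;
      }
      return generational_cc;
    }

As the runtime.cc hunk shows, the value that finally reaches the Heap is additionally gated on Baker read barriers (use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc), since sticky-bit CC is only compatible with that read-barrier style; Heap then forwards the flag to both ConcurrentCopying collectors and to RegionSpace::Create().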
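dexdiag now reads page residency through libmeminfo instead of the removed libpagemap. A self-contained sketch of the new call pattern, using only the ProcMemInfo/Vma/page_present APIs that appear in the dexdiag.cc hunk; CountResidentPages is a hypothetical helper, not part of the patch:

    #include <sys/types.h>

    #include <cstdint>
    #include <iostream>
    #include <vector>

    #include <meminfo/pageacct.h>
    #include <meminfo/procmeminfo.h>

    // Count resident pages across all mappings of a process, mirroring the
    // ProcMemInfo usage that replaces pm_process_create()/pm_process_maps().
    size_t CountResidentPages(pid_t pid) {
      android::meminfo::ProcMemInfo proc(pid);                       // was pm_process_create()
      const std::vector<android::meminfo::Vma>& maps = proc.Maps();  // was pm_process_maps()
      size_t resident = 0;
      for (const android::meminfo::Vma& vma : maps) {
        std::vector<uint64_t> pagemap;  // one uint64_t per page, owned by the vector
        if (!proc.PageMap(vma, &pagemap)) {                          // was pm_map_pagemap()
          std::cerr << "Error creating pagemap for " << vma.name << std::endl;
          continue;
        }
        for (uint64_t entry : pagemap) {
          if (android::meminfo::page_present(entry)) {               // was PM_PAGEMAP_PRESENT()
            ++resident;
          }
        }
      }
      return resident;
    }

Because PageMap() fills a std::vector, the explicit free(pagemap) calls from the libpagemap version are no longer needed, which is why they disappear from DisplayMappingIfFromVdexFile and DisplayMappingIfFromOatFile.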
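The art_dex_file_loader.cc hunk extends the hidden-API domain assignment: dex files on the system framework keep the kPlatform domain, and dex files inside the runtime APEX (LocationIsOnRuntimeModule, i.e. under /apex/com.android.runtime per the file_utils.cc hunk) now get kCorePlatform. A rough standalone illustration of that decision follows; the /system/framework/ prefix and the kApplication fallback are assumptions for the sketch, not taken from the patch:

    #include <string>

    enum class Domain { kApplication, kPlatform, kCorePlatform };

    // Hypothetical mirror of the domain choice in ArtDexFileLoader::OpenCommon,
    // operating on the canonical (realpath'd) dex location.
    Domain DomainForDexLocation(const std::string& canonical_path) {
      auto starts_with = [&](const char* prefix) {
        return canonical_path.rfind(prefix, 0) == 0;
      };
      if (starts_with("/system/framework/")) {         // assumed LocationIsOnSystemFramework()
        return Domain::kPlatform;
      }
      if (starts_with("/apex/com.android.runtime")) {  // LocationIsOnRuntimeModule()
        return Domain::kCorePlatform;
      }
      return Domain::kApplication;                     // assumed default: untrusted app domain
    }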