-rw-r--r--  cmdline/cmdline_parser.h                        |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       |   8
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |   9
-rw-r--r--  compiler/utils/swap_space.h                     |   2
-rw-r--r--  dex2oat/dex2oat.cc                              |   2
-rw-r--r--  runtime/base/mutex.h                            |   6
-rw-r--r--  runtime/dex_file_annotations.cc                 |   2
-rw-r--r--  runtime/gc/accounting/card_table.h              |   1
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc        |   6
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h   |   3
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc      | 112
-rw-r--r--  runtime/gc/collector/concurrent_copying.h       |   5
-rw-r--r--  runtime/gc/collector/mark_compact.cc            |   2
-rw-r--r--  runtime/oat.h                                   |   3
-rw-r--r--  runtime/thread.cc                               |   7
15 files changed, 122 insertions(+), 48 deletions(-)
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index d82fd488e9..32480dd915 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -612,7 +612,7 @@ struct CmdlineParser {
 template <typename TVariantMap,
           template <typename TKeyValue> class TVariantMapKey>
 template <typename TArg>
-CmdlineParser<TVariantMap, TVariantMapKey>::ArgumentBuilder<TArg>
+typename CmdlineParser<TVariantMap, TVariantMapKey>::template ArgumentBuilder<TArg>
 CmdlineParser<TVariantMap, TVariantMapKey>::CreateArgumentBuilder(
     CmdlineParser<TVariantMap, TVariantMapKey>::Builder& parent) {
   return CmdlineParser<TVariantMap, TVariantMapKey>::ArgumentBuilder<TArg>(
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 08a752f1d2..8afc6bab89 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7177,10 +7177,10 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
           instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
-      // Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
-      const int32_t entry_point_offset =
-          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(root.reg());
-      __ fs()->cmpl(Address::Absolute(entry_point_offset), Immediate(0));
+      // Test if the GC is marking. Note that X86 and X86_64 don't switch the entrypoints when the
+      // GC is marking.
+      const int32_t is_marking_offset = Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value();
+      __ fs()->cmpl(Address::Absolute(is_marking_offset), Immediate(0));
       // The entrypoint is null when the GC is not marking.
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ff6e099d12..c2b1a3190d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6542,10 +6542,11 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
           instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
-      // Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
-      const int32_t entry_point_offset =
-          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
-      __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+      // Test if the GC is marking. Note that X86 and X86_64 don't switch the entrypoints when the
+      // GC is marking.
+      const int32_t is_marking_offset =
+          Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value();
+      __ gs()->cmpl(Address::Absolute(is_marking_offset, /* no_rip */ true), Immediate(0));
       // The entrypoint is null when the GC is not marking.
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index c286b820fe..0ff9fc69ed 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -78,7 +78,7 @@ class SwapSpace {
     mutable FreeByStartSet::const_iterator free_by_start_entry;
   };
   struct FreeBySizeComparator {
-    bool operator()(const FreeBySizeEntry& lhs, const FreeBySizeEntry& rhs) {
+    bool operator()(const FreeBySizeEntry& lhs, const FreeBySizeEntry& rhs) const {
       if (lhs.size != rhs.size) {
         return lhs.size < rhs.size;
       } else {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 4cba36aef6..ea82133751 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2406,6 +2406,8 @@ class Dex2Oat FINAL {
     if (!IsBootImage()) {
       raw_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
     }
+    // Never allow implicit image compilation.
+    raw_options.push_back(std::make_pair("-Xnoimage-dex2oat", nullptr));
     // Disable libsigchain. We don't don't need it during compilation and it prevents us
     // from getting a statically linked version of dex2oat (because of dlsym and RTLD_NEXT).
     raw_options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 2414b5f937..03ae63a068 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -373,19 +373,19 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   bool IsSharedHeld(const Thread* self) const;
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
-  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+  ALWAYS_INLINE void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != null.
       CHECK(IsSharedHeld(self) || self == nullptr) << *this;
     }
   }
-  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+  ALWAYS_INLINE void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     AssertSharedHeld(self);
   }
 
   // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
   // mode.
-  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
+  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsSharedHeld(self)) << *this;
     }
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 7d56bca6ce..13979160bd 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -1135,7 +1135,7 @@ mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
 bool GetParametersMetadataForMethod(ArtMethod* method,
                                     MutableHandle<mirror::ObjectArray<mirror::String>>* names,
                                     MutableHandle<mirror::IntArray>* access_flags) {
-  const DexFile::AnnotationSetItem::AnnotationSetItem* annotation_set =
+  const DexFile::AnnotationSetItem* annotation_set =
       FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return false;
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 68ef15d0cf..14aa730745 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -51,6 +51,7 @@ class CardTable {
   static constexpr size_t kCardSize = 1 << kCardShift;
   static constexpr uint8_t kCardClean = 0x0;
   static constexpr uint8_t kCardDirty = 0x70;
+  static constexpr uint8_t kCardAged = kCardDirty - 1;
 
   static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
   ~CardTable();
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 34e30c177f..c416b9cc3d 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -391,7 +391,7 @@ void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* a
     uintptr_t end = start + CardTable::kCardSize;
     live_bitmap->VisitMarkedRange(start,
                                   end,
-                                  [this, callback, arg](mirror::Object* obj) {
+                                  [callback, arg](mirror::Object* obj) {
       callback(obj, arg);
     });
   }
@@ -402,7 +402,7 @@ void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* a
     uintptr_t end = start + CardTable::kCardSize;
     live_bitmap->VisitMarkedRange(start,
                                   end,
-                                  [this, callback, arg](mirror::Object* obj) {
+                                  [callback, arg](mirror::Object* obj) {
       callback(obj, arg);
     });
   }
@@ -560,7 +560,7 @@ void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
         << start << " " << *space_;
     space_->GetLiveBitmap()->VisitMarkedRange(start,
                                               start + CardTable::kCardSize,
-                                              [this, callback, arg](mirror::Object* obj) {
+                                              [callback, arg](mirror::Object* obj) {
       callback(obj, arg);
     });
   });
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index dd449f991b..1f06f15ae6 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -152,7 +152,8 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
 
 inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
   mirror::Object* ret;
-  if (from_ref == nullptr) {
+  // We can get here before marking starts since we gray immune objects before the marking phase.
+  if (from_ref == nullptr || !Thread::Current()->GetIsGcMarking()) {
     return from_ref;
   }
   // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4192f34879..9c3ce0b413 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -163,6 +163,12 @@ void ConcurrentCopying::RunPhases() {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
     InitializePhase();
   }
+  if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+    // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards in
+    // the pause.
+    ReaderMutexLock mu(self, *Locks::mutator_lock_);
+    GrayAllDirtyImmuneObjects();
+  }
   FlipThreadRoots();
   {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
@@ -352,9 +358,12 @@ class ConcurrentCopying::FlipCallback : public Closure {
     if (kVerifyNoMissingCardMarks) {
       cc->VerifyNoMissingCardMarks();
     }
-    CHECK(thread == self);
+    CHECK_EQ(thread, self);
     Locks::mutator_lock_->AssertExclusiveHeld(self);
-    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
+    {
+      TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
+      cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
+    }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
       cc->RecordLiveStackFreezeSize(self);
@@ -368,11 +377,11 @@ class ConcurrentCopying::FlipCallback : public Closure {
     }
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
       CHECK(Runtime::Current()->IsAotCompiler());
-      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
+      TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
       Runtime::Current()->VisitTransactionRoots(cc);
     }
     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
-      cc->GrayAllDirtyImmuneObjects();
+      cc->GrayAllNewlyDirtyImmuneObjects();
       if (kIsDebugBuild) {
         // Check that all non-gray immune objects only refernce immune objects.
         cc->VerifyGrayImmuneObjects();
@@ -519,8 +528,8 @@ class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
 
 void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
   auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-  // Objects not on dirty cards should never have references to newly allocated regions.
-  if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
+  // Objects not on dirty or aged cards should never have references to newly allocated regions.
+  if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
     VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
     obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
         visitor,
@@ -583,53 +592,100 @@ void ConcurrentCopying::FlipThreadRoots() {
   }
 }
 
+template <bool kConcurrent>
 class ConcurrentCopying::GrayImmuneObjectVisitor {
  public:
-  explicit GrayImmuneObjectVisitor() {}
+  explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
 
   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (kUseBakerReadBarrier) {
-      if (kIsDebugBuild) {
-        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+    if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::WhiteState()) {
+      if (kConcurrent) {
+        Locks::mutator_lock_->AssertSharedHeld(self_);
+        obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
+        // Mod union table VisitObjects may visit the same object multiple times so we can't check
+        // the result of the atomic set.
+      } else {
+        Locks::mutator_lock_->AssertExclusiveHeld(self_);
+        obj->SetReadBarrierState(ReadBarrier::GrayState());
       }
-      obj->SetReadBarrierState(ReadBarrier::GrayState());
     }
   }
 
   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
+    reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
   }
+
+ private:
+  Thread* const self_;
 };
 
 void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
-  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-  accounting::CardTable* const card_table = heap->GetCardTable();
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
+  accounting::CardTable* const card_table = heap_->GetCardTable();
+  Thread* const self = Thread::Current();
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+  VisitorType visitor(self);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
-    GrayImmuneObjectVisitor visitor;
-    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
+    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
     // Mark all the objects on dirty cards since these may point to objects in other space.
     // Once these are marked, the GC will eventually clear them later.
     // Table is non null for boot image and zygote spaces. It is only null for application image
    // spaces.
     if (table != nullptr) {
-      // TODO: Consider adding precleaning outside the pause.
       table->ProcessCards();
-      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
-      // Since the cards are recorded in the mod-union table and this is paused, we can clear
-      // the cards for the space (to madvise).
+      table->VisitObjects(&VisitorType::Callback, &visitor);
+      // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
+      // there would be races with the mutator marking new cards.
+    } else {
+      // Keep cards aged if we don't have a mod-union table since we may need to scan them in future
+      // GCs. This case is for app images.
+      card_table->ModifyCardsAtomic(
+          space->Begin(),
+          space->End(),
+          [](uint8_t card) {
+            return (card != gc::accounting::CardTable::kCardClean)
+                ? gc::accounting::CardTable::kCardAged
+                : card;
+          },
+          /* card modified visitor */ VoidFunctor());
+      card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
+                                               space->Begin(),
+                                               space->End(),
+                                               visitor,
+                                               gc::accounting::CardTable::kCardAged);
+    }
+  }
+}
+
+void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
+  TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
+  accounting::CardTable* const card_table = heap_->GetCardTable();
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+  Thread* const self = Thread::Current();
+  VisitorType visitor(self);
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
+    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+
+    // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
+    // also handles the mod-union table cards.
+    card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
+                                             space->Begin(),
+                                             space->End(),
+                                             visitor,
+                                             gc::accounting::CardTable::kCardDirty);
+    if (table != nullptr) {
+      // Add the cards to the mod-union table so that we can clear cards to save RAM.
+      table->ProcessCards();
       TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
       card_table->ClearCardRange(space->Begin(),
                                  AlignDown(space->End(), accounting::CardTable::kCardSize));
-    } else {
-      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
-      // pause because app image spaces are all dirty pages anyways.
-      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
     }
   }
-  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
+  // Since all of the objects that may point to other spaces are gray, we can avoid all the read
   // barriers in the immune spaces.
   updated_all_immune_objects_.StoreRelaxed(true);
 }
@@ -658,6 +714,7 @@ class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 
   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+      // Only need to scan gray objects.
      if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
@@ -707,6 +764,7 @@ void ConcurrentCopying::MarkingPhase() {
       if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
         table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
       } else {
+        // TODO: Scan only the aged cards.
         live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                       reinterpret_cast<uintptr_t>(space->Limit()),
                                       visitor);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f8773145f0..d8dc9f6b2b 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -162,6 +162,9 @@ class ConcurrentCopying : public GarbageCollector {
   void GrayAllDirtyImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
+  void GrayAllNewlyDirtyImmuneObjects()
+      REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   void VerifyGrayImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
@@ -336,7 +339,7 @@ class ConcurrentCopying : public GarbageCollector {
   class DisableMarkingCheckpoint;
   class DisableWeakRefAccessCallback;
   class FlipCallback;
-  class GrayImmuneObjectVisitor;
+  template <bool kConcurrent> class GrayImmuneObjectVisitor;
   class ImmuneSpaceScanObjVisitor;
   class LostCopyVisitor;
   class RefFieldsVisitor;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index cab293f23c..9d3d950a0f 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -140,7 +140,7 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
     }
   } else {
     DCHECK(!space_->HasAddress(obj));
-    auto slow_path = [this](const mirror::Object* ref)
+    auto slow_path = [](const mirror::Object* ref)
         REQUIRES_SHARED(Locks::mutator_lock_) {
       // Marking a large object, make sure its aligned as a sanity check.
       if (!IsAligned<kPageSize>(ref)) {
diff --git a/runtime/oat.h b/runtime/oat.h
index 05706252fa..84ca2a6f4e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,8 @@ class InstructionSetFeatures;
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  static constexpr uint8_t kOatVersion[] = { '1', '1', '9', '\0' };  // Add thread_local_limit.
+  // Go back to checking is_marking for x86 and x86_64.
+  static constexpr uint8_t kOatVersion[] = { '1', '2', '0', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 62a616b646..0a81bfdae7 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -134,6 +134,13 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking);
 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
   CHECK(kUseReadBarrier);
   tls32_.is_gc_marking = is_marking;
+  if (kUseReadBarrier && (kRuntimeISA == kX86_64 || kRuntimeISA == kX86)) {
+    // Disable entrypoint switching for X86 since we don't always check is_marking with the gray
+    // bit. This causes a race between GrayAllDirtyImmuneObjects and FlipThreadRoots where
+    // we may try to go slow path with a null entrypoint. The fix is to never do entrypoint
+    // switching for x86.
+    is_marking = true;
+  }
   UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, is_marking);
   ResetQuickAllocEntryPointsForThread(is_marking);
 }
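The gist of the x86/x86_64 change above: the GC-root fast path no longer tests the per-register pReadBarrierMarkReg entrypoint (which is installed and cleared as marking starts and stops, and would race with the new concurrent GrayAllDirtyImmuneObjects step), it tests the thread-local is_gc_marking flag instead, and thread.cc stops switching the x86 entrypoints altogether. The sketch below is an illustrative standalone model of that shape only; ThreadModel, mark_entrypoint, MarkSlowPath, and LoadGcRoot are hypothetical stand-ins, not ART's real Thread layout or read-barrier runtime.

// Illustrative model: a single thread-local "is marking" flag avoids the race
// that lazily-switched per-register entrypoint pointers can have. Not ART code.
#include <atomic>
#include <cstdio>

struct ThreadModel {
  // Analogous to tls32_.is_gc_marking: flipped once when marking starts/stops.
  std::atomic<bool> is_gc_marking{false};
  // Analogous to pReadBarrierMarkRegNN: may still be null right after marking starts.
  void* (*mark_entrypoint)(void*) = nullptr;
};

static void* MarkSlowPath(void* ref) {
  // Stand-in for the read-barrier mark slow path: would gray/forward the reference.
  std::printf("slow path taken for %p\n", ref);
  return ref;
}

// Fast path for loading a GC root, mirroring the emitted code:
//   cmpl fs:[is_gc_marking_offset], 0 ; jne slow_path
void* LoadGcRoot(ThreadModel* self, void* root) {
  if (self->is_gc_marking.load(std::memory_order_relaxed)) {
    return MarkSlowPath(root);  // flag is authoritative even if entrypoints lag behind
  }
  return root;
}

int main() {
  ThreadModel t;
  int obj = 0;
  LoadGcRoot(&t, &obj);                                      // marking off: no slow path
  t.is_gc_marking.store(true, std::memory_order_relaxed);    // GC starts marking
  LoadGcRoot(&t, &obj);                                      // slow path taken
}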