author     2024-01-09 20:55:01 +0000
committer  2024-01-17 15:14:35 +0000
commit     0a4ae19b18601966e6a87d889f6d52b2d1fd7cf3
tree       fed431adc2c4c1fb63df44853231eea30f0e1b45
parent     0f62043c1670cd365aba1894ad8046cdfc1c905d
Add visibility attributes in runtime/gc
Bug: 260881207
Test: presubmit
Test: abtd app_compat_drm
Test: abtd app_compat_top_100
Test: abtd app_compat_banking
Change-Id: I34de0d083ec0bb476bb39cc31a2f64d15c80fe7b
30 files changed, 116 insertions, 99 deletions
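For context, the HIDDEN and EXPORT tokens that appear throughout this change are macros defined in ART's base/macros.h, which is not part of this diff. A minimal sketch of how such visibility macros are typically defined and used follows; the exact ART definitions may differ, and the Heap members shown are abbreviated for illustration only.

// visibility_sketch.h -- illustrative only; the real macros live in art's base/macros.h.
// Marking the enclosing namespace HIDDEN makes every symbol in it local to the shared
// library by default; individual declarations are then re-exposed with EXPORT.
#if defined(__GNUC__) || defined(__clang__)
#define HIDDEN __attribute__((visibility("hidden")))
#define EXPORT __attribute__((visibility("default")))
#else
#define HIDDEN
#define EXPORT
#endif

namespace art HIDDEN {  // symbols in this namespace default to hidden visibility

class Heap {
 public:
  EXPORT void CollectGarbage(bool clear_soft_references);  // still visible across library boundaries
  void TrimSpaces();  // hidden: callable only from within the defining shared object
};

}  // namespace art

With this pattern, only the members explicitly marked EXPORT remain in the dynamic symbol table, which shrinks the exported API surface and gives the linker more room to optimize internal calls.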
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index f286c6cf6e..15daab2136 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -26,7 +26,7 @@
 #include "handle.h"
 #include "obj_ptr.h"
 
-namespace art {
+namespace art HIDDEN {
 
 namespace mirror {
 class Class;
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index f0d379fde6..f98edf4aa8 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -27,7 +27,7 @@
 #include <android-base/properties.h>
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 8b4cc67b91..8273ea4df8 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -20,11 +20,12 @@
 #include <list>
 #include <memory>
 
+#include "base/macros.h"
 #include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class ArtMethod;
 class IsMarkedVisitor;
@@ -215,11 +216,8 @@ class AllocRecordObjectMap {
   // Caller needs to check that it is enabled before calling since we read the stack trace before
   // checking the enabled boolean.
-  void RecordAllocation(Thread* self,
-                        ObjPtr<mirror::Object>* obj,
-                        size_t byte_count)
-      REQUIRES(!Locks::alloc_tracker_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  EXPORT void RecordAllocation(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
+      REQUIRES(!Locks::alloc_tracker_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index fb298379c9..71188868b2 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -19,7 +19,9 @@
 #include <iosfwd>
 
-namespace art {
+#include "base/macros.h"
+
+namespace art HIDDEN {
 namespace gc {
 
 // Different types of allocators.
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 290860136b..3c19079c08 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -19,7 +19,9 @@
 #include <iosfwd>
 
-namespace art {
+#include "base/macros.h"
+
+namespace art HIDDEN {
 namespace gc {
 
 // Which types of collections are able to be performed.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 02fe2f975c..ec213e50e8 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -23,7 +23,7 @@
 #include <ostream>
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 const char* PrettyCause(GcCause cause) {
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 5c039b31ee..e035510969 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -19,7 +19,9 @@
 #include <iosfwd>
 
-namespace art {
+#include "base/macros.h"
+
+namespace art HIDDEN {
 namespace gc {
 
 // What caused the GC?
diff --git a/runtime/gc/gc_pause_listener.h b/runtime/gc/gc_pause_listener.h
index da35d2a0de..a626a3ca29 100644
--- a/runtime/gc/gc_pause_listener.h
+++ b/runtime/gc/gc_pause_listener.h
@@ -17,7 +17,9 @@
 #ifndef ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
 #define ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
 
-namespace art {
+#include "base/macros.h"
+
+namespace art HIDDEN {
 namespace gc {
 
 class GcPauseListener {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9d4b4a0986..5d6e149b98 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -38,7 +38,7 @@
 #include "verify_object.h"
 #include "write_barrier-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
index a235c44033..2b719ee241 100644
--- a/runtime/gc/heap-visit-objects-inl.h
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -29,7 +29,7 @@
 #include "thread-current-inl.h"
 #include "thread_list.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 // Visit objects when threads aren't suspended. If concurrent moving
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c10f41719a..b7821a3e98 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -108,7 +108,7 @@
 #include "verify_object-inl.h"
 #include "well_known_classes.h"
 
-namespace art {
+namespace art HIDDEN {
 
 #ifdef ART_TARGET_ANDROID
 namespace {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ab7a95f55a..d0944e0f55 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -49,7 +49,7 @@
 #include "scoped_thread_state_change.h"
 #include "verify_object.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class ConditionVariable;
 enum class InstructionSet;
@@ -387,8 +387,8 @@ class Heap {
   bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Enables us to compacting GC until objects are released.
-  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
-  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+  EXPORT void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+  EXPORT void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
 
   // Temporarily disable thread flip for JNI critical calls.
   void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
@@ -407,7 +407,7 @@ class Heap {
 
   // Initiates an explicit garbage collection. Guarantees that a GC started after this call has
   // completed.
-  void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
+  EXPORT void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   // Does a concurrent GC, provided the GC numbered requested_gc_num has not already been
@@ -485,7 +485,8 @@ class Heap {
   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
   // waited for. Only waits for running collections, ignoring a requested but unstarted GC. Only
   // heuristic, since a new GC may have started by the time we return.
-  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
+  EXPORT collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
+      REQUIRES(!*gc_complete_lock_);
 
   // Update the heap's process state to a new value, may cause compaction to occur.
   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
@@ -554,7 +555,7 @@ class Heap {
     return rb_table_.get();
   }
 
-  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
+  EXPORT void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
 
   // Returns the number of bytes currently allocated.
   // The result should be treated as an approximation, if it is being concurrently updated.
@@ -605,7 +606,7 @@ class Heap {
 
   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
   // consumed by an application.
-  size_t GetTotalMemory() const;
+  EXPORT size_t GetTotalMemory() const;
 
   // Returns approximately how much free memory we have until the next GC happens.
   size_t GetFreeMemoryUntilGC() const {
@@ -628,7 +629,8 @@ class Heap {
   // Get the space that corresponds to an object's address. Current implementation searches all
   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
   // TODO: consider using faster data structure like binary tree.
-  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
+  EXPORT space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>,
+                                                               bool fail_ok) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
@@ -638,7 +640,7 @@ class Heap {
                                                          bool fail_ok) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
+  EXPORT space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   space::Space* FindSpaceFromAddress(const void* ptr) const
@@ -654,7 +656,7 @@ class Heap {
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   // Deflate monitors, ... and trim the spaces.
-  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
+  EXPORT void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
 
   void RevokeThreadLocalBuffers(Thread* thread);
   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
@@ -683,12 +685,11 @@ class Heap {
   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
 
   // Mark and empty stack.
-  void FlushAllocStack()
-      REQUIRES_SHARED(Locks::mutator_lock_)
+  EXPORT void FlushAllocStack() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
   // Revoke all the thread-local allocation stacks.
-  void RevokeAllThreadLocalAllocationStacks(Thread* self)
+  EXPORT void RevokeAllThreadLocalAllocationStacks(Thread* self)
       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
 
   // Mark all the objects in the allocation stack in the specified bitmap.
@@ -715,7 +716,9 @@ class Heap {
     return boot_image_spaces_;
   }
 
-  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
+  // TODO(b/260881207): refactor to only use this function in debug builds and
+  // remove EXPORT.
+  EXPORT bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsInBootImageOatFile(const void* p) const
@@ -772,7 +775,7 @@ class Heap {
   }
 
   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
-  std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
+  EXPORT std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // GC performance measuring
   void DumpGcPerformanceInfo(std::ostream& os)
@@ -921,9 +924,7 @@ class Heap {
   // Also update state (bytes_until_sample).
   // By calling JHPCheckNonTlabSampleAllocation from different functions for Large allocations and
   // non-moving allocations we are able to use the stack to identify these allocations separately.
-  void JHPCheckNonTlabSampleAllocation(Thread* self,
-                                       mirror::Object* ret,
-                                       size_t alloc_size);
+  EXPORT void JHPCheckNonTlabSampleAllocation(Thread* self, mirror::Object* ret, size_t alloc_size);
   // In Tlab case: Calculate the next tlab size (location of next sample point) and whether
   // a sample should be taken.
   size_t JHPCalculateNextTlabSize(Thread* self,
@@ -984,31 +985,31 @@ class Heap {
   bool IsGCDisabledForShutdown() const REQUIRES(!*gc_complete_lock_);
 
   // Create a new alloc space and compact default alloc space to it.
-  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
+  EXPORT HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
       REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
 
-  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
+  EXPORT bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
 
   // Install an allocation listener.
-  void SetAllocationListener(AllocationListener* l);
+  EXPORT void SetAllocationListener(AllocationListener* l);
   // Remove an allocation listener. Note: the listener must not be deleted, as for performance
   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
-  void RemoveAllocationListener();
+  EXPORT void RemoveAllocationListener();
 
   // Install a gc pause listener.
-  void SetGcPauseListener(GcPauseListener* l);
+  EXPORT void SetGcPauseListener(GcPauseListener* l);
   // Get the currently installed gc pause listener, or null.
   GcPauseListener* GetGcPauseListener() {
     return gc_pause_listener_.load(std::memory_order_acquire);
   }
   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
-  void RemoveGcPauseListener();
+  EXPORT void RemoveGcPauseListener();
 
-  const Verification* GetVerification() const;
+  EXPORT const Verification* GetVerification() const;
 
   void PostForkChildAction(Thread* self) REQUIRES(!*gc_complete_lock_);
 
-  void TraceHeapSize(size_t heap_size);
+  EXPORT void TraceHeapSize(size_t heap_size);
 
   bool AddHeapTask(gc::HeapTask* task);
@@ -1114,17 +1115,16 @@ class Heap {
   // attempt failed.
   // Called with thread suspension disallowed, but re-enables it, and may suspend, internally.
   // Returns null if instrumentation or the allocator changed.
-  mirror::Object* AllocateInternalWithGc(Thread* self,
-                                         AllocatorType allocator,
-                                         bool instrumented,
-                                         size_t num_bytes,
-                                         size_t* bytes_allocated,
-                                         size_t* usable_size,
-                                         size_t* bytes_tl_bulk_allocated,
-                                         ObjPtr<mirror::Class>* klass)
+  EXPORT mirror::Object* AllocateInternalWithGc(Thread* self,
+                                                AllocatorType allocator,
+                                                bool instrumented,
+                                                size_t num_bytes,
+                                                size_t* bytes_allocated,
+                                                size_t* usable_size,
+                                                size_t* bytes_tl_bulk_allocated,
+                                                ObjPtr<mirror::Class>* klass)
       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
-      REQUIRES(Roles::uninterruptible_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Allocate into a specific space.
   mirror::Object* AllocateInto(Thread* self,
@@ -1148,13 +1148,13 @@ class Heap {
                                       size_t* bytes_tl_bulk_allocated)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  mirror::Object* AllocWithNewTLAB(Thread* self,
-                                   AllocatorType allocator_type,
-                                   size_t alloc_size,
-                                   bool grow,
-                                   size_t* bytes_allocated,
-                                   size_t* usable_size,
-                                   size_t* bytes_tl_bulk_allocated)
+  EXPORT mirror::Object* AllocWithNewTLAB(Thread* self,
+                                          AllocatorType allocator_type,
+                                          size_t alloc_size,
+                                          bool grow,
+                                          size_t* bytes_allocated,
+                                          size_t* usable_size,
+                                          size_t* bytes_tl_bulk_allocated)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
@@ -1177,12 +1177,11 @@ class Heap {
   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
       REQUIRES(!*pending_task_lock_);
 
-  void RequestConcurrentGCAndSaveObject(Thread* self,
-                                        bool force_full,
-                                        uint32_t observed_gc_num,
-                                        ObjPtr<mirror::Object>* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*pending_task_lock_);
+  EXPORT void RequestConcurrentGCAndSaveObject(Thread* self,
+                                               bool force_full,
+                                               uint32_t observed_gc_num,
+                                               ObjPtr<mirror::Object>* obj)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_);
 
   static constexpr uint32_t GC_NUM_ANY = std::numeric_limits<uint32_t>::max();
@@ -1261,9 +1260,10 @@ class Heap {
   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
-  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
+  EXPORT void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread,
+                                                             ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
+          REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
   void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
@@ -1294,10 +1294,11 @@ class Heap {
   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
 
   // GC stress mode attempts to do one GC per unique backtrace.
-  void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
-               !*backtrace_lock_, !process_state_update_lock_);
+  EXPORT void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_,
+                                                     !*pending_task_lock_,
+                                                     !*backtrace_lock_,
+                                                     !process_state_update_lock_);
 
   collector::GcType NonStickyGcType() const {
     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
@@ -1322,7 +1323,9 @@ class Heap {
   void IncrementFreedEver();
 
   // Remove a vlog code from heap-inl.h which is transitively included in half the world.
-  static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
+  EXPORT static void VlogHeapGrowth(size_t max_allowed_footprint,
+                                    size_t new_footprint,
+                                    size_t alloc_size);
 
   // Return our best approximation of the number of bytes of native memory that
   // are currently in use, and could possibly be reclaimed as an indirect result
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 42ba911062..bd8fdc6b46 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -28,7 +28,7 @@
 #include "mirror/object_array-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class HeapTest : public CommonRuntimeTest {
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index a7583fe7f1..168feefea0 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -28,7 +28,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "verification-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class VerificationTest : public CommonRuntimeTest {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index f24c94279c..cb777c895c 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -36,7 +36,7 @@
 #include "thread_pool.h"
 #include "well_known_classes.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 static constexpr bool kAsyncReferenceQueueAdd = false;
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 0f84211a87..48aff6c2a2 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -17,12 +17,13 @@
 #ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 #define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 
+#include "base/macros.h"
 #include "base/locks.h"
 #include "jni.h"
 #include "reference_queue.h"
 #include "runtime_globals.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class IsMarkedVisitor;
 class TimingLogger;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 53eef9c027..82fd89ecb1 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -25,7 +25,7 @@
 #include "mirror/reference-inl.h"
 #include "object_callbacks.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 69f04d783a..d43eb347a9 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -23,6 +23,7 @@
 #include "base/atomic.h"
 #include "base/locks.h"
+#include "base/macros.h"
 #include "base/timing_logger.h"
 #include "jni.h"
 #include "obj_ptr.h"
@@ -30,7 +31,7 @@
 #include "runtime_globals.h"
 #include "thread_pool.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class Mutex;
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index c8e71b02ac..2b5c3fdea7 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -23,7 +23,7 @@
 #include "reference_queue.h"
 #include "scoped_thread_state_change-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class ReferenceQueueTest : public CommonRuntimeTest {
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index 7a0a6e8736..368649a563 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -21,7 +21,7 @@
 #include "runtime.h"
 #include "thread-current-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 const char* GCCriticalSection::Enter(GcCause cause, CollectorType type) {
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 8ad01580c2..8481767667 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -18,10 +18,11 @@
 #define ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 
 #include "base/locks.h"
+#include "base/macros.h"
 #include "collector_type.h"
 #include "gc_cause.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class Thread;
 
@@ -35,10 +36,10 @@ class GCCriticalSection {
   ~GCCriticalSection() {}
 
   // Starts a GCCriticalSection. Returns the previous no-suspension reason.
-  const char* Enter(GcCause cause, CollectorType type) ACQUIRE(Roles::uninterruptible_);
+  EXPORT const char* Enter(GcCause cause, CollectorType type) ACQUIRE(Roles::uninterruptible_);
 
   // Ends a GCCriticalSection. Takes the old no-suspension reason.
-  void Exit(const char* old_reason) RELEASE(Roles::uninterruptible_);
+  EXPORT void Exit(const char* old_reason) RELEASE(Roles::uninterruptible_);
 
  private:
  Thread* const self_;
@@ -50,9 +51,9 @@ class GCCriticalSection {
 // suspended.
 class ScopedGCCriticalSection {
  public:
-  ScopedGCCriticalSection(Thread* self, GcCause cause, CollectorType collector_type)
+  EXPORT ScopedGCCriticalSection(Thread* self, GcCause cause, CollectorType collector_type)
       ACQUIRE(Roles::uninterruptible_);
-  ~ScopedGCCriticalSection() RELEASE(Roles::uninterruptible_);
+  EXPORT ~ScopedGCCriticalSection() RELEASE(Roles::uninterruptible_);
 
  private:
   GCCriticalSection critical_section_;
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 57d593c756..721977d596 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -17,11 +17,12 @@
 #ifndef ART_RUNTIME_GC_SYSTEM_WEAK_H_
 #define ART_RUNTIME_GC_SYSTEM_WEAK_H_
 
+#include "base/macros.h"
 #include "base/mutex.h"
 #include "object_callbacks.h"
 #include "thread-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class AbstractSystemWeakHolder {
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index dd936538e5..b2a8c4d179 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -31,7 +31,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "thread_list.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class SystemWeakTest : public CommonRuntimeTest {
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index e56dbd17c3..50be93e844 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -19,7 +19,7 @@
 #include "base/time_utils.h"
 #include "scoped_thread_state_change-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 TaskProcessor::TaskProcessor()
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index b9e6938b09..65d703d082 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -20,11 +20,12 @@
 #include <memory>
 #include <set>
 
+#include "base/macros.h"
 #include "base/mutex.h"
 #include "runtime_globals.h"
 #include "thread_pool.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class HeapTask : public SelfDeletingTask {
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 3614a51d90..5ee49415a5 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -20,7 +20,7 @@
 #include "thread-current-inl.h"
 #include "thread_pool.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 class TaskProcessorTest : public CommonRuntimeTest {
diff --git a/runtime/gc/verification-inl.h b/runtime/gc/verification-inl.h
index 1ef96e2954..6becd26f4e 100644
--- a/runtime/gc/verification-inl.h
+++ b/runtime/gc/verification-inl.h
@@ -21,7 +21,7 @@
 #include "mirror/class-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 template <ReadBarrierOption kReadBarrierOption>
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 8ef61cbea0..926156fd8f 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -25,7 +25,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace gc {
 
 std::string Verification::DumpRAMAroundAddress(uintptr_t addr, uintptr_t bytes) const {
diff --git a/runtime/gc/verification.h b/runtime/gc/verification.h
index 7a5d01a40a..0a9f78a26f 100644
--- a/runtime/gc/verification.h
+++ b/runtime/gc/verification.h
@@ -17,11 +17,12 @@
 #ifndef ART_RUNTIME_GC_VERIFICATION_H_
 #define ART_RUNTIME_GC_VERIFICATION_H_
 
+#include "base/macros.h"
 #include "obj_ptr.h"
 #include "offsets.h"
 #include "read_barrier_option.h"
 
-namespace art {
+namespace art HIDDEN {
 
 namespace mirror {
 class Class;
@@ -45,10 +46,10 @@ class Verification {
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Don't use ObjPtr for things that might not be aligned like the invalid reference.
-  void LogHeapCorruption(ObjPtr<mirror::Object> holder,
-                         MemberOffset offset,
-                         mirror::Object* ref,
-                         bool fatal) const REQUIRES_SHARED(Locks::mutator_lock_);
+  EXPORT void LogHeapCorruption(ObjPtr<mirror::Object> holder,
+                                MemberOffset offset,
+                                mirror::Object* ref,
+                                bool fatal) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if the klass is likely to be a valid mirror::Class.
   // Returns true if the class is a valid mirror::Class or possibly spuriously.
@@ -68,7 +69,7 @@ class Verification {
   // Find the first path to the target from the root set. Should be called while paused since
   // visiting roots is not safe otherwise.
-  std::string FirstPathFromRootSet(ObjPtr<mirror::Object> target) const
+  EXPORT std::string FirstPathFromRootSet(ObjPtr<mirror::Object> target) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Does not check alignment, used by DumpRAMAroundAddress.
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
index 0784d3c738..316527fce5 100644
--- a/runtime/gc/weak_root_state.h
+++ b/runtime/gc/weak_root_state.h
@@ -19,7 +19,9 @@
 #include <iosfwd>
 
-namespace art {
+#include "base/macros.h"
+
+namespace art HIDDEN {
 namespace gc {
 
 enum WeakRootState {