Diffstat (limited to 'runtime')
-rw-r--r--  runtime/base/mem_map_arena_pool.cc               |   2
-rw-r--r--  runtime/exec_utils_test.cc                       |  24
-rw-r--r--  runtime/gc/allocator/rosalloc.h                  |   4
-rw-r--r--  runtime/gc/heap-inl.h                            |   4
-rw-r--r--  runtime/gc/heap.cc                               |   3
-rw-r--r--  runtime/gc/space/large_object_space.cc           |   5
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space-inl.h  | 141
-rw-r--r--  runtime/gc/space/rosalloc_space.cc               |   6
-rw-r--r--  runtime/gc/space/rosalloc_space.h                |   4
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc    |   5
-rw-r--r--  runtime/jit/jit.cc                               |   2
-rw-r--r--  runtime/native_stack_dump.cc                     |   6
-rw-r--r--  runtime/runtime.cc                               |  10
-rw-r--r--  runtime/runtime_callbacks_test.cc                |   4
-rw-r--r--  runtime/thread.cc                                |  13
15 files changed, 128 insertions(+), 105 deletions(-)
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 9ac7886e5d..702f0e453b 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -125,7 +125,7 @@ size_t MemMapArenaPool::GetBytesAllocated() const {
 }
 
 void MemMapArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+  if (kRunningOnMemoryTool) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
       MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
diff --git a/runtime/exec_utils_test.cc b/runtime/exec_utils_test.cc
index 68edfa8b72..a9c1ea2ae0 100644
--- a/runtime/exec_utils_test.cc
+++ b/runtime/exec_utils_test.cc
@@ -36,8 +36,10 @@ TEST_F(ExecUtilsTest, ExecSuccess) {
     command.push_back("/usr/bin/id");
   }
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
   }
   EXPECT_EQ(0U, error_msg.size()) << error_msg;
@@ -50,8 +52,10 @@ TEST_F(ExecUtilsTest, ExecError) {
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_FALSE(error_msg.empty());
   }
@@ -72,8 +76,10 @@ TEST_F(ExecUtilsTest, EnvSnapshotAdditionsAreNotVisible) {
   }
   command.push_back(kModifiedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_NE(0U, error_msg.size()) << error_msg;
   }
@@ -97,8 +103,10 @@ TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
   }
   command.push_back(kDeletedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
     EXPECT_EQ(0U, error_msg.size()) << error_msg;
   }
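The mechanical substitution above replaces the old `RUNNING_ON_MEMORY_TOOL` runtime macro with a compile-time constant, so the `UNLIKELY(...)` hint and the `> 0` comparison become unnecessary. A minimal sketch of how such a constant can be derived at build time, assuming ASan is the only remaining memory tool (the flag names mirror identifiers in this diff, but the exact contents of ART's `base/memory_tool.h` are not shown here):

```cpp
// Hypothetical sketch: detect AddressSanitizer at compile time and expose it
// as constexpr flags. Clang defines __has_feature(address_sanitizer).
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define ART_SKETCH_MEMORY_TOOL 1  // Assumed detection macro for this sketch.
#endif
#endif

#ifdef ART_SKETCH_MEMORY_TOOL
constexpr bool kRunningOnMemoryTool = true;
constexpr bool kMemoryToolDetectsLeaks = true;
#else
constexpr bool kRunningOnMemoryTool = false;
constexpr bool kMemoryToolDetectsLeaks = false;
#endif
```

Because the flag is `constexpr`, an `if (kRunningOnMemoryTool)` branch is folded away entirely in regular builds, which is why the `UNLIKELY` annotations are dropped throughout this change.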
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 150fe956ae..30213d55c5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -625,7 +625,7 @@ class RosAlloc {
   // If true, check that the returned memory is actually zero.
   static constexpr bool kCheckZeroMemory = kIsDebugBuild;
-  // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
+  // Do not check memory when running under a memory tool. In a normal
   // build with kCheckZeroMemory the whole test should be optimized away.
   // TODO: Unprotect before checks.
   ALWAYS_INLINE bool ShouldCheckZeroMemory();
@@ -768,7 +768,7 @@ class RosAlloc {
   // greater than or equal to this value, release pages.
   const size_t page_release_size_threshold_;
 
-  // Whether this allocator is running under Valgrind.
+  // Whether this allocator is running on a memory tool.
   bool is_running_on_memory_tool_;
 
   // The base address of the memory region that's managed by this allocator.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 948d23303c..675686830e 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -272,7 +272,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
     }
     case kAllocatorTypeRosAlloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind or asan, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                                max_bytes_tl_bulk_allocated,
@@ -303,7 +303,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
     }
     case kAllocatorTypeDlMalloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         ret = dlmalloc_space_->Alloc(self,
                                      alloc_size,
                                      bytes_allocated,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 25ed652b41..8e3bbde224 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2248,7 +2248,8 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
       // Add a new bin with the remaining space.
       AddBin(size - alloc_size, pos + alloc_size);
     }
-    // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
+    // Copy the object over to its new location.
+    // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
     if (kUseBakerReadBarrier) {
       obj->AssertReadBarrierState();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 512cde484d..a24ca32314 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,8 +45,9 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
   }
 
   ~MemoryToolLargeObjectMapSpace() OVERRIDE {
-    // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
-    // freed since they are held live by the class linker.
+    // Historical note: We were deleting large objects to keep Valgrind happy if there were
+    // any large objects such as Dex cache arrays which aren't freed since they are held live
+    // by the class linker.
     MutexLock mu(Thread::Current(), lock_);
     for (auto& m : large_objects_) {
       delete m.second.mem_map;
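The sites above (`FreeArenaChain`, RosAlloc, the heap allocation fast paths) all talk to the memory tool through `MEMORY_TOOL_MAKE_*` macros. As a hedged illustration of how such macros commonly map onto the ASan client interface (the actual ART definitions live in a header that is not part of this diff):

```cpp
// Sketch only: plausible ASan-backed definitions for the MEMORY_TOOL_* macros
// used in this change. ASAN_POISON_MEMORY_REGION and
// ASAN_UNPOISON_MEMORY_REGION are the real macros from the sanitizer headers.
#include <sanitizer/asan_interface.h>

#define MEMORY_TOOL_MAKE_NOACCESS(p, s) ASAN_POISON_MEMORY_REGION(p, s)
#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) ASAN_UNPOISON_MEMORY_REGION(p, s)
#define MEMORY_TOOL_MAKE_DEFINED(p, s) ASAN_UNPOISON_MEMORY_REGION(p, s)
```

Unlike Valgrind, ASan does not distinguish "defined" from "undefined" memory, only addressable from poisoned, so both MAKE_UNDEFINED and MAKE_DEFINED reduce to unpoisoning; that loss of precision is one reason several Valgrind-specific checks in this change could simply be deleted.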
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index 8282f3dda7..c022171082 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -30,11 +30,14 @@ namespace space {
 namespace memory_tool_details {
 
 template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
-inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
-                                         size_t bytes_allocated, size_t usable_size,
-                                         size_t bytes_tl_bulk_allocated,
-                                         size_t* bytes_allocated_out, size_t* usable_size_out,
-                                         size_t* bytes_tl_bulk_allocated_out) {
+inline mirror::Object* AdjustForMemoryTool(void* obj_with_rdz,
+                                           size_t num_bytes,
+                                           size_t bytes_allocated,
+                                           size_t usable_size,
+                                           size_t bytes_tl_bulk_allocated,
+                                           size_t* bytes_allocated_out,
+                                           size_t* usable_size_out,
+                                           size_t* bytes_tl_bulk_allocated_out) {
   if (bytes_allocated_out != nullptr) {
     *bytes_allocated_out = bytes_allocated;
   }
@@ -84,24 +87,31 @@ template <typename S,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::AllocWithGrowth(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::AllocWithGrowth(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                          &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocWithGrowth(self,
+                                          num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                          &bytes_allocated,
+                                          &usable_size,
                                           &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -113,27 +123,35 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::Alloc(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::Alloc(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
+  void* obj_with_rdz = S::Alloc(self,
+                                num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                &bytes_allocated,
+                                &usable_size,
+                                &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
-                                                kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
-                                                                      bytes_allocated, usable_size,
-                                                                      bytes_tl_bulk_allocated,
-                                                                      bytes_allocated_out,
-                                                                      usable_size_out,
-                                                                      bytes_tl_bulk_allocated_out);
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
+      bytes_tl_bulk_allocated,
+      bytes_allocated_out,
+      usable_size_out,
+      bytes_tl_bulk_allocated_out);
 }
 
 template <typename S,
@@ -141,24 +159,31 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::AllocThreadUnsafe(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::AllocThreadUnsafe(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                            &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocThreadUnsafe(self,
+                                            num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                            &bytes_allocated,
+                                            &usable_size,
                                             &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -170,12 +195,14 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::AllocationSize(
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::AllocationSize(
     mirror::Object* obj, size_t* usable_size) {
-  size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
+  size_t result = S::AllocationSize(
+      reinterpret_cast<mirror::Object*>(
+          reinterpret_cast<uint8_t*>(obj)
+              - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
       usable_size);
   if (usable_size != nullptr) {
     if (kUseObjSizeForUsable) {
@@ -192,10 +219,9 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::Free(
-    Thread* self, mirror::Object* ptr) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::Free(Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
   uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
@@ -220,10 +246,10 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::FreeList(
-    Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::FreeList(
+    Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
   size_t freed = 0;
   for (size_t i = 0; i < num_ptrs; i++) {
     freed += Free(self, ptrs[i]);
@@ -238,11 +264,12 @@ template <typename S,
           bool kUseObjSizeForUsable>
 template <typename... Params>
 MemoryToolMallocSpace<S,
-                    kMemoryToolRedZoneBytes,
-                    kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::MemoryToolMallocSpace(
-    MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
-  // Don't want to change the valgrind states of the mem map here as the allocator is already
+                      kMemoryToolRedZoneBytes,
+                      kAdjustForRedzoneInAllocSize,
+                      kUseObjSizeForUsable>::MemoryToolMallocSpace(
+    MemMap* mem_map, size_t initial_size, Params... params)
+    : S(mem_map, initial_size, params...) {
+  // Don't want to change the memory tool states of the mem map here as the allocator is already
   // initialized at this point and that may interfere with what the allocator does internally. Note
   // that the tail beyond the initial size is mprotected.
 }
@@ -252,9 +279,9 @@ template <typename S,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
   return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
 }
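All of the allocation entry points above implement the same red-zone scheme: over-allocate by `2 * kMemoryToolRedZoneBytes`, return the interior pointer, and undo the offset in `Free()` and `AllocationSize()`. A self-contained sketch of that pointer arithmetic (hypothetical names; the `raw_alloc`/`raw_free` callbacks stand in for the underlying space `S`):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kRedZoneBytes = 8;  // Assumed value for this sketch.

// Over-allocate so a red zone brackets the object on both sides, and hand the
// caller the interior pointer, mirroring Alloc()/AdjustForMemoryTool() above.
void* AllocWithRedZones(size_t num_bytes, void* (*raw_alloc)(size_t)) {
  uint8_t* with_rdz = static_cast<uint8_t*>(raw_alloc(num_bytes + 2 * kRedZoneBytes));
  if (with_rdz == nullptr) {
    return nullptr;
  }
  return with_rdz + kRedZoneBytes;
}

// Undo the offset applied at allocation time, mirroring Free() above.
void FreeWithRedZones(void* ptr, void (*raw_free)(void*)) {
  raw_free(static_cast<uint8_t*>(ptr) - kRedZoneBytes);
}
```

This is also why `MaxBytesBulkAllocatedFor()` and the `AllocationSize()` TODO in rosalloc_space.cc below must account for the red zones: every size reported by the underlying space is `2 * kMemoryToolRedZoneBytes` larger than what the caller asked for.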
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index e7865363a1..b0402e4b83 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -77,7 +77,7 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There is currently some issues with
+  // TODO: Fix RosAllocSpace to support ASan. There is currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (running_on_memory_tool) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
@@ -382,12 +382,12 @@ size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usab
   size_t size = obj->SizeOf<kVerifyNone>();
   bool add_redzones = false;
   if (kMaybeIsRunningOnMemoryTool) {
-    add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+    add_redzones = kRunningOnMemoryTool && kMemoryToolAddsRedzones;
     if (add_redzones) {
       size += 2 * kDefaultMemoryToolRedZoneBytes;
     }
   } else {
-    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
+    DCHECK(!kRunningOnMemoryTool);
   }
   size_t size_by_size = rosalloc_->UsableSize(size);
   if (kIsDebugBuild) {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9d16b87b7d..4c17233360 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -159,8 +159,8 @@ class RosAllocSpace : public MallocSpace {
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t maximum_size, bool low_memory_mode) OVERRIDE {
-    return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
-                          RUNNING_ON_MEMORY_TOOL != 0);
+    return CreateRosAlloc(
+        base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
   }
 
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                              size_t maximum_size, bool low_memory_mode,
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 655713e8c6..01e74962ba 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -867,11 +867,6 @@ TEST_F(UnstartedRuntimeTest, Cos) {
 }
 
 TEST_F(UnstartedRuntimeTest, Pow) {
-  // Valgrind seems to get this wrong, actually. Disable for valgrind.
-  if (RUNNING_ON_MEMORY_TOOL != 0 && kMemoryToolIsValgrind) {
-    return;
-  }
-
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b7b779ce31..f31a24ec2a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -333,7 +333,7 @@ void Jit::DeleteThreadPool() {
     }
 
     // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
-    if (!RUNNING_ON_MEMORY_TOOL) {
+    if (!kRunningOnMemoryTool) {
       pool->StopWorkers(self);
       pool->RemoveAllTasks(self);
     }
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 14f3f45f9e..b3a47c3053 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -289,8 +289,10 @@ void DumpNativeStack(std::ostream& os,
                      ArtMethod* current_method,
                      void* ucontext_ptr,
                      bool skip_frames) {
-  // b/18119146
-  if (RUNNING_ON_MEMORY_TOOL != 0) {
+  // Historical note: This was disabled when running under Valgrind (b/18119146).
+  // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+  // check whether this test works with ASan.
+  if (kRunningOnMemoryTool) {
     return;
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1e327fc8ed..6d10a224e9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -240,7 +240,7 @@ Runtime::Runtime()
       exit_(nullptr),
       abort_(nullptr),
       stats_enabled_(false),
-      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
+      is_running_on_memory_tool_(kRunningOnMemoryTool),
       instrumentation_(),
       main_thread_group_(nullptr),
      system_thread_group_(nullptr),
@@ -1362,8 +1362,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
     case InstructionSet::kMips:
     case InstructionSet::kMips64:
       implicit_null_checks_ = true;
-      // Installing stack protection does not play well with valgrind.
-      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
+      // Historical note: Installing stack protection was not playing well with Valgrind.
+      implicit_so_checks_ = true;
       break;
     default:
       // Keep the defaults.
@@ -1378,8 +1378,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   // These need to be in a specific order. The null point check handler must be
   // after the suspend check and stack overflow check handlers.
   //
-  // Note: the instances attach themselves to the fault manager and are handled by it. The manager
-  // will delete the instance on Shutdown().
+  // Note: the instances attach themselves to the fault manager and are handled by it. The
+  // manager will delete the instance on Shutdown().
   if (implicit_suspend_checks_) {
     new SuspensionHandler(&fault_manager);
   }
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 72d9919971..54769f9c49 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -339,8 +339,8 @@ class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
 };
 
 TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
-  // SigQuit induces a dump. ASAN isn't happy with libunwind reading memory.
-  TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
+  // SigQuit induces a dump. ASan isn't happy with libunwind reading memory.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
 
   // The runtime needs to be started for the signal handler.
   Thread* self = Thread::Current();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a8133a1fda..210e1b0c51 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1116,21 +1116,10 @@ bool Thread::InitStackHwm() {
   Runtime* runtime = Runtime::Current();
   bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
 
-  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
-  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
-  // stack_begin to 0.
-  const bool valgrind_on_arm =
-      (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
-      kMemoryToolIsValgrind &&
-      RUNNING_ON_MEMORY_TOOL != 0;
-  if (valgrind_on_arm) {
-    tlsPtr_.stack_begin = nullptr;
-  }
-
   ResetDefaultStackEnd();
 
   // Install the protected region if we are doing implicit overflow checks.
-  if (implicit_stack_check && !valgrind_on_arm) {
+  if (implicit_stack_check) {
     // The thread might have protected region at the bottom. We need
     // to install our own region so we need to move the limits
     // of the stack to make room for it.
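The thread.cc hunk removes the last Valgrind special case: the guard page ("protected region") is now installed whenever implicit stack overflow checks are enabled. As a rough sketch of the underlying technique, assuming POSIX `mprotect` (ART's real logic lives in `Thread::InstallImplicitProtection()` and carries more bookkeeping):

```cpp
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

// Sketch: protect a region at the low end of the stack so an overflow faults
// there, letting a SIGSEGV handler convert the fault into a StackOverflowError.
// stack_begin/guard_size are hypothetical parameters for illustration.
bool InstallGuardRegion(uint8_t* stack_begin, size_t guard_size) {
  if (mprotect(stack_begin, guard_size, PROT_NONE) == -1) {
    perror("mprotect guard region");
    return false;
  }
  return true;
}
```

With Valgrind gone, there is no configuration left in which the guard page yields wrong values, which is also why `implicit_so_checks_` (in runtime.cc above) can now be set unconditionally.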