Generalize Valgrind annotations in ART to support ASan.

Also add redzones around non-fixed mem_map(s).
Also raise the -Wframe-larger-than limit to enable the arm64 ASan build.

Change-Id: Ie572481a25fead59fc8978d2c317a33ac418516c
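
For context: the hunks below route every annotation through a new
base/memory_tool.h header instead of including <valgrind.h> directly. That
header is added by this change but does not appear in this section; the
following is a minimal sketch of such a dispatch header, assuming ASan's
public interface from <sanitizer/asan_interface.h> (illustrative, not the
verbatim file):

    // Hypothetical sketch of base/memory_tool.h.
    #if defined(ADDRESS_SANITIZER)
    #include <sanitizer/asan_interface.h>
    // ASan has a single poisoned/unpoisoned state, so UNDEFINED and DEFINED
    // both map to unpoisoning.
    #define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
    #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
    #define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
    #define RUNNING_ON_MEMORY_TOOL 1U
    constexpr bool kMemoryToolAddsRedzones = true;
    #else
    #include <memcheck/memcheck.h>
    #include <valgrind.h>
    // Valgrind distinguishes undefined (allocated, uninitialized) from
    // noaccess; the macros keep that distinction where it is available.
    #define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
    #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
    #define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
    #define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
    constexpr bool kMemoryToolAddsRedzones = true;
    #endif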
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index bba92a1..25fdd7c 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -24,7 +24,7 @@
 namespace allocator {
 
 inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
-  return kCheckZeroMemory && !running_on_valgrind_;
+  return kCheckZeroMemory && !is_running_on_memory_tool_;
 }
 
 template<bool kThreadSafe>
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 49c7fda..bd10f7b 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -16,8 +16,9 @@
 
 #include "rosalloc.h"
 
+#include "base/memory_tool.h"
 #include "base/mutex-inl.h"
-#include "gc/space/valgrind_settings.h"
+#include "gc/space/memory_tool_settings.h"
 #include "mem_map.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
@@ -50,7 +51,7 @@
     reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);
 
 RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
-                   PageReleaseMode page_release_mode, bool running_on_valgrind,
+                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                    size_t page_release_size_threshold)
     : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
       capacity_(capacity), max_capacity_(max_capacity),
@@ -58,7 +59,7 @@
       bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
       page_release_mode_(page_release_mode),
       page_release_size_threshold_(page_release_size_threshold),
-      running_on_valgrind_(running_on_valgrind) {
+      is_running_on_memory_tool_(running_on_memory_tool) {
   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
   CHECK_LE(capacity, max_capacity);
@@ -110,6 +111,9 @@
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
     delete size_bracket_locks_[i];
   }
+  if (is_running_on_memory_tool_) {
+    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
+  }
 }
 
 void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
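
A note on the destructor hunk above: ASan does not clear shadow poisoning on
munmap, so redzones left poisoned at teardown can trigger false positives in
whatever next occupies that address range. A minimal standalone illustration
of the failure mode that MEMORY_TOOL_MAKE_DEFINED(base_, capacity_) avoids
(hypothetical example, not ART code; build with -fsanitize=address):

    #include <cstddef>
    #include <sanitizer/asan_interface.h>
    #include <sys/mman.h>

    int main() {
      const std::size_t kSize = 4096;
      void* p = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      __asan_poison_memory_region(p, kSize);  // e.g. an allocator redzone
      munmap(p, kSize);  // shadow for [p, p + kSize) remains poisoned
      // If a later mapping lands at the same address, its first legal access
      // can be reported as a poison violation. Unpoisoning the whole region
      // before teardown, as ~RosAlloc() now does, prevents this.
      return 0;
    }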
@@ -1897,8 +1901,8 @@
     MutexLock lock_mu(self, lock_);
     size_t pm_end = page_map_size_;
     size_t i = 0;
-    size_t valgrind_modifier =  running_on_valgrind_ ?
-        2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :  // Redzones before and after.
+    size_t memory_tool_modifier = is_running_on_memory_tool_ ?
+        2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after.
         0;
     while (i < pm_end) {
       uint8_t pm = page_map_[i];
@@ -1938,15 +1942,15 @@
             idx++;
           }
           uint8_t* start = base_ + i * kPageSize;
-          if (running_on_valgrind_) {
-            start += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+          if (is_running_on_memory_tool_) {
+            start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
           }
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
           size_t obj_size = obj->SizeOf();
-          CHECK_GT(obj_size + valgrind_modifier, kLargeSizeThreshold)
+          CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
               << "A rosalloc large object size must be > " << kLargeSizeThreshold;
-          CHECK_EQ(num_pages, RoundUp(obj_size + valgrind_modifier, kPageSize) / kPageSize)
-              << "A rosalloc large object size " << obj_size + valgrind_modifier
+          CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
+              << "A rosalloc large object size " << obj_size + memory_tool_modifier
               << " does not match the page map table " << (num_pages * kPageSize)
               << std::endl << DumpPageMap();
           i += num_pages;
@@ -2011,11 +2015,11 @@
   }
   // Call Verify() here for the lock order.
   for (auto& run : runs) {
-    run->Verify(self, this, running_on_valgrind_);
+    run->Verify(self, this, is_running_on_memory_tool_);
   }
 }
 
-void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) {
+void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
   const size_t idx = size_bracket_idx_;
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
@@ -2098,8 +2102,8 @@
   }
   // Check each slot.
   size_t slots = 0;
-  size_t valgrind_modifier = running_on_valgrind ?
-      2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :
+  size_t memory_tool_modifier = running_on_memory_tool ?
+      2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
       0U;
   for (size_t v = 0; v < num_vec; v++, slots += 32) {
     DCHECK_GE(num_slots, slots) << "Out of bounds";
@@ -2113,16 +2117,16 @@
       bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
       if (is_allocated && !is_thread_local_freed) {
         uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
-        if (running_on_valgrind) {
-          slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+        if (running_on_memory_tool) {
+          slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
         }
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
         size_t obj_size = obj->SizeOf();
-        CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold)
+        CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
             << "A run slot contains a large object " << Dump();
-        CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx)
+        CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
             << PrettyTypeOf(obj) << " "
-            << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx
+            << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
             << " A run slot contains an object with wrong size " << Dump();
       }
     }
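
To make the modifier arithmetic in Verify concrete, this is the slot layout
under a memory tool with the default 8-byte redzones (a sketch; the constant
kDefaultMemoryToolRedZoneBytes is defined later in this patch):

    // Slot layout in a run while a memory tool is active:
    //
    //   |<- 8 B redzone ->|<---- object (obj_size) ---->|<- 8 B redzone ->|
    //   ^ slot_addr        ^ pointer handed to the caller
    //
    // The size bracket was chosen for obj_size + 2 * 8 bytes, which is why
    // Verify checks SizeToIndex(obj_size + memory_tool_modifier) == idx and
    // advances slot_addr by kDefaultMemoryToolRedZoneBytes before reading
    // the object header.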
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0fcfe72..c356a39 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -253,7 +253,7 @@
     // Dump the run metadata for debugging.
     std::string Dump();
     // Verify for debugging.
-    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind)
+    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
 
@@ -503,7 +503,7 @@
   const size_t page_release_size_threshold_;
 
-  // Whether this allocator is running under Valgrind.
-  bool running_on_valgrind_;
+  // Whether this allocator is running under a memory tool such as Valgrind or ASan.
+  bool is_running_on_memory_tool_;
 
   // The base address of the memory region that's managed by this allocator.
   uint8_t* Begin() { return base_; }
@@ -561,7 +561,7 @@
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
            PageReleaseMode page_release_mode,
-           bool running_on_valgrind,
+           bool running_on_memory_tool,
            size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
   ~RosAlloc();
 
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 2e66160..cb750eb 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -92,7 +92,7 @@
   } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
              (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
              LIKELY(obj != nullptr)) {
-    DCHECK(!running_on_valgrind_);
+    DCHECK(!is_running_on_memory_tool_);
     obj->SetClass(klass);
     if (kUseBakerOrBrooksReadBarrier) {
       if (kUseBrooksReadBarrier) {
@@ -244,8 +244,8 @@
       break;
     }
     case kAllocatorTypeRosAlloc: {
-      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
-        // If running on valgrind, we should be using the instrumented path.
+      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
+        // If running on Valgrind or ASan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       max_bytes_tl_bulk_allocated))) {
@@ -254,7 +254,7 @@
         ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
       } else {
-        DCHECK(!running_on_valgrind_);
+        DCHECK(!is_running_on_memory_tool_);
         size_t max_bytes_tl_bulk_allocated =
             rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
@@ -270,12 +270,12 @@
       break;
     }
     case kAllocatorTypeDlMalloc: {
-      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
+      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind, we should be using the instrumented path.
+        // If running on Valgrind or ASan, we should be using the instrumented path.
         ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
       } else {
-        DCHECK(!running_on_valgrind_);
+        DCHECK(!is_running_on_memory_tool_);
         ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                                bytes_tl_bulk_allocated);
       }
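
The pattern in both allocator cases above is worth spelling out: kInstrumented
is a template parameter fixed at the call site, so the common build compiles a
fast path with no memory-tool branch at all, and the runtime flag is consulted
only on the instrumented path. A self-contained sketch of the shape (names are
illustrative, not the real Heap API):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct Space {
      void* Alloc(std::size_t n) {            // redzone-aware; virtual in ART
        std::printf("instrumented alloc of %zu bytes\n", n);
        return nullptr;
      }
      void* AllocNonvirtual(std::size_t n) {  // fast path
        std::printf("fast alloc of %zu bytes\n", n);
        return nullptr;
      }
    };

    struct Heap {
      bool is_running_on_memory_tool_ = false;
      Space space_;

      template <bool kInstrumented>
      void* TryAlloc(std::size_t n) {
        if (kInstrumented && is_running_on_memory_tool_) {
          return space_.Alloc(n);
        }
        assert(!is_running_on_memory_tool_);  // fast path never sees a tool
        return space_.AllocNonvirtual(n);
      }
    };

    int main() {
      Heap heap;
      heap.TryAlloc<false>(64);               // tool statically ruled out
      heap.is_running_on_memory_tool_ = true;
      heap.TryAlloc<true>(64);                // instrumented entry points
      return 0;
    }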
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0ae9cdf..6317351 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -192,7 +192,7 @@
       total_allocation_time_(0),
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
-      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
+      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
       use_tlab_(use_tlab),
       main_space_backup_(nullptr),
       min_interval_homogeneous_space_compaction_by_oom_(
@@ -518,7 +518,7 @@
   if (gc_stress_mode_) {
     backtrace_lock_ = new Mutex("GC complete lock");
   }
-  if (running_on_valgrind_ || gc_stress_mode_) {
+  if (is_running_on_memory_tool_ || gc_stress_mode_) {
     instrumentation->InstrumentQuickAllocEntryPoints();
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -2077,9 +2077,12 @@
 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
  public:
-  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
-      bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
-  }
+  explicit ZygoteCompactingCollector(gc::Heap* heap,
+                                     bool is_running_on_memory_tool)
+      : SemiSpace(heap, false, "zygote collector"),
+        bin_live_bitmap_(nullptr),
+        bin_mark_bitmap_(nullptr),
+        is_running_on_memory_tool_(is_running_on_memory_tool) {}
 
   void BuildBins(space::ContinuousSpace* space) {
     bin_live_bitmap_ = space->GetLiveBitmap();
@@ -2105,6 +2108,7 @@
   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
   // Mark bitmap of the space which contains the bins.
   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
+  const bool is_running_on_memory_tool_;
 
   static void Callback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -2119,6 +2123,9 @@
   }
 
   void AddBin(size_t size, uintptr_t position) {
+    if (is_running_on_memory_tool_) {
+      MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
+    }
     if (size != 0) {
       bins_.insert(std::make_pair(size, position));
     }
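
The MEMORY_TOOL_MAKE_DEFINED in AddBin deserves a note: a bin is a gap of dead
objects between two live ones, and under a memory tool that gap may still
contain poisoned redzones from freed allocations. The compactor is about to
copy live objects into the gap, so the span is unpoisoned first. Roughly:

    // Bin layout during zygote compaction (sketch):
    //
    //   live A | dead objects, possibly with poisoned redzones | live B
    //            ^ position, size bytes (the bin)
    //
    // MEMORY_TOOL_MAKE_DEFINED(position, size) makes the span accessible so
    // that copying live objects into the bin is not flagged by the tool.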
@@ -2212,7 +2219,7 @@
     // Temporarily disable rosalloc verification because the zygote
     // compaction will mess up the rosalloc internal metadata.
     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
-    ZygoteCompactingCollector zygote_collector(this);
+    ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
     zygote_collector.BuildBins(non_moving_space_);
     // Create a new bump pointer space which we will compact into.
     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d0040f2..2df5a4e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1173,7 +1173,7 @@
   collector::MarkCompact* mark_compact_collector_;
   collector::ConcurrentCopying* concurrent_copying_collector_;
 
-  const bool running_on_valgrind_;
+  const bool is_running_on_memory_tool_;
   const bool use_tlab_;
 
   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 5237c7b..e1c5b64 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -20,13 +20,13 @@
 #include "gc/accounting/card_table.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
+#include "memory_tool_malloc_space-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "runtime.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-#include "valgrind_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
@@ -62,8 +62,8 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
         mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
         can_move_objects, starting_size);
   } else {
@@ -152,8 +152,8 @@
                                            void* allocator, uint8_t* begin, uint8_t* end,
                                            uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
         mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
         can_move_objects, starting_size_);
   } else {
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 1f80f1f..ab527a4 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@
 namespace space {
 
 // An alloc space is a space where objects may be allocated and garbage collected. Not final as may
-// be overridden by a ValgrindMallocSpace.
+// be overridden by a MemoryToolMallocSpace.
 class DlMallocSpace : public MallocSpace {
  public:
   // Create a DlMallocSpace from an existing mem_map.
@@ -46,27 +46,27 @@
   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                size_t capacity, uint8_t* requested_begin, bool can_move_objects);
 
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
       OVERRIDE LOCKS_EXCLUDED(lock_);
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       OVERRIDE LOCKS_EXCLUDED(lock_) {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
       LOCKS_EXCLUDED(lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
       LOCKS_EXCLUDED(lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2b567fe..a913e59 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -34,12 +34,12 @@
 namespace gc {
 namespace space {
 
-class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
  public:
-  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
   }
 
-  ~ValgrindLargeObjectMapSpace() OVERRIDE {
+  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
-    // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
-    // freed since they are held live by the class linker.
+    // Keep memory tools happy if there are any large objects, such as dex cache arrays,
+    // which aren't freed since they are held live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
@@ -52,13 +52,14 @@
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       OVERRIDE {
     mirror::Object* obj =
-        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                    usable_size, bytes_tl_bulk_allocated);
     mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
-                               kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
+    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
+    MEMORY_TOOL_MAKE_NOACCESS(
+        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
+        kMemoryToolRedZoneBytes);
     if (usable_size != nullptr) {
       *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
     }
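
With kMemoryToolRedZoneBytes equal to kPageSize, the resulting large-object
layout looks like this (sketch):

    //   | 1 page NOACCESS | num_bytes object (pointer returned) | 1 page NOACCESS |
    //   ^ obj               ^ object_without_rdz
    //
    // Page-sized redzones mean any overrun within a page of either end traps
    // immediately. usable_size is shrunk to num_bytes so callers cannot
    // legally reach the poisoned pages; Free() and AllocationSize() translate
    // back via ObjectWithRedzone().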
@@ -75,7 +76,7 @@
 
   size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
     mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
-    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
     return LargeObjectMapSpace::Free(self, object_with_rdz);
   }
 
@@ -86,15 +87,15 @@
  private:
   static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
     return reinterpret_cast<const mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
   }
 
   static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
     return reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
   }
 
-  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
 };
 
 void LargeObjectSpace::SwapBitmaps() {
@@ -121,8 +122,8 @@
       lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindLargeObjectMapSpace(name);
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolLargeObjectMapSpace(name);
   } else {
     return new LargeObjectMapSpace(name);
   }
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9495864..6c689cd 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -20,6 +20,7 @@
 #include "space.h"
 
 #include <ostream>
+#include "base/memory_tool.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
similarity index 72%
rename from runtime/gc/space/valgrind_malloc_space-inl.h
rename to runtime/gc/space/memory_tool_malloc_space-inl.h
index bc329e1..ea8b8aa 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -14,22 +14,20 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
 
-#include "valgrind_malloc_space.h"
-
-#include <memcheck/memcheck.h>
-
-#include "valgrind_settings.h"
+#include "base/memory_tool.h"
+#include "memory_tool_malloc_space.h"
+#include "memory_tool_settings.h"
 
 namespace art {
 namespace gc {
 namespace space {
 
-namespace valgrind_details {
+namespace memory_tool_details {
 
-template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
-inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
+template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
+inline mirror::Object* AdjustForMemoryTool(void* obj_with_rdz, size_t num_bytes,
                                          size_t bytes_allocated, size_t usable_size,
                                          size_t bytes_tl_bulk_allocated,
@@ -48,26 +46,26 @@
     if (kUseObjSizeForUsable) {
       *usable_size_out = num_bytes;
     } else {
-      *usable_size_out = usable_size - 2 * kValgrindRedZoneBytes;
+      *usable_size_out = usable_size - 2 * kMemoryToolRedZoneBytes;
     }
   }
 
   // Left redzone.
-  VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+  MEMORY_TOOL_MAKE_NOACCESS(obj_with_rdz, kMemoryToolRedZoneBytes);
 
   // Make requested memory readable.
   // (If the allocator assumes memory is zeroed out, we might get UNDEFINED warnings, so make
   //  everything DEFINED initially.)
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_DEFINED(result, num_bytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kMemoryToolRedZoneBytes);
+  MEMORY_TOOL_MAKE_DEFINED(result, num_bytes);
 
   // Right redzone. Assumes that if bytes_allocated > usable_size, then the difference is
   // management data at the upper end, and for simplicity we will not protect that.
   // At the moment, this fits RosAlloc (no management data in a slot, usable_size == alloc_size)
   // and DlMalloc (allocation_size = (usable_size == num_bytes) + 4, 4 is management)
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
-                             usable_size - (num_bytes + kValgrindRedZoneBytes));
+  MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
+                            usable_size - (num_bytes + kMemoryToolRedZoneBytes));
 
   return result;
 }
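
A worked instance of the arithmetic above, with illustrative numbers and the
default 8-byte redzone:

    // Request: num_bytes = 24, kMemoryToolRedZoneBytes = 8.
    // The base space allocates 24 + 2*8 = 40 bytes; suppose usable_size == 40.
    //
    //   [ 8 B NOACCESS | 24 B DEFINED (returned object) | 40 - (24 + 8) = 8 B NOACCESS ]
    //
    // The usable size reported to the caller is either the object's own size
    // (kUseObjSizeForUsable, RosAlloc): num_bytes = 24, or
    // usable_size - 2 * 8 = 24 (DlMalloc).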
@@ -76,15 +74,15 @@
   return obj->SizeOf<kVerifyNone>();
 }
 
-}  // namespace valgrind_details
+}  // namespace memory_tool_details
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object*
-ValgrindMallocSpace<S,
-                    kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+                    kMemoryToolRedZoneBytes,
                     kAdjustForRedzoneInAllocSize,
                     kUseObjSizeForUsable>::AllocWithGrowth(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -92,14 +90,14 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                           &bytes_allocated, &usable_size,
                                           &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
       obj_with_rdz, num_bytes,
       bytes_allocated, usable_size,
       bytes_tl_bulk_allocated,
@@ -109,11 +107,11 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
-                                    kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+                                    kMemoryToolRedZoneBytes,
                                     kAdjustForRedzoneInAllocSize,
                                     kUseObjSizeForUsable>::Alloc(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -121,13 +119,13 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                 &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes,
                                              kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
                                                                    bytes_allocated, usable_size,
                                                                    bytes_tl_bulk_allocated,
@@ -137,11 +135,11 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
-                                    kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+                                    kMemoryToolRedZoneBytes,
                                     kAdjustForRedzoneInAllocSize,
                                     kUseObjSizeForUsable>::AllocThreadUnsafe(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -149,14 +147,14 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                             &bytes_allocated, &usable_size,
                                             &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
       obj_with_rdz, num_bytes,
       bytes_allocated, usable_size,
       bytes_tl_bulk_allocated,
@@ -166,38 +164,39 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::AllocationSize(
     mirror::Object* obj, size_t* usable_size) {
   size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kValgrindRedZoneBytes : 0)),
+      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
       usable_size);
   if (usable_size != nullptr) {
     if (kUseObjSizeForUsable) {
-      *usable_size = valgrind_details::GetObjSizeNoThreadSafety(obj);
+      *usable_size = memory_tool_details::GetObjSizeNoThreadSafety(obj);
     } else {
-      *usable_size = *usable_size - 2 * kValgrindRedZoneBytes;
+      *usable_size = *usable_size - 2 * kMemoryToolRedZoneBytes;
     }
   }
   return result;
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::Free(
     Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
-  uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
+  uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
+
   // Make redzones undefined.
   size_t usable_size;
   size_t allocation_size = AllocationSize(ptr, &usable_size);
@@ -206,20 +205,20 @@
   // Use the obj-size-for-usable flag to determine whether usable_size is the more important one,
   // e.g., whether there's data in the allocation_size (and usable_size can't be trusted).
   if (kUseObjSizeForUsable) {
-    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
+    MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, allocation_size);
   } else {
-    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size + 2 * kValgrindRedZoneBytes);
+    MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, usable_size + 2 * kMemoryToolRedZoneBytes);
   }
 
   return S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::FreeList(
     Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
@@ -232,32 +231,33 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 template <typename... Params>
-ValgrindMallocSpace<S,
-                    kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+                    kMemoryToolRedZoneBytes,
                     kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::ValgrindMallocSpace(
+                    kUseObjSizeForUsable>::MemoryToolMallocSpace(
     MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
-  VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size,
-                              mem_map->Size() - initial_size);
+  MEMORY_TOOL_MAKE_DEFINED(mem_map->Begin(), initial_size);
+  MEMORY_TOOL_MAKE_UNDEFINED(mem_map->Begin() + initial_size,
+                             mem_map->Size() - initial_size);
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
-  return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes);
+  return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
 }
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
similarity index 78%
rename from runtime/gc/space/valgrind_malloc_space.h
rename to runtime/gc/space/memory_tool_malloc_space.h
index a6b010a..64c6f35 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -14,24 +14,22 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
 
 #include "malloc_space.h"
 
-#include <valgrind.h>
-
 namespace art {
 namespace gc {
 namespace space {
 
-// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around
-// allocations.
+// A specialization of DlMallocSpace/RosAllocSpace that places memory tool red
+// zones around allocations.
 template <typename BaseMallocSpaceType,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
  public:
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -57,15 +55,15 @@
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
 
   template <typename... Params>
-  explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
-  virtual ~ValgrindMallocSpace() {}
+  explicit MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+  virtual ~MemoryToolMallocSpace() {}
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
+  DISALLOW_COPY_AND_ASSIGN(MemoryToolMallocSpace);
 };
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
diff --git a/runtime/gc/space/valgrind_settings.h b/runtime/gc/space/memory_tool_settings.h
similarity index 80%
rename from runtime/gc/space/valgrind_settings.h
rename to runtime/gc/space/memory_tool_settings.h
index 73da0fd..e9333c8 100644
--- a/runtime/gc/space/valgrind_settings.h
+++ b/runtime/gc/space/memory_tool_settings.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
 
 namespace art {
 namespace gc {
@@ -23,10 +23,10 @@
 
 // Default number of bytes to use as a red zone (rdz). A red zone of this size will be placed before
 // and after each allocation. 8 bytes provides long/double alignment.
-static constexpr size_t kDefaultValgrindRedZoneBytes = 8;
+static constexpr size_t kDefaultMemoryToolRedZoneBytes = 8;
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index f94ec23..8bff2b4 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -17,10 +17,9 @@
 #ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
 #define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
 
-#include <valgrind.h>
-
+#include "base/memory_tool.h"
 #include "gc/allocator/rosalloc-inl.h"
-#include "gc/space/valgrind_settings.h"
+#include "gc/space/memory_tool_settings.h"
 #include "rosalloc_space.h"
 #include "thread.h"
 
@@ -28,26 +27,26 @@
 namespace gc {
 namespace space {
 
-template<bool kMaybeRunningOnValgrind>
+template<bool kMaybeIsRunningOnMemoryTool>
 inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
   // obj is a valid object. Use its class in the header to get the size.
   // Don't use verification since the object may be dead if we are sweeping.
   size_t size = obj->SizeOf<kVerifyNone>();
-  bool running_on_valgrind = false;
-  if (kMaybeRunningOnValgrind) {
-    running_on_valgrind = RUNNING_ON_VALGRIND != 0;
-    if (running_on_valgrind) {
-      size += 2 * kDefaultValgrindRedZoneBytes;
+  bool add_redzones = false;
+  if (kMaybeIsRunningOnMemoryTool) {
+    add_redzones = (RUNNING_ON_MEMORY_TOOL != 0) && kMemoryToolAddsRedzones;
+    if (add_redzones) {
+      size += 2 * kDefaultMemoryToolRedZoneBytes;
     }
   } else {
-    DCHECK_EQ(RUNNING_ON_VALGRIND, 0U);
+    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
   }
   size_t size_by_size = rosalloc_->UsableSize(size);
   if (kIsDebugBuild) {
-    // On valgrind, the red zone has an impact...
+    // When running under a memory tool, the redzone affects the usable size.
     const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
     size_t size_by_ptr = rosalloc_->UsableSize(
-        obj_ptr - (running_on_valgrind ? kDefaultValgrindRedZoneBytes : 0));
+        obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
     if (size_by_size != size_by_ptr) {
       LOG(INFO) << "Found a bad sized obj of size " << size
                 << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index bc4414d..1a193c3 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -30,7 +30,7 @@
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-#include "valgrind_malloc_space-inl.h"
+#include "memory_tool_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
@@ -43,7 +43,7 @@
 static constexpr bool kVerifyFreedBytes = false;
 
 // TODO: Fix
-// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
+// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
 RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                              art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
@@ -61,10 +61,10 @@
                                                bool low_memory_mode, bool can_move_objects) {
   DCHECK(mem_map != nullptr);
 
-  bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();
+  bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
 
   allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
-                                                 capacity, low_memory_mode, running_on_valgrind);
+                                                 capacity, low_memory_mode, running_on_memory_tool);
   if (rosalloc == nullptr) {
     LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
     return nullptr;
@@ -78,10 +78,10 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  // TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
+  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
-  if (running_on_valgrind) {
-    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+  if (running_on_memory_tool) {
+    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
         mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
         can_move_objects, starting_size, low_memory_mode);
   } else {
@@ -134,7 +134,7 @@
 allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
                                                    size_t initial_size,
                                                    size_t maximum_size, bool low_memory_mode,
-                                                   bool running_on_valgrind) {
+                                                   bool running_on_memory_tool) {
   // clear errno to allow PLOG on error
   errno = 0;
   // create rosalloc using our backing storage starting at begin and
@@ -145,7 +145,7 @@
       low_memory_mode ?
           art::gc::allocator::RosAlloc::kPageReleaseModeAll :
           art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
-      running_on_valgrind);
+      running_on_memory_tool);
   if (rosalloc != nullptr) {
     rosalloc->SetFootprintLimit(initial_size);
   } else {
@@ -180,8 +180,8 @@
                                            void* allocator, uint8_t* begin, uint8_t* end,
                                            uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
         mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
         limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
   } else {
@@ -370,7 +370,7 @@
   delete rosalloc_;
   rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                              NonGrowthLimitCapacity(), low_memory_mode_,
-                             Runtime::Current()->RunningOnValgrind());
+                             Runtime::Current()->IsRunningOnMemoryTool());
   SetFootprintLimit(footprint_limit);
 }
 
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 36268f7..9dc6f31 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -31,7 +31,7 @@
 namespace space {
 
 // An alloc space implemented using a runs-of-slots memory allocator. Not final as may be
-// overridden by a ValgrindMallocSpace.
+// overridden by a MemoryToolMallocSpace.
 class RosAllocSpace : public MallocSpace {
  public:
   // Create a RosAllocSpace with the requested sizes. The requested
@@ -95,7 +95,7 @@
   ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
 
   // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
-  template<bool kMaybeRunningOnValgrind>
+  template<bool kMaybeIsRunningOnMemoryTool>
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
       NO_THREAD_SAFETY_ANALYSIS;
 
@@ -158,11 +158,11 @@
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t maximum_size, bool low_memory_mode) OVERRIDE {
     return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
-                          RUNNING_ON_VALGRIND != 0);
+                          RUNNING_ON_MEMORY_TOOL != 0);
   }
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                              size_t maximum_size, bool low_memory_mode,
-                                             bool running_on_valgrind);
+                                             bool running_on_memory_tool);
 
   void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                           void* arg, bool do_null_callback_at_end)