Inclusive language fixes in ART's garbage collector.
Update naming and comments to comply with Android's inclusive language
guidance: FillWithDummyObject becomes FillWithFakeObject, the Dummy*
test helpers (DummyOatFile, DummyImageSpace, DummySpace) become Fake*,
and ignored "dummy" out-parameters are renamed to "unused" variants.
See https://source.android.com/setup/contribute/respectful-code for
reference.
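
As an aside for reviewers, the "dummy" locals renamed to "unused" here
exist only because the Alloc-style entry points take a
bytes_tl_bulk_allocated out-parameter that these call sites never read.
A minimal sketch of that pattern (parameter names are assumptions based
on the call sites in this change, not an exact signature):

  // Illustrative only: the out-value is ignored by this caller.
  size_t bytes_allocated = 0u;
  size_t unused_bytes_tl_bulk_allocated = 0u;
  mirror::Object* obj = space->Alloc(
      self, alloc_size, &bytes_allocated, /*usable_size=*/ nullptr,
      &unused_bytes_tl_bulk_allocated);
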
#inclusivefixit
Test: Build ART and run ART tests
Bug: 161336379
Bug: 161896447
Change-Id: Ib4aef98db2a9fd1031a076af0eab3fcb4cf6afef
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7a1a505..0e15681 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -561,7 +561,7 @@
}
}
// May be null during runtime creation, in this case leave java_lang_Object null.
- // This is safe since single threaded behavior should mean FillDummyObject does not
+ // This is safe since single threaded behavior should mean FillWithFakeObject does not
// happen when java_lang_Object_ is null.
if (WellKnownClasses::java_lang_Object != nullptr) {
cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(thread,
@@ -3231,17 +3231,17 @@
bool enabled_;
};
-// Fill the given memory block with a dummy object. Used to fill in a
+// Fill the given memory block with a fake object. Used to fill in a
// copy of objects that was lost in race.
-void ConcurrentCopying::FillWithDummyObject(Thread* const self,
- mirror::Object* dummy_obj,
- size_t byte_size) {
+void ConcurrentCopying::FillWithFakeObject(Thread* const self,
+ mirror::Object* fake_obj,
+ size_t byte_size) {
// GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
// barriers here because we need the updated reference to the int array class, etc. Temporary set
// gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
CHECK_ALIGNED(byte_size, kObjectAlignment);
- memset(dummy_obj, 0, byte_size);
+ memset(fake_obj, 0, byte_size);
// Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
// Explicitly mark to make sure to get an object in the to-space.
mirror::Class* int_array_class = down_cast<mirror::Class*>(
@@ -3260,19 +3260,19 @@
AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
}
CHECK_EQ(byte_size, java_lang_Object_->GetObjectSize<kVerifyNone>());
- dummy_obj->SetClass(java_lang_Object_);
- CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
+ fake_obj->SetClass(java_lang_Object_);
+ CHECK_EQ(byte_size, (fake_obj->SizeOf<kVerifyNone>()));
} else {
// Use an int array.
- dummy_obj->SetClass(int_array_class);
- CHECK(dummy_obj->IsArrayInstance<kVerifyNone>());
+ fake_obj->SetClass(int_array_class);
+ CHECK(fake_obj->IsArrayInstance<kVerifyNone>());
int32_t length = (byte_size - data_offset) / component_size;
- ObjPtr<mirror::Array> dummy_arr = dummy_obj->AsArray<kVerifyNone>();
- dummy_arr->SetLength(length);
- CHECK_EQ(dummy_arr->GetLength(), length)
+ ObjPtr<mirror::Array> fake_arr = fake_obj->AsArray<kVerifyNone>();
+ fake_arr->SetLength(length);
+ CHECK_EQ(fake_arr->GetLength(), length)
<< "byte_size=" << byte_size << " length=" << length
<< " component_size=" << component_size << " data_offset=" << data_offset;
- CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
+ CHECK_EQ(byte_size, (fake_obj->SizeOf<kVerifyNone>()))
<< "byte_size=" << byte_size << " length=" << length
<< " component_size=" << component_size << " data_offset=" << data_offset;
}
@@ -3295,7 +3295,7 @@
byte_size = it->first;
CHECK_GE(byte_size, alloc_size);
if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
- // If remainder would be too small for a dummy object, retry with a larger request size.
+ // If remainder would be too small for a fake object, retry with a larger request size.
it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
if (it == skipped_blocks_map_.end()) {
// Not found.
@@ -3322,12 +3322,12 @@
// Return the remainder to the map.
CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(byte_size - alloc_size, min_object_size);
- // FillWithDummyObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
+ // FillWithFakeObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
// violation and possible deadlock. The deadlock case is a recursive case:
- // FillWithDummyObject -> Mark(IntArray.class) -> Copy -> AllocateInSkippedBlock.
- FillWithDummyObject(self,
- reinterpret_cast<mirror::Object*>(addr + alloc_size),
- byte_size - alloc_size);
+ // FillWithFakeObject -> Mark(IntArray.class) -> Copy -> AllocateInSkippedBlock.
+ FillWithFakeObject(self,
+ reinterpret_cast<mirror::Object*>(addr + alloc_size),
+ byte_size - alloc_size);
CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
{
MutexLock mu(self, skipped_blocks_lock_);
@@ -3360,10 +3360,10 @@
size_t region_space_bytes_allocated = 0U;
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
- size_t dummy;
+ size_t unused_size;
bool fall_back_to_non_moving = false;
mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
- region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
+ region_space_alloc_size, &region_space_bytes_allocated, nullptr, &unused_size);
bytes_allocated = region_space_bytes_allocated;
if (LIKELY(to_ref != nullptr)) {
DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
@@ -3389,8 +3389,8 @@
<< " skipped_objects="
<< to_space_objects_skipped_.load(std::memory_order_relaxed);
}
- to_ref = heap_->non_moving_space_->Alloc(self, obj_size,
- &non_moving_space_bytes_allocated, nullptr, &dummy);
+ to_ref = heap_->non_moving_space_->Alloc(
+ self, obj_size, &non_moving_space_bytes_allocated, nullptr, &unused_size);
if (UNLIKELY(to_ref == nullptr)) {
LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
<< obj_size << " byte object in region type "
@@ -3423,9 +3423,9 @@
if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
// Lost the race. Another thread (either GC or mutator) stored
// the forwarding pointer first. Make the lost copy (to_ref)
- // look like a valid but dead (dummy) object and keep it for
+ // look like a valid but dead (fake) object and keep it for
// future reuse.
- FillWithDummyObject(self, to_ref, bytes_allocated);
+ FillWithFakeObject(self, to_ref, bytes_allocated);
if (!fall_back_to_non_moving) {
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index b2bc5a4..98b1788 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -257,7 +257,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void MarkZygoteLargeObjects()
REQUIRES_SHARED(Locks::mutator_lock_);
- void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
+ void FillWithFakeObject(Thread* const self, mirror::Object* fake_obj, size_t byte_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
@@ -462,7 +462,7 @@
std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);
// Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
- // be filled in before flipping thread roots so that FillDummyObject can run. Not
+ // be filled in before flipping thread roots so that FillWithFakeObject can run. Not
// ObjPtr since the GC may transition to suspended and runnable between phases.
mirror::Class* java_lang_Object_;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index b1a21d4..222b3d5 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -30,21 +30,21 @@
namespace gc {
namespace collector {
-class DummyOatFile : public OatFile {
+class FakeOatFile : public OatFile {
public:
- DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
+ FakeOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
begin_ = begin;
end_ = end;
}
};
-class DummyImageSpace : public space::ImageSpace {
+class FakeImageSpace : public space::ImageSpace {
public:
- DummyImageSpace(MemMap&& map,
- accounting::ContinuousSpaceBitmap&& live_bitmap,
- std::unique_ptr<DummyOatFile>&& oat_file,
- MemMap&& oat_map)
- : ImageSpace("DummyImageSpace",
+ FakeImageSpace(MemMap&& map,
+ accounting::ContinuousSpaceBitmap&& live_bitmap,
+ std::unique_ptr<FakeOatFile>&& oat_file,
+ MemMap&& oat_map)
+ : ImageSpace("FakeImageSpace",
/*image_location=*/"",
/*profile_file=*/"",
std::move(map),
@@ -66,7 +66,7 @@
ImmuneSpacesTest() {}
void ReserveBitmaps() {
- // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
+ // Create a bunch of fake bitmaps since these are required to create image spaces. The bitmaps
// do not need to cover the image spaces though.
for (size_t i = 0; i < kMaxBitmaps; ++i) {
accounting::ContinuousSpaceBitmap bitmap(
@@ -79,14 +79,14 @@
}
// Create an image space, the oat file is optional.
- DummyImageSpace* CreateImageSpace(size_t image_size,
- size_t oat_size,
- MemMap* image_reservation,
- MemMap* oat_reservation) {
+ FakeImageSpace* CreateImageSpace(size_t image_size,
+ size_t oat_size,
+ MemMap* image_reservation,
+ MemMap* oat_reservation) {
DCHECK(image_reservation != nullptr);
DCHECK(oat_reservation != nullptr);
std::string error_str;
- MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+ MemMap image_map = MemMap::MapAnonymous("FakeImageSpace",
image_size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
@@ -109,7 +109,7 @@
LOG(ERROR) << error_str;
return nullptr;
}
- std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
+ std::unique_ptr<FakeOatFile> oat_file(new FakeOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
new (image_map.Begin()) ImageHeader(
@@ -130,22 +130,22 @@
/*boot_image_component_count=*/ 0u,
/*boot_image_checksum=*/ 0u,
/*pointer_size=*/ sizeof(void*));
- return new DummyImageSpace(std::move(image_map),
- std::move(live_bitmap),
- std::move(oat_file),
- std::move(oat_map));
+ return new FakeImageSpace(std::move(image_map),
+ std::move(live_bitmap),
+ std::move(oat_file),
+ std::move(oat_map));
}
private:
- // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
+ // Bitmap pool for pre-allocated fake bitmaps. We need to pre-allocate them since we don't want
// them to randomly get placed somewhere where we want an image space.
std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
};
-class DummySpace : public space::ContinuousSpace {
+class FakeSpace : public space::ContinuousSpace {
public:
- DummySpace(uint8_t* begin, uint8_t* end)
- : ContinuousSpace("DummySpace",
+ FakeSpace(uint8_t* begin, uint8_t* end)
+ : ContinuousSpace("FakeSpace",
space::kGcRetentionPolicyNeverCollect,
begin,
end,
@@ -171,8 +171,8 @@
TEST_F(ImmuneSpacesTest, AppendBasic) {
ImmuneSpaces spaces;
uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
- DummySpace a(base, base + 45 * KB);
- DummySpace b(a.Limit(), a.Limit() + 813 * KB);
+ FakeSpace a(base, base + 45 * KB);
+ FakeSpace b(a.Limit(), a.Limit() + 813 * KB);
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
spaces.AddSpace(&a);
@@ -203,16 +203,16 @@
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
- kImageOatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> image_space(CreateImageSpace(kImageSize,
+ kImageOatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(image_space != nullptr);
ASSERT_FALSE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
const ImageHeader& image_header = image_space->GetImageHeader();
- DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
+ FakeSpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
EXPECT_EQ(image_header.GetImageSize(), kImageSize);
EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
@@ -266,18 +266,18 @@
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
- kImage1OatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> space1(CreateImageSpace(kImage1Size,
+ kImage1OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space1 != nullptr);
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
- kImage2OatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> space2(CreateImageSpace(kImage2Size,
+ kImage2OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space2 != nullptr);
ASSERT_FALSE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
@@ -286,10 +286,10 @@
image_reservation = reservation.TakeReservedMemory(kImage3Size);
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
- kImage3OatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> space3(CreateImageSpace(kImage3Size,
+ kImage3OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space3 != nullptr);
ASSERT_FALSE(image_reservation.IsValid());
ASSERT_FALSE(reservation.IsValid());
@@ -343,10 +343,10 @@
image_reservation = reservation.TakeReservedMemory(kImage4Size);
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
- kImage4OatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> space4(CreateImageSpace(kImage4Size,
+ kImage4OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space4 != nullptr);
ASSERT_FALSE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
@@ -382,10 +382,10 @@
image_reservation = reservation.TakeReservedMemory(kImage5Size);
ASSERT_TRUE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
- kImage5OatSize,
- &image_reservation,
- &reservation));
+ std::unique_ptr<FakeImageSpace> space5(CreateImageSpace(kImage5Size,
+ kImage5OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space5 != nullptr);
ASSERT_FALSE(image_reservation.IsValid());
ASSERT_TRUE(reservation.IsValid());
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c93410e..d191031 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -426,21 +426,18 @@
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
const size_t object_size = obj->SizeOf();
- size_t bytes_allocated, dummy;
+ size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
// Copy it to the to-space.
- mirror::Object* forward_address = to_space_->AllocThreadUnsafe(self_,
- object_size,
- &bytes_allocated,
- nullptr,
- &dummy);
+ mirror::Object* forward_address = to_space_->AllocThreadUnsafe(
+ self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
}
// If it's still null, attempt to use the fallback space.
if (UNLIKELY(forward_address == nullptr)) {
- forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr, &dummy);
+ forward_address = fallback_space_->AllocThreadUnsafe(
+ self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
if (bitmap != nullptr) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d1a5014..624e65a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2248,8 +2248,9 @@
if (it == bins_.end()) {
// No available space in the bins, place it in the target space instead (grows the zygote
// space).
- size_t bytes_allocated, dummy;
- forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
+ size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
+ forward_address = to_space_->Alloc(
+ self_, alloc_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
if (to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
} else {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 3da303d..afb42b0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -769,7 +769,7 @@
Region* current_region_; // The region currently used for allocation.
Region* evac_region_; // The region currently used for evacuation.
- Region full_region_; // The dummy/sentinel region that looks full.
+ Region full_region_; // The fake/sentinel region that looks full.
// Index into the region array pointing to the starting region when
// trying to allocate a new region. Only used when
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index d3db679..4849d6c 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -87,7 +87,7 @@
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
- size_t dummy;
+ size_t unused;
MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
@@ -112,7 +112,7 @@
EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
@@ -130,11 +130,11 @@
EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
@@ -163,7 +163,7 @@
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
- EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);
+ EXPECT_TRUE(space->Alloc(self, 1U * MB, &unused, nullptr, &unused) != nullptr);
gc::Heap* heap = Runtime::Current()->GetHeap();
space::Space* old_space = space;
@@ -197,7 +197,7 @@
EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
+ ptr2 = Alloc(space, self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
@@ -221,7 +221,7 @@
}
TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
- size_t dummy = 0;
+ size_t unused = 0;
MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
Thread* self = Thread::Current();
@@ -246,7 +246,7 @@
EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
@@ -264,11 +264,11 @@
EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
+ mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &unused, nullptr, &unused);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index c1f15dd..039c3de 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -55,7 +55,7 @@
namespace gc {
namespace collector {
-class DummyOatFile;
+class FakeOatFile;
} // namespace collector
} // namespace gc
@@ -463,7 +463,7 @@
// by the `dex_filenames` parameter, in case the OatFile does not embed the dex code.
std::vector<std::unique_ptr<const DexFile>> external_dex_files_;
- friend class gc::collector::DummyOatFile; // For modifying begin_ and end_.
+ friend class gc::collector::FakeOatFile; // For modifying begin_ and end_.
friend class OatClass;
friend class art::OatDexFile;
friend class OatDumper; // For GetBase and GetLimit