Remove card mark disabling, fix SetPatchLocation.

Should slightly improve performance. Added a no-thread-suspension-allowed
assertion to PatchOatCodeAndMethods.

Added a new ClassLinker function, GetOatCodeFor, which takes a method
reference (dex file and method index) instead of a method pointer. This fixes
the issue where pruned methods were getting re-loaded during code and method
patching.
Change-Id: I676bb88cb021b6d2e0db00adbcf1f2f04f82b72a
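
Note: for context, below is a minimal standalone sketch of the index mapping that
GetOatMethodIndexFromMethodIndex performs. The ClassData struct, its flat vectors, and
OatMethodIndexForDexMethod are illustrative stand-ins, not real ART/DexFile APIs: fields are
skipped, direct methods are numbered first, virtual methods follow, and that running
class-def-relative index is what is passed to OatClass::GetOatMethod.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for the per-class method lists encoded in a dex
    // class_data item (declaration order preserved, fields omitted).
    struct ClassData {
      std::vector<uint32_t> direct_method_indices;   // dex method_idx values
      std::vector<uint32_t> virtual_method_indices;  // dex method_idx values
    };

    // Map a dex method_idx to its class-def-relative position: direct methods
    // first, then virtual methods. This position is the oat method index used
    // to locate the method's code inside the corresponding OatClass.
    static uint32_t OatMethodIndexForDexMethod(const ClassData& cd, uint32_t method_idx) {
      uint32_t oat_method_index = 0;
      for (uint32_t idx : cd.direct_method_indices) {
        if (idx == method_idx) {
          return oat_method_index;
        }
        ++oat_method_index;
      }
      for (uint32_t idx : cd.virtual_method_indices) {
        if (idx == method_idx) {
          return oat_method_index;
        }
        ++oat_method_index;
      }
      assert(false && "method_idx is not declared by this class");
      return 0;
    }

    int main() {
      ClassData cd;
      cd.direct_method_indices = {10, 42};       // e.g. <clinit>, <init>
      cd.virtual_method_indices = {57, 58, 90};  // virtual methods
      // Two direct methods come first, so method_idx 58 maps to oat index 3.
      std::printf("oat index of method 58: %u\n", OatMethodIndexForDexMethod(cd, 58));
      return 0;
    }

Because SetPatchLocation now derives the oat code address directly from the patch's dex file and
referrer method index this way, it no longer resolves the referrer method, which is what previously
caused pruned methods to be re-loaded during code and method patching.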
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 75f0f38..abb6dcf 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -1461,6 +1461,42 @@
return oat_class;
}
+static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint32_t method_idx) {
+ const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(method_id.class_idx_);
+ const DexFile::ClassDef* class_def = dex_file.FindClassDef(dex_file.GetTypeDescriptor(type_id));
+ CHECK(class_def != NULL);
+ const byte* class_data = dex_file.GetClassData(*class_def);
+ CHECK(class_data != NULL);
+ ClassDataItemIterator it(dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ // Process methods
+ size_t class_def_method_index = 0;
+ while (it.HasNextDirectMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return class_def_method_index;
+ }
+ class_def_method_index++;
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return class_def_method_index;
+ }
+ class_def_method_index++;
+ it.Next();
+ }
+ DCHECK(!it.HasNext());
+ LOG(FATAL) << "Failed to find method index " << method_idx << " in " << dex_file.GetLocation();
+ return 0;
+}
+
const OatFile::OatMethod ClassLinker::GetOatMethodFor(const AbstractMethod* method) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
@@ -1487,6 +1523,10 @@
ClassHelper kh(declaring_class);
UniquePtr<const OatFile::OatClass> oat_class(GetOatClass(kh.GetDexFile(), kh.GetDescriptor()));
CHECK(oat_class.get() != NULL);
+ DCHECK_EQ(oat_method_index,
+ GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
+ method->GetDexMethodIndex()));
+
return oat_class->GetOatMethod(oat_method_index);
}
@@ -1496,6 +1536,15 @@
return GetOatMethodFor(method).GetCode();
}
+const void* ClassLinker::GetOatCodeFor(const DexFile& dex_file, uint32_t method_idx) {
+ const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+ const char* descriptor = dex_file.GetTypeDescriptor(dex_file.GetTypeId(method_id.class_idx_));
+ uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, method_idx);
+ UniquePtr<const OatFile::OatClass> oat_class(GetOatClass(dex_file, descriptor));
+ CHECK(oat_class.get() != NULL);
+ return oat_class->GetOatMethod(oat_method_idx).GetCode();
+}
+
void ClassLinker::FixupStaticTrampolines(Class* klass) {
ClassHelper kh(klass);
const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
diff --git a/src/class_linker.h b/src/class_linker.h
index f1530f3..14c719e 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -372,6 +372,9 @@
const void* GetOatCodeFor(const AbstractMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get the oat code for a method from its dex file and method index.
+ const void* GetOatCodeFor(const DexFile& dex_file, uint32_t method_idx);
+
// Relocate the OatFiles (ELF images)
void RelocateExecutable() LOCKS_EXCLUDED(dex_lock_);
diff --git a/src/heap.cc b/src/heap.cc
index 33f8366..bfc5256 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -136,7 +136,6 @@
card_table_(NULL),
concurrent_gc_(concurrent_gc),
have_zygote_space_(false),
- card_marking_disabled_(false),
is_gc_running_(false),
last_gc_type_(kGcTypeNone),
enforce_heap_growth_rate_(false),
diff --git a/src/heap.h b/src/heap.h
index 984a329..8ed5881 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -230,28 +230,19 @@
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
- if (!card_marking_disabled_) {
- card_table_->MarkCard(dst);
- }
+ card_table_->MarkCard(dst);
}
// Write barrier for array operations that update many field positions
void WriteBarrierArray(const Object* dst, int /*start_offset*/,
size_t /*length TODO: element_count or byte_count?*/) {
- if (UNLIKELY(!card_marking_disabled_)) {
- card_table_->MarkCard(dst);
- }
+ card_table_->MarkCard(dst);
}
CardTable* GetCardTable() {
return card_table_.get();
}
- void DisableCardMarking() {
- // TODO: we shouldn't need to disable card marking, this is here to help the image_writer
- card_marking_disabled_ = true;
- }
-
void AddFinalizerReference(Thread* self, Object* object);
size_t GetBytesAllocated() const;
@@ -412,10 +403,6 @@
// If we have a zygote space.
bool have_zygote_space_;
- // Used by the image writer to disable card marking on copied objects
- // TODO: remove
- bool card_marking_disabled_;
-
// Guards access to the state of GC, associated conditional variable is used to signal when a GC
// completes.
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 497ca22..642cee6 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -99,14 +99,11 @@
CheckNonImageClassesRemoved();
}
#endif
- heap->DisableCardMarking();
- {
- Thread::Current()->TransitionFromSuspendedToRunnable();
- CalculateNewObjectOffsets();
- CopyAndFixupObjects();
- PatchOatCodeAndMethods(compiler);
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
- }
+ Thread::Current()->TransitionFromSuspendedToRunnable();
+ CalculateNewObjectOffsets();
+ CopyAndFixupObjects();
+ PatchOatCodeAndMethods(compiler);
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
UniquePtr<File> file(OS::OpenFile(image_filename.c_str(), true));
if (file.get() == NULL) {
@@ -192,7 +189,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: Check image spaces only?
Heap* heap = Runtime::Current()->GetHeap();
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
heap->FlushAllocStack();
heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this);
}
@@ -397,11 +394,8 @@
image_end_ += RoundUp(sizeof(ImageHeader), 8); // 64-bit-alignment
{
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap->FlushAllocStack();
- }
-
- {
// TODO: Image spaces only?
// TODO: Add InOrderWalk to heap bitmap.
const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
@@ -434,7 +428,7 @@
// TODO: heap validation can't handle this fix up pass
heap->DisableObjectValidation();
// TODO: Image spaces only?
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap->FlushAllocStack();
heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this);
self->EndAssertNoThreadSuspension(old_cause);
@@ -553,7 +547,7 @@
void ImageWriter::FixupObjectArray(const ObjectArray<Object>* orig, ObjectArray<Object>* copy) {
for (int32_t i = 0; i < orig->GetLength(); ++i) {
const Object* element = orig->Get(i);
- copy->SetWithoutChecks(i, GetImageAddress(element));
+ copy->SetPtrWithoutChecks(i, GetImageAddress(element));
}
}
@@ -587,7 +581,8 @@
size_t right_shift = CLZ(ref_offsets);
MemberOffset byte_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
const Object* ref = orig->GetFieldObject<const Object*>(byte_offset, false);
- copy->SetFieldObject(byte_offset, GetImageAddress(ref), false);
+ // Use SetFieldPtr to avoid card marking since we are writing to the image.
+ copy->SetFieldPtr(byte_offset, GetImageAddress(ref), false);
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
} else {
@@ -607,34 +602,13 @@
: klass->GetInstanceField(i));
MemberOffset field_offset = field->GetOffset();
const Object* ref = orig->GetFieldObject<const Object*>(field_offset, false);
- copy->SetFieldObject(field_offset, GetImageAddress(ref), false);
+ // Use SetFieldPtr to avoid card marking since we are writing to the image.
+ copy->SetFieldPtr(field_offset, GetImageAddress(ref), false);
}
}
}
}
-static AbstractMethod* GetReferrerMethod(const Compiler::PatchInformation* patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile());
- AbstractMethod* method = class_linker->ResolveMethod(patch->GetDexFile(),
- patch->GetReferrerMethodIdx(),
- dex_cache,
- NULL,
- NULL,
- patch->GetReferrerInvokeType());
- CHECK(method != NULL)
- << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx();
- CHECK(!method->IsRuntimeMethod())
- << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx();
- CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx()) == method)
- << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
- << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetReferrerMethodIdx())) << " "
- << PrettyMethod(method);
- return method;
-}
-
static AbstractMethod* GetTargetMethod(const Compiler::PatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -657,9 +631,12 @@
}
void ImageWriter::PatchOatCodeAndMethods(const Compiler& compiler) {
+ Thread* self = Thread::Current();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
- const std::vector<const Compiler::PatchInformation*>& code_to_patch = compiler.GetCodeToPatch();
+ typedef std::vector<const Compiler::PatchInformation*> Patches;
+ const Patches& code_to_patch = compiler.GetCodeToPatch();
for (size_t i = 0; i < code_to_patch.size(); i++) {
const Compiler::PatchInformation* patch = code_to_patch[i];
AbstractMethod* target = GetTargetMethod(patch);
@@ -669,8 +646,7 @@
SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetOatAddress(code_offset)));
}
- const std::vector<const Compiler::PatchInformation*>& methods_to_patch
- = compiler.GetMethodsToPatch();
+ const Patches& methods_to_patch = compiler.GetMethodsToPatch();
for (size_t i = 0; i < methods_to_patch.size(); i++) {
const Compiler::PatchInformation* patch = methods_to_patch[i];
AbstractMethod* target = GetTargetMethod(patch);
@@ -680,16 +656,16 @@
// Update the image header with the new checksum after patching
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
+ self->EndAssertNoThreadSuspension(old_cause);
}
void ImageWriter::SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- AbstractMethod* method = GetReferrerMethod(patch);
- // Goodbye const, we are about to modify some code.
- void* code = const_cast<void*>(class_linker->GetOatCodeFor(method));
+ const void* oat_code = class_linker->GetOatCodeFor(patch->GetDexFile(),
+ patch->GetReferrerMethodIdx());
OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader());
// TODO: make this Thumb2 specific
- uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(code) & ~0x1);
+ uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(oat_code) & ~0x1);
uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
#ifndef NDEBUG
const DexFile::MethodId& id = patch->GetDexFile().GetMethodId(patch->GetTargetMethodIdx());
diff --git a/src/object.h b/src/object.h
index 61fa335..87f132e 100644
--- a/src/object.h
+++ b/src/object.h
@@ -1142,6 +1142,10 @@
// circumstances, such as during boot image writing
void SetWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Set element without bounds or element type checks, to be used in limited circumstances, such
+ // as during boot image writing. Does not perform a write barrier.
+ void SetPtrWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void Copy(const ObjectArray<T>* src, int src_pos,
@@ -2262,6 +2266,13 @@
}
template<class T>
+void ObjectArray<T>::SetPtrWithoutChecks(int32_t i, T* object) {
+ DCHECK(IsValidIndex(i));
+ MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*));
+ SetFieldPtr(data_offset, object, false);
+}
+
+template<class T>
T* ObjectArray<T>::GetWithoutChecks(int32_t i) const {
DCHECK(IsValidIndex(i));
MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*));