diff options
Diffstat (limited to 'runtime')
44 files changed, 1003 insertions, 344 deletions
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h index 19ad302c9d..32283d0a0a 100644 --- a/runtime/base/array_slice.h +++ b/runtime/base/array_slice.h @@ -129,6 +129,10 @@ class ArraySlice { return element_size_; } + bool Contains(const T* element) const { + return &AtUnchecked(0) <= element && element < &AtUnchecked(size_); + } + private: T& AtUnchecked(size_t index) { return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_); diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 212e5bd922..df8a3692aa 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -193,6 +193,8 @@ LogMessage::LogMessage(const char* file, unsigned int line, LogSeverity severity } } LogMessage::~LogMessage() { + std::string msg; + if (!PrintDirectly(data_->GetSeverity()) && data_->GetSeverity() != LogSeverity::NONE) { if (data_->GetSeverity() < gMinimumLogSeverity) { return; // No need to format something we're not going to output. @@ -202,7 +204,7 @@ LogMessage::~LogMessage() { if (data_->GetError() != -1) { data_->GetBuffer() << ": " << strerror(data_->GetError()); } - std::string msg(data_->ToString()); + msg = data_->ToString(); // Do the actual logging with the lock held. { @@ -216,6 +218,8 @@ LogMessage::~LogMessage() { size_t nl = msg.find('\n', i); msg[nl] = '\0'; LogLine(data_->GetFile(), data_->GetLineNumber(), data_->GetSeverity(), &msg[i]); + // Undo zero-termination, so we retain the complete message. + msg[nl] = '\n'; i = nl + 1; } } @@ -224,7 +228,7 @@ LogMessage::~LogMessage() { // Abort if necessary. 
if (data_->GetSeverity() == FATAL) { - Runtime::Abort(); + Runtime::Abort(msg.c_str()); } } diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 3d7624d979..a4e05bd5b7 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -88,7 +88,6 @@ enum LockLevel { kTracingUniqueMethodsLock, kTracingStreamingLock, kDeoptimizedMethodsLock, - kJitCodeCacheLock, kClassLoaderClassesLock, kDefaultMutexLevel, kMarkSweepLargeObjectLock, @@ -99,6 +98,7 @@ enum LockLevel { kMonitorPoolLock, kMethodVerifiersLock, kClassLinkerClassesLock, // TODO rename. + kJitCodeCacheLock, kBreakpointLock, kMonitorLock, kMonitorListLock, diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 1914733b7c..a34e029089 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -2179,20 +2179,37 @@ mirror::Class* ClassLinker::EnsureResolved(Thread* self, } // Wait for the class if it has not already been linked. - if (!klass->IsResolved() && !klass->IsErroneous()) { + size_t index = 0; + // Maximum number of yield iterations until we start sleeping. + static const size_t kNumYieldIterations = 1000; + // How long each sleep is in us. + static const size_t kSleepDurationUS = 1000; // 1 ms. + while (!klass->IsResolved() && !klass->IsErroneous()) { StackHandleScope<1> hs(self); HandleWrapper<mirror::Class> h_class(hs.NewHandleWrapper(&klass)); - ObjectLock<mirror::Class> lock(self, h_class); - // Check for circular dependencies between classes. - if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) { - ThrowClassCircularityError(h_class.Get()); - mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self); - return nullptr; + { + ObjectTryLock<mirror::Class> lock(self, h_class); + // Can not use a monitor wait here since it may block when returning and deadlock if another + // thread has locked klass. + if (lock.Acquired()) { + // Check for circular dependencies between classes, the lock is required for SetStatus. 
+ if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) { + ThrowClassCircularityError(h_class.Get()); + mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self); + return nullptr; + } + } } - // Wait for the pending initialization to complete. - while (!h_class->IsResolved() && !h_class->IsErroneous()) { - lock.WaitIgnoringInterrupts(); + { + // Handle wrapper deals with klass moving. + ScopedThreadSuspension sts(self, kSuspended); + if (index < kNumYieldIterations) { + sched_yield(); + } else { + usleep(kSleepDurationUS); + } } + ++index; } if (klass->IsErroneous()) { @@ -3546,32 +3563,40 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k } LOG(INFO) << "Loaded class " << descriptor << source; } - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - mirror::ClassLoader* const class_loader = klass->GetClassLoader(); - ClassTable* const class_table = InsertClassTableForClassLoader(class_loader); - mirror::Class* existing = class_table->Lookup(descriptor, hash); - if (existing != nullptr) { - return existing; - } - if (kIsDebugBuild && - !klass->IsTemp() && - class_loader == nullptr && - dex_cache_boot_image_class_lookup_required_) { - // Check a class loaded with the system class loader matches one in the image if the class - // is in the image. 
- existing = LookupClassFromBootImage(descriptor); + { + WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + mirror::ClassLoader* const class_loader = klass->GetClassLoader(); + ClassTable* const class_table = InsertClassTableForClassLoader(class_loader); + mirror::Class* existing = class_table->Lookup(descriptor, hash); if (existing != nullptr) { - CHECK_EQ(klass, existing); + return existing; + } + if (kIsDebugBuild && + !klass->IsTemp() && + class_loader == nullptr && + dex_cache_boot_image_class_lookup_required_) { + // Check a class loaded with the system class loader matches one in the image if the class + // is in the image. + existing = LookupClassFromBootImage(descriptor); + if (existing != nullptr) { + CHECK_EQ(klass, existing); + } + } + VerifyObject(klass); + class_table->InsertWithHash(klass, hash); + if (class_loader != nullptr) { + // This is necessary because we need to have the card dirtied for remembered sets. + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader); + } + if (log_new_class_table_roots_) { + new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); } } - VerifyObject(klass); - class_table->InsertWithHash(klass, hash); - if (class_loader != nullptr) { - // This is necessary because we need to have the card dirtied for remembered sets. - Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader); - } - if (log_new_class_table_roots_) { - new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); + if (kIsDebugBuild) { + // Test that copied methods correctly can find their holder. + for (ArtMethod& method : klass->GetCopiedMethods(image_pointer_size_)) { + CHECK_EQ(GetHoldingClassOfCopiedMethod(&method), klass); + } } return nullptr; } @@ -6514,39 +6539,90 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> return true; } -// Finds the method with a name/signature that matches cmp in the given list of methods. The list of -// methods must be unique. 
+// Finds the method with a name/signature that matches cmp in the given lists of methods. The list +// of methods must be unique. +static ArtMethod* FindSameNameAndSignature(MethodNameAndSignatureComparator& cmp ATTRIBUTE_UNUSED) { + return nullptr; +} + +template <typename ... Types> static ArtMethod* FindSameNameAndSignature(MethodNameAndSignatureComparator& cmp, - const ScopedArenaVector<ArtMethod*>& list) + const ScopedArenaVector<ArtMethod*>& list, + const Types& ... rest) SHARED_REQUIRES(Locks::mutator_lock_) { for (ArtMethod* method : list) { if (cmp.HasSameNameAndSignature(method)) { return method; } } - return nullptr; + return FindSameNameAndSignature(cmp, rest...); } -static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size) +// Check that all vtable entries are present in this class's virtuals or are the same as a +// superclasses vtable entry. +static void CheckClassOwnsVTableEntries(Thread* self, + Handle<mirror::Class> klass, + size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking(); - mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr; - int32_t super_vtable_length = (superclass != nullptr) ? superclass->GetVTableLength() : 0; + StackHandleScope<2> hs(self); + Handle<mirror::PointerArray> check_vtable(hs.NewHandle(klass->GetVTableDuringLinking())); + mirror::Class* super_temp = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr; + Handle<mirror::Class> superclass(hs.NewHandle(super_temp)); + int32_t super_vtable_length = (superclass.Get() != nullptr) ? 
superclass->GetVTableLength() : 0; for (int32_t i = 0; i < check_vtable->GetLength(); ++i) { ArtMethod* m = check_vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size); CHECK(m != nullptr); + CHECK_EQ(m->GetMethodIndexDuringLinking(), i) + << PrettyMethod(m) << " has an unexpected method index for its spot in the vtable for class" + << PrettyClass(klass.Get()); ArraySlice<ArtMethod> virtuals = klass->GetVirtualMethodsSliceUnchecked(pointer_size); auto is_same_method = [m] (const ArtMethod& meth) { return &meth == m; }; CHECK((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) || std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end()) - << "While linking class '" << PrettyClass(klass.Get()) << "' unable to find owning class " - << "of '" << PrettyMethod(m) << "' (vtable index: " << i << ")."; + << PrettyMethod(m) << " does not seem to be owned by current class " + << PrettyClass(klass.Get()) << " or any of its superclasses!"; } } +// Check to make sure the vtable does not have duplicates. Duplicates could cause problems when a +// method is overridden in a subclass. +static void CheckVTableHasNoDuplicates(Thread* self, + Handle<mirror::Class> klass, + size_t pointer_size) + SHARED_REQUIRES(Locks::mutator_lock_) { + StackHandleScope<1> hs(self); + Handle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking())); + int32_t num_entries = vtable->GetLength(); + for (int32_t i = 0; i < num_entries; i++) { + ArtMethod* vtable_entry = vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size); + // Don't bother if we cannot 'see' the vtable entry (i.e. it is a package-private member maybe). 
+ if (!klass->CanAccessMember(vtable_entry->GetDeclaringClass(), + vtable_entry->GetAccessFlags())) { + continue; + } + MethodNameAndSignatureComparator name_comparator( + vtable_entry->GetInterfaceMethodIfProxy(pointer_size)); + for (int32_t j = i+1; j < num_entries; j++) { + ArtMethod* other_entry = vtable->GetElementPtrSize<ArtMethod*>(j, pointer_size); + CHECK(vtable_entry != other_entry && + !name_comparator.HasSameNameAndSignature( + other_entry->GetInterfaceMethodIfProxy(pointer_size))) + << "vtable entries " << i << " and " << j << " are identical for " + << PrettyClass(klass.Get()) << " in method " << PrettyMethod(vtable_entry) << " and " + << PrettyMethod(other_entry); + } + } +} + +static void SanityCheckVTable(Thread* self, Handle<mirror::Class> klass, size_t pointer_size) + SHARED_REQUIRES(Locks::mutator_lock_) { + CheckClassOwnsVTableEntries(self, klass, pointer_size); + CheckVTableHasNoDuplicates(self, klass, pointer_size); +} + void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass, ArtMethod* unimplemented_method, ArtMethod* imt_conflict_method, @@ -6605,8 +6681,10 @@ bool ClassLinker::LinkInterfaceMethods( ScopedArenaAllocator allocator(&stack); ScopedArenaVector<ArtMethod*> default_conflict_methods(allocator.Adapter()); + ScopedArenaVector<ArtMethod*> overriding_default_conflict_methods(allocator.Adapter()); ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter()); ScopedArenaVector<ArtMethod*> default_methods(allocator.Adapter()); + ScopedArenaVector<ArtMethod*> overriding_default_methods(allocator.Adapter()); MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking())); ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod(); @@ -6808,8 +6886,10 @@ bool ClassLinker::LinkInterfaceMethods( default_conflict_method = vtable_impl; } else { // See if we already have a conflict method for this method. 
- ArtMethod* preexisting_conflict = FindSameNameAndSignature(interface_name_comparator, - default_conflict_methods); + ArtMethod* preexisting_conflict = FindSameNameAndSignature( + interface_name_comparator, + default_conflict_methods, + overriding_default_conflict_methods); if (LIKELY(preexisting_conflict != nullptr)) { // We already have another conflict we can reuse. default_conflict_method = preexisting_conflict; @@ -6820,7 +6900,13 @@ bool ClassLinker::LinkInterfaceMethods( default_conflict_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size)); new(default_conflict_method) ArtMethod(interface_method, image_pointer_size_); - default_conflict_methods.push_back(default_conflict_method); + if (vtable_impl == nullptr) { + // Save the conflict method. We need to add it to the vtable. + default_conflict_methods.push_back(default_conflict_method); + } else { + // Save the conflict method but it is already in the vtable. + overriding_default_conflict_methods.push_back(default_conflict_method); + } } } current_method = default_conflict_method; @@ -6840,11 +6926,18 @@ bool ClassLinker::LinkInterfaceMethods( // TODO It might be worthwhile to copy default methods on interfaces anyway since it // would make lookup for interface super much faster. (We would only need to scan // the iftable to find if there is a NSME or AME.) - ArtMethod* old = FindSameNameAndSignature(interface_name_comparator, default_methods); + ArtMethod* old = FindSameNameAndSignature(interface_name_comparator, + default_methods, + overriding_default_methods); if (old == nullptr) { // We found a default method implementation and there were no conflicts. - // Save the default method. We need to add it to the vtable. - default_methods.push_back(current_method); + if (vtable_impl == nullptr) { + // Save the default method. We need to add it to the vtable. + default_methods.push_back(current_method); + } else { + // Save the default method but it is already in the vtable. 
+ overriding_default_methods.push_back(current_method); + } } else { CHECK(old == current_method) << "Multiple default implementations selected!"; } @@ -6899,6 +6992,8 @@ bool ClassLinker::LinkInterfaceMethods( } // For each interface. const bool has_new_virtuals = !(miranda_methods.empty() && default_methods.empty() && + overriding_default_methods.empty() && + overriding_default_conflict_methods.empty() && default_conflict_methods.empty()); // TODO don't extend virtuals of interface unless necessary (when is it?). if (has_new_virtuals) { @@ -6906,11 +7001,16 @@ bool ClassLinker::LinkInterfaceMethods( << "Interfaces should only have default-conflict methods appended to them."; VLOG(class_linker) << PrettyClass(klass.Get()) << ": miranda_methods=" << miranda_methods.size() << " default_methods=" << default_methods.size() - << " default_conflict_methods=" << default_conflict_methods.size(); + << " overriding_default_methods=" << overriding_default_methods.size() + << " default_conflict_methods=" << default_conflict_methods.size() + << " overriding_default_conflict_methods=" + << overriding_default_conflict_methods.size(); const size_t old_method_count = klass->NumMethods(); const size_t new_method_count = old_method_count + miranda_methods.size() + default_methods.size() + + overriding_default_conflict_methods.size() + + overriding_default_methods.size() + default_conflict_methods.size(); // Attempt to realloc to save RAM if possible. LengthPrefixedArray<ArtMethod>* old_methods = klass->GetMethodsPtr(); @@ -6965,36 +7065,42 @@ bool ClassLinker::LinkInterfaceMethods( // interface but will have different ArtMethod*s for them. This also means we cannot compare a // default method found on a class with one found on the declaring interface directly and must // look at the declaring class to determine if they are the same. 
- for (ArtMethod* def_method : default_methods) { - ArtMethod& new_method = *out; - new_method.CopyFrom(def_method, image_pointer_size_); - // Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been verified - // yet it shouldn't have methods that are skipping access checks. - // TODO This is rather arbitrary. We should maybe support classes where only some of its - // methods are skip_access_checks. - constexpr uint32_t kSetFlags = kAccDefault | kAccCopied; - constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks; - new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags); - move_table.emplace(def_method, &new_method); - ++out; + for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_methods, + overriding_default_methods}) { + for (ArtMethod* def_method : methods_vec) { + ArtMethod& new_method = *out; + new_method.CopyFrom(def_method, image_pointer_size_); + // Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been + // verified yet it shouldn't have methods that are skipping access checks. + // TODO This is rather arbitrary. We should maybe support classes where only some of its + // methods are skip_access_checks. + constexpr uint32_t kSetFlags = kAccDefault | kAccCopied; + constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks; + new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags); + move_table.emplace(def_method, &new_method); + ++out; + } } - for (ArtMethod* conf_method : default_conflict_methods) { - ArtMethod& new_method = *out; - new_method.CopyFrom(conf_method, image_pointer_size_); - // This is a type of default method (there are default method impls, just a conflict) so mark - // this as a default, non-abstract method, since thats what it is. Also clear the - // kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have - // methods that are skipping access checks. 
- constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied; - constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks); - new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags); - DCHECK(new_method.IsDefaultConflicting()); - // The actual method might or might not be marked abstract since we just copied it from a - // (possibly default) interface method. We need to set it entry point to be the bridge so that - // the compiler will not invoke the implementation of whatever method we copied from. - EnsureThrowsInvocationError(&new_method); - move_table.emplace(conf_method, &new_method); - ++out; + for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_conflict_methods, + overriding_default_conflict_methods}) { + for (ArtMethod* conf_method : methods_vec) { + ArtMethod& new_method = *out; + new_method.CopyFrom(conf_method, image_pointer_size_); + // This is a type of default method (there are default method impls, just a conflict) so + // mark this as a default, non-abstract method, since thats what it is. Also clear the + // kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have + // methods that are skipping access checks. + constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied; + constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks); + new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags); + DCHECK(new_method.IsDefaultConflicting()); + // The actual method might or might not be marked abstract since we just copied it from a + // (possibly default) interface method. We need to set it entry point to be the bridge so + // that the compiler will not invoke the implementation of whatever method we copied from. 
+ EnsureThrowsInvocationError(&new_method); + move_table.emplace(conf_method, &new_method); + ++out; + } } methods->SetSize(new_method_count); UpdateClassMethods(klass.Get(), methods); @@ -7010,22 +7116,31 @@ bool ClassLinker::LinkInterfaceMethods( miranda_methods.size() + default_methods.size() + default_conflict_methods.size(); + vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count))); if (UNLIKELY(vtable.Get() == nullptr)) { self->AssertPendingOOMException(); return false; } - out = methods->begin(method_size, method_alignment) + old_method_count; size_t vtable_pos = old_vtable_count; // Update all the newly copied method's indexes so they denote their placement in the vtable. - for (size_t i = old_method_count; i < new_method_count; ++i) { - // Leave the declaring class alone the method's dex_code_item_offset_ and dex_method_index_ - // fields are references into the dex file the method was defined in. Since the ArtMethod - // does not store that information it uses declaring_class_->dex_cache_. - out->SetMethodIndex(0xFFFF & vtable_pos); - vtable->SetElementPtrSize(vtable_pos, &*out, image_pointer_size_); - ++out; - ++vtable_pos; + for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_methods, + default_conflict_methods, + miranda_methods}) { + // These are the functions that are not already in the vtable! + for (ArtMethod* new_method : methods_vec) { + auto translated_method_it = move_table.find(new_method); + CHECK(translated_method_it != move_table.end()) + << "We must have a translation for methods added to the classes methods_ array! We " + << "could not find the ArtMethod added for " << PrettyMethod(new_method); + ArtMethod* new_vtable_method = translated_method_it->second; + // Leave the declaring class alone the method's dex_code_item_offset_ and dex_method_index_ + // fields are references into the dex file the method was defined in. 
Since the ArtMethod + // does not store that information it uses declaring_class_->dex_cache_. + new_vtable_method->SetMethodIndex(0xFFFF & vtable_pos); + vtable->SetElementPtrSize(vtable_pos, new_vtable_method, image_pointer_size_); + ++vtable_pos; + } } CHECK_EQ(vtable_pos, new_vtable_count); // Update old vtable methods. We use the default_translations map to figure out what each @@ -7041,8 +7156,10 @@ bool ClassLinker::LinkInterfaceMethods( // Find which conflict method we are to use for this method. MethodNameAndSignatureComparator old_method_comparator( translated_method->GetInterfaceMethodIfProxy(image_pointer_size_)); - ArtMethod* new_conflict_method = FindSameNameAndSignature(old_method_comparator, - default_conflict_methods); + // We only need to look through overriding_default_conflict_methods since this is an + // overridden method we are fixing up here. + ArtMethod* new_conflict_method = FindSameNameAndSignature( + old_method_comparator, overriding_default_conflict_methods); CHECK(new_conflict_method != nullptr) << "Expected a conflict method!"; translated_method = new_conflict_method; } else if (translation_it->second.IsAbstract()) { @@ -7050,7 +7167,7 @@ bool ClassLinker::LinkInterfaceMethods( MethodNameAndSignatureComparator old_method_comparator( translated_method->GetInterfaceMethodIfProxy(image_pointer_size_)); ArtMethod* miranda_method = FindSameNameAndSignature(old_method_comparator, - miranda_methods); + miranda_methods); DCHECK(miranda_method != nullptr); translated_method = miranda_method; } else { @@ -7065,6 +7182,14 @@ bool ClassLinker::LinkInterfaceMethods( if (it != move_table.end()) { auto* new_method = it->second; DCHECK(new_method != nullptr); + // Make sure the new_methods index is set. 
+ if (new_method->GetMethodIndexDuringLinking() != i) { + DCHECK_LE(reinterpret_cast<uintptr_t>(&*methods->begin(method_size, method_alignment)), + reinterpret_cast<uintptr_t>(new_method)); + DCHECK_LT(reinterpret_cast<uintptr_t>(new_method), + reinterpret_cast<uintptr_t>(&*methods->end(method_size, method_alignment))); + new_method->SetMethodIndex(0xFFFF & i); + } vtable->SetElementPtrSize(i, new_method, image_pointer_size_); } else { // If it was not going to be updated we wouldn't have put it into the default_translations @@ -7128,7 +7253,7 @@ bool ClassLinker::LinkInterfaceMethods( self->EndAssertNoThreadSuspension(old_cause); } if (kIsDebugBuild && !is_interface) { - SanityCheckVTable(klass, image_pointer_size_); + SanityCheckVTable(self, klass, image_pointer_size_); } return true; } @@ -8119,19 +8244,27 @@ void ClassLinker::InsertDexFileInToClassLoader(mirror::Object* dex_file, void ClassLinker::CleanupClassLoaders() { Thread* const self = Thread::Current(); - WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); - for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) { - const ClassLoaderData& data = *it; - // Need to use DecodeJObject so that we get null for cleared JNI weak globals. - auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root)); - if (class_loader != nullptr) { - ++it; - } else { - VLOG(class_linker) << "Freeing class loader"; - DeleteClassLoader(self, data); - it = class_loaders_.erase(it); + std::vector<ClassLoaderData> to_delete; + // Do the delete outside the lock to avoid lock violation in jit code cache. + { + WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); + for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) { + const ClassLoaderData& data = *it; + // Need to use DecodeJObject so that we get null for cleared JNI weak globals. 
+ auto* const class_loader = + down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root)); + if (class_loader != nullptr) { + ++it; + } else { + VLOG(class_linker) << "Freeing class loader"; + to_delete.push_back(data); + it = class_loaders_.erase(it); + } } } + for (ClassLoaderData& data : to_delete) { + DeleteClassLoader(self, data); + } } std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) { @@ -8250,6 +8383,33 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys( return ret; } +class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor { + public: + FindVirtualMethodHolderVisitor(const ArtMethod* method, size_t pointer_size) + : method_(method), + pointer_size_(pointer_size) {} + + bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE { + if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) { + holder_ = klass; + } + // Return false to stop searching if holder_ is not null. + return holder_ == nullptr; + } + + mirror::Class* holder_ = nullptr; + const ArtMethod* const method_; + const size_t pointer_size_; +}; + +mirror::Class* ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) { + ScopedTrace trace(__FUNCTION__); // Since this function is slow, have a trace to notify people. + CHECK(method->IsCopied()); + FindVirtualMethodHolderVisitor visitor(method, image_pointer_size_); + VisitClasses(&visitor); + return visitor.holder_; +} + // Instantiate ResolveMethod. template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>( const DexFile& dex_file, diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 4305dc6560..8aceffbdcc 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -647,6 +647,10 @@ class ClassLinker { SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); + // Get the actual holding class for a copied method. Pretty slow, don't call often. 
+ mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method) + SHARED_REQUIRES(Locks::mutator_lock_); + struct DexCacheData { // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may // not work properly. @@ -675,7 +679,6 @@ class ClassLinker { SHARED_REQUIRES(Locks::mutator_lock_); static void DeleteClassLoader(Thread* self, const ClassLoaderData& data) - REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void VisitClassLoaders(ClassLoaderVisitor* visitor) const @@ -1167,6 +1170,8 @@ class ClassLinker { // Image pointer size. size_t image_pointer_size_; + class FindVirtualMethodHolderVisitor; + friend struct CompilationHelper; // For Compile in ImageTest. friend class ImageDumper; // for DexLock friend class ImageWriter; // for GetClassRoots friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub diff --git a/runtime/class_table.cc b/runtime/class_table.cc index e9154cb400..909511c018 100644 --- a/runtime/class_table.cc +++ b/runtime/class_table.cc @@ -107,6 +107,10 @@ void ClassTable::Insert(mirror::Class* klass) { classes_.back().Insert(GcRoot<mirror::Class>(klass)); } +void ClassTable::InsertWithoutLocks(mirror::Class* klass) { + classes_.back().Insert(GcRoot<mirror::Class>(klass)); +} + void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) { WriterMutexLock mu(Thread::Current(), lock_); classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash); diff --git a/runtime/class_table.h b/runtime/class_table.h index 6fb420605c..e3fc217cc8 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -163,6 +163,8 @@ class ClassTable { } private: + void InsertWithoutLocks(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS; + // Lock to guard inserting and removing. mutable ReaderWriterMutex lock_; // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot. 
@@ -171,6 +173,8 @@ class ClassTable { // loader which may not be owned by the class loader must be held strongly live. Also dex caches // are held live to prevent them being unloading once they have classes in them. std::vector<GcRoot<mirror::Object>> strong_roots_ GUARDED_BY(lock_); + + friend class ImageWriter; // for InsertWithoutLocks. }; } // namespace art diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index 3a1b650837..27e54b4410 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -76,9 +76,10 @@ ScratchFile::ScratchFile() { file_.reset(new File(fd, GetFilename(), true)); } -ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) { - filename_ = other.GetFilename(); - filename_ += suffix; +ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) + : ScratchFile(other.GetFilename() + suffix) {} + +ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) { int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666); CHECK_NE(-1, fd); file_.reset(new File(fd, GetFilename(), true)); @@ -90,6 +91,18 @@ ScratchFile::ScratchFile(File* file) { file_.reset(file); } +ScratchFile::ScratchFile(ScratchFile&& other) { + *this = std::move(other); +} + +ScratchFile& ScratchFile::operator=(ScratchFile&& other) { + if (GetFile() != other.GetFile()) { + std::swap(filename_, other.filename_); + std::swap(file_, other.file_); + } + return *this; +} + ScratchFile::~ScratchFile() { Unlink(); } diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index b68eb19f2a..e290928334 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -40,8 +40,14 @@ class ScratchFile { public: ScratchFile(); + explicit ScratchFile(const std::string& filename); + ScratchFile(const ScratchFile& other, const char* suffix); + explicit ScratchFile(ScratchFile&& other); + + ScratchFile& operator=(ScratchFile&& other); + explicit ScratchFile(File* file); 
~ScratchFile(); @@ -113,8 +119,7 @@ class CommonRuntimeTestImpl { std::string GetTestDexFileName(const char* name) const; - std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name) - SHARED_REQUIRES(Locks::mutator_lock_); + std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name); std::unique_ptr<const DexFile> OpenTestDexFile(const char* name) SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 1fb8b407ec..7aa9ac4835 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -1755,22 +1755,32 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje return error; } - mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); - if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { + Thread* self = Thread::Current(); + StackHandleScope<2> hs(self); + MutableHandle<mirror::Object> + o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error))); + if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; } ArtField* f = FromFieldId(field_id); mirror::Class* receiver_class = c; - if (receiver_class == nullptr && o != nullptr) { + if (receiver_class == nullptr && o.Get() != nullptr) { receiver_class = o->GetClass(); } + // TODO: should we give up now if receiver_class is null? if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) { LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class); return JDWP::ERR_INVALID_FIELDID; } + // Ensure the field's class is initialized. 
+ Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass())); + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) { + LOG(WARNING) << "Not able to initialize class for SetValues: " << PrettyClass(klass.Get()); + } + // The RI only enforces the static/non-static mismatch in one direction. // TODO: should we change the tests and check both? if (is_static) { @@ -1784,10 +1794,10 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje } } if (f->IsStatic()) { - o = f->GetDeclaringClass(); + o.Assign(f->GetDeclaringClass()); } - JValue field_value(GetArtFieldValue(f, o)); + JValue field_value(GetArtFieldValue(f, o.Get())); JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); Dbg::OutputJValue(tag, &field_value, pReply); return JDWP::ERR_NONE; @@ -1877,12 +1887,21 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId uint64_t value, int width, bool is_static) SHARED_REQUIRES(Locks::mutator_lock_) { JDWP::JdwpError error; - mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); - if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { + Thread* self = Thread::Current(); + StackHandleScope<2> hs(self); + MutableHandle<mirror::Object> + o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error))); + if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; } ArtField* f = FromFieldId(field_id); + // Ensure the field's class is initialized. + Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass())); + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) { + LOG(WARNING) << "Not able to initialize class for SetValues: " << PrettyClass(klass.Get()); + } + // The RI only enforces the static/non-static mismatch in one direction. // TODO: should we change the tests and check both? 
if (is_static) { @@ -1896,9 +1915,9 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId } } if (f->IsStatic()) { - o = f->GetDeclaringClass(); + o.Assign(f->GetDeclaringClass()); } - return SetArtFieldValue(f, o, value, width); + return SetArtFieldValue(f, o.Get(), value, width); } JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, @@ -4049,7 +4068,7 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv // Prepare JDWP ids for the reply. JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty()); const bool is_object_result = (result_tag == JDWP::JT_OBJECT); - StackHandleScope<2> hs(soa.Self()); + StackHandleScope<3> hs(soa.Self()); Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr); Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException()); soa.Self()->ClearException(); @@ -4088,10 +4107,17 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv // unless we threw, in which case we return null. DCHECK_EQ(JDWP::JT_VOID, result_tag); if (exceptionObjectId == 0) { - // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the - // object registry. - result_value = GetObjectRegistry()->Add(pReq->receiver.Read()); - result_tag = TagFromObject(soa, pReq->receiver.Read()); + if (m->GetDeclaringClass()->IsStringClass()) { + // For string constructors, the new string is remapped to the receiver (stored in ref). + Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get())); + result_value = gRegistry->Add(decoded_ref); + result_tag = TagFromObject(soa, decoded_ref.Get()); + } else { + // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the + // object registry. 
+ result_value = GetObjectRegistry()->Add(pReq->receiver.Read()); + result_tag = TagFromObject(soa, pReq->receiver.Read()); + } } else { result_value = 0; result_tag = JDWP::JT_OBJECT; diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index 096f003de3..fd05221d82 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -36,8 +36,7 @@ template <typename ElfTypes> ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base) - : file_(file), - writable_(writable), + : writable_(writable), program_header_only_(program_header_only), header_(nullptr), base_address_(nullptr), @@ -74,7 +73,7 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file, prot = PROT_READ; flags = MAP_PRIVATE; } - if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) { + if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) { return nullptr; } return elf_file.release(); @@ -89,39 +88,44 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file, std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes> (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false, /*requested_base*/nullptr)); - if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) { + if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) { return nullptr; } return elf_file.release(); } template <typename ElfTypes> -bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string* error_msg) { - int64_t temp_file_length = file_->GetLength(); +bool ElfFileImpl<ElfTypes>::Setup(File* file, + int prot, + int flags, + bool low_4gb, + std::string* error_msg) { + int64_t temp_file_length = file->GetLength(); if (temp_file_length < 0) { errno = -temp_file_length; *error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s", - file_->GetPath().c_str(), file_->Fd(), strerror(errno)); + file->GetPath().c_str(), file->Fd(), strerror(errno)); return false; } size_t file_length = 
static_cast<size_t>(temp_file_length); if (file_length < sizeof(Elf_Ehdr)) { *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF header of " "%zd bytes: '%s'", file_length, sizeof(Elf_Ehdr), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (program_header_only_) { // first just map ELF header to get program header size information size_t elf_header_size = sizeof(Elf_Ehdr); - if (!SetMap(MemMap::MapFile(elf_header_size, + if (!SetMap(file, + MemMap::MapFile(elf_header_size, prot, flags, - file_->Fd(), + file->Fd(), 0, low_4gb, - file_->GetPath().c_str(), + file->GetPath().c_str(), error_msg), error_msg)) { return false; @@ -131,16 +135,17 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string if (file_length < program_header_size) { *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF program " "header of %zd bytes: '%s'", file_length, - sizeof(Elf_Ehdr), file_->GetPath().c_str()); + sizeof(Elf_Ehdr), file->GetPath().c_str()); return false; } - if (!SetMap(MemMap::MapFile(program_header_size, + if (!SetMap(file, + MemMap::MapFile(program_header_size, prot, flags, - file_->Fd(), + file->Fd(), 0, low_4gb, - file_->GetPath().c_str(), + file->GetPath().c_str(), error_msg), error_msg)) { *error_msg = StringPrintf("Failed to map ELF program headers: %s", error_msg->c_str()); @@ -148,13 +153,14 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string } } else { // otherwise map entire file - if (!SetMap(MemMap::MapFile(file_->GetLength(), + if (!SetMap(file, + MemMap::MapFile(file->GetLength(), prot, flags, - file_->Fd(), + file->Fd(), 0, low_4gb, - file_->GetPath().c_str(), + file->GetPath().c_str(), error_msg), error_msg)) { *error_msg = StringPrintf("Failed to map ELF file: %s", error_msg->c_str()); @@ -178,7 +184,7 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string Elf_Shdr* 
shstrtab_section_header = GetSectionNameStringSection(); if (shstrtab_section_header == nullptr) { *error_msg = StringPrintf("Failed to find shstrtab section header in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -186,7 +192,7 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string dynamic_program_header_ = FindProgamHeaderByType(PT_DYNAMIC); if (dynamic_program_header_ == nullptr) { *error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -200,7 +206,7 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string Elf_Shdr* section_header = GetSectionHeader(i); if (section_header == nullptr) { *error_msg = StringPrintf("Failed to find section header for section %d in ELF file: '%s'", - i, file_->GetPath().c_str()); + i, file->GetPath().c_str()); return false; } switch (section_header->sh_type) { @@ -245,7 +251,7 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string if (reinterpret_cast<uint8_t*>(dynamic_section_start_) != Begin() + section_header->sh_offset) { LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in " - << file_->GetPath() << ": " << std::hex + << file->GetPath() << ": " << std::hex << reinterpret_cast<void*>(dynamic_section_start_) << " != " << reinterpret_cast<void*>(Begin() + section_header->sh_offset); return false; @@ -263,7 +269,7 @@ bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string } // Check for the existence of some sections. 
- if (!CheckSectionsExist(error_msg)) { + if (!CheckSectionsExist(file, error_msg)) { return false; } } @@ -283,7 +289,7 @@ bool ElfFileImpl<ElfTypes>::CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg) { if (Begin() + offset >= End()) { *error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label, - file_->GetPath().c_str()); + file_path_.c_str()); return false; } *target = Begin() + offset; @@ -324,11 +330,11 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsLinked(const uint8_t* source, } template <typename ElfTypes> -bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { + bool ElfFileImpl<ElfTypes>::CheckSectionsExist(File* file, std::string* error_msg) const { if (!program_header_only_) { // If in full mode, need section headers. if (section_headers_start_ == nullptr) { - *error_msg = StringPrintf("No section headers in ELF file: '%s'", file_->GetPath().c_str()); + *error_msg = StringPrintf("No section headers in ELF file: '%s'", file->GetPath().c_str()); return false; } } @@ -336,14 +342,14 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { // This is redundant, but defensive. if (dynamic_program_header_ == nullptr) { *error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } // Need a dynamic section. This is redundant, but defensive. if (dynamic_section_start_ == nullptr) { *error_msg = StringPrintf("Failed to find dynamic section in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -352,7 +358,7 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { if (symtab_section_start_ != nullptr) { // When there's a symtab, there should be a strtab. 
if (strtab_section_start_ == nullptr) { - *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file_->GetPath().c_str()); + *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file->GetPath().c_str()); return false; } @@ -360,25 +366,25 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(symtab_section_start_), reinterpret_cast<const uint8_t*>(strtab_section_start_))) { *error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } } // We always need a dynstr & dynsym. if (dynstr_section_start_ == nullptr) { - *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file_->GetPath().c_str()); + *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file->GetPath().c_str()); return false; } if (dynsym_section_start_ == nullptr) { - *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file_->GetPath().c_str()); + *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file->GetPath().c_str()); return false; } // Need a hash section for dynamic symbol lookup. if (hash_section_start_ == nullptr) { *error_msg = StringPrintf("Failed to find hash section in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -386,7 +392,7 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(hash_section_start_), reinterpret_cast<const uint8_t*>(dynsym_section_start_))) { *error_msg = StringPrintf("Hash section is not linked to the dynstr in ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -397,9 +403,9 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { // It might not be mapped, but we can compare against the file size. 
int64_t offset = static_cast<int64_t>(GetHeader().e_shoff + (GetHeader().e_shstrndx * GetHeader().e_shentsize)); - if (offset >= file_->GetLength()) { + if (offset >= file->GetLength()) { *error_msg = StringPrintf("Shstrtab is not in the mapped ELF file: '%s'", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } } @@ -408,15 +414,15 @@ bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const { } template <typename ElfTypes> -bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) { +bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) { if (map == nullptr) { // MemMap::Open should have already set an error. DCHECK(!error_msg->empty()); return false; } map_.reset(map); - CHECK(map_.get() != nullptr) << file_->GetPath(); - CHECK(map_->Begin() != nullptr) << file_->GetPath(); + CHECK(map_.get() != nullptr) << file->GetPath(); + CHECK(map_->Begin() != nullptr) << file->GetPath(); header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin()); if ((ELFMAG0 != header_->e_ident[EI_MAG0]) @@ -425,7 +431,7 @@ bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) { || (ELFMAG3 != header_->e_ident[EI_MAG3])) { *error_msg = StringPrintf("Failed to find ELF magic value %d %d %d %d in %s, found %d %d %d %d", ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, - file_->GetPath().c_str(), + file->GetPath().c_str(), header_->e_ident[EI_MAG0], header_->e_ident[EI_MAG1], header_->e_ident[EI_MAG2], @@ -436,90 +442,90 @@ bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) { if (elf_class != header_->e_ident[EI_CLASS]) { *error_msg = StringPrintf("Failed to find expected EI_CLASS value %d in %s, found %d", elf_class, - file_->GetPath().c_str(), + file->GetPath().c_str(), header_->e_ident[EI_CLASS]); return false; } if (ELFDATA2LSB != header_->e_ident[EI_DATA]) { *error_msg = StringPrintf("Failed to find expected EI_DATA value %d in %s, found %d", ELFDATA2LSB, - file_->GetPath().c_str(), + 
file->GetPath().c_str(), header_->e_ident[EI_CLASS]); return false; } if (EV_CURRENT != header_->e_ident[EI_VERSION]) { *error_msg = StringPrintf("Failed to find expected EI_VERSION value %d in %s, found %d", EV_CURRENT, - file_->GetPath().c_str(), + file->GetPath().c_str(), header_->e_ident[EI_CLASS]); return false; } if (ET_DYN != header_->e_type) { *error_msg = StringPrintf("Failed to find expected e_type value %d in %s, found %d", ET_DYN, - file_->GetPath().c_str(), + file->GetPath().c_str(), header_->e_type); return false; } if (EV_CURRENT != header_->e_version) { *error_msg = StringPrintf("Failed to find expected e_version value %d in %s, found %d", EV_CURRENT, - file_->GetPath().c_str(), + file->GetPath().c_str(), header_->e_version); return false; } if (0 != header_->e_entry) { *error_msg = StringPrintf("Failed to find expected e_entry value %d in %s, found %d", 0, - file_->GetPath().c_str(), + file->GetPath().c_str(), static_cast<int32_t>(header_->e_entry)); return false; } if (0 == header_->e_phoff) { *error_msg = StringPrintf("Failed to find non-zero e_phoff value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_shoff) { *error_msg = StringPrintf("Failed to find non-zero e_shoff value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_ehsize) { *error_msg = StringPrintf("Failed to find non-zero e_ehsize value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_phentsize) { *error_msg = StringPrintf("Failed to find non-zero e_phentsize value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_phnum) { *error_msg = StringPrintf("Failed to find non-zero e_phnum value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_shentsize) { *error_msg = StringPrintf("Failed to find non-zero e_shentsize value in %s", - 
file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_shnum) { *error_msg = StringPrintf("Failed to find non-zero e_shnum value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (0 == header_->e_shstrndx) { *error_msg = StringPrintf("Failed to find non-zero e_shstrndx value in %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (header_->e_shstrndx >= header_->e_shnum) { *error_msg = StringPrintf("Failed to find e_shnum value %d less than %d in %s", header_->e_shstrndx, header_->e_shnum, - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } @@ -528,14 +534,14 @@ bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) { *error_msg = StringPrintf("Failed to find e_phoff value %" PRIu64 " less than %zd in %s", static_cast<uint64_t>(header_->e_phoff), Size(), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (header_->e_shoff >= Size()) { *error_msg = StringPrintf("Failed to find e_shoff value %" PRIu64 " less than %zd in %s", static_cast<uint64_t>(header_->e_shoff), Size(), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } } @@ -577,7 +583,7 @@ typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::GetDynamicSectionStart() const { template <typename ElfTypes> typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart( Elf_Word section_type) const { - CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; + CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type; switch (section_type) { case SHT_SYMTAB: { return symtab_section_start_; @@ -597,7 +603,7 @@ typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart( template <typename ElfTypes> const char* ElfFileImpl<ElfTypes>::GetStringSectionStart( Elf_Word section_type) const { - CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; + 
CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type; switch (section_type) { case SHT_SYMTAB: { return strtab_section_start_; @@ -615,7 +621,7 @@ const char* ElfFileImpl<ElfTypes>::GetStringSectionStart( template <typename ElfTypes> const char* ElfFileImpl<ElfTypes>::GetString(Elf_Word section_type, Elf_Word i) const { - CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; + CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type; if (i == 0) { return nullptr; } @@ -673,7 +679,7 @@ typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetProgramHeaderNum() const { template <typename ElfTypes> typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const { - CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller. + CHECK_LT(i, GetProgramHeaderNum()) << file_path_; // Sanity check for caller. uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize); if (program_header >= End()) { return nullptr; // Failure condition. @@ -701,7 +707,7 @@ template <typename ElfTypes> typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionHeader(Elf_Word i) const { // Can only access arbitrary sections when we have the whole file, not just program header. // Even if we Load(), it doesn't bring in all the sections. - CHECK(!program_header_only_) << file_->GetPath(); + CHECK(!program_header_only_) << file_path_; if (i >= GetSectionHeaderNum()) { return nullptr; // Failure condition. } @@ -716,7 +722,7 @@ template <typename ElfTypes> typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByType(Elf_Word type) const { // Can only access arbitrary sections when we have the whole file, not just program header. // We could change this to switch on known types if they were detected during loading. 
- CHECK(!program_header_only_) << file_->GetPath(); + CHECK(!program_header_only_) << file_path_; for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) { Elf_Shdr* section_header = GetSectionHeader(i); if (section_header->sh_type == type) { @@ -802,8 +808,8 @@ bool ElfFileImpl<ElfTypes>::IsSymbolSectionType(Elf_Word section_type) { template <typename ElfTypes> typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSymbolNum(Elf_Shdr& section_header) const { CHECK(IsSymbolSectionType(section_header.sh_type)) - << file_->GetPath() << " " << section_header.sh_type; - CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath(); + << file_path_ << " " << section_header.sh_type; + CHECK_NE(0U, section_header.sh_entsize) << file_path_; return section_header.sh_size / section_header.sh_entsize; } @@ -819,7 +825,7 @@ typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbol(Elf_Word section_type, template <typename ElfTypes> typename ElfFileImpl<ElfTypes>::SymbolTable** ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) { - CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; + CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type; switch (section_type) { case SHT_SYMTAB: { return &symtab_symbol_table_; @@ -837,8 +843,8 @@ ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) { template <typename ElfTypes> typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindSymbolByName( Elf_Word section_type, const std::string& symbol_name, bool build_map) { - CHECK(!program_header_only_) << file_->GetPath(); - CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; + CHECK(!program_header_only_) << file_path_; + CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type; SymbolTable** symbol_table = GetSymbolTable(section_type); if (*symbol_table != nullptr || build_map) { @@ -928,7 +934,7 @@ typename ElfTypes::Addr ElfFileImpl<ElfTypes>::FindSymbolAddress( template <typename 
ElfTypes> const char* ElfFileImpl<ElfTypes>::GetString(Elf_Shdr& string_section, Elf_Word i) const { - CHECK(!program_header_only_) << file_->GetPath(); + CHECK(!program_header_only_) << file_path_; // TODO: remove this static_cast from enum when using -std=gnu++0x if (static_cast<Elf_Word>(SHT_STRTAB) != string_section.sh_type) { return nullptr; // Failure condition. @@ -954,7 +960,7 @@ typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetDynamicNum() const { template <typename ElfTypes> typename ElfTypes::Dyn& ElfFileImpl<ElfTypes>::GetDynamic(Elf_Word i) const { - CHECK_LT(i, GetDynamicNum()) << file_->GetPath(); + CHECK_LT(i, GetDynamicNum()) << file_path_; return *(GetDynamicSectionStart() + i); } @@ -981,40 +987,40 @@ typename ElfTypes::Word ElfFileImpl<ElfTypes>::FindDynamicValueByType(Elf_Sword template <typename ElfTypes> typename ElfTypes::Rel* ElfFileImpl<ElfTypes>::GetRelSectionStart(Elf_Shdr& section_header) const { - CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; + CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type; return reinterpret_cast<Elf_Rel*>(Begin() + section_header.sh_offset); } template <typename ElfTypes> typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelNum(Elf_Shdr& section_header) const { - CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; - CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath(); + CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type; + CHECK_NE(0U, section_header.sh_entsize) << file_path_; return section_header.sh_size / section_header.sh_entsize; } template <typename ElfTypes> typename ElfTypes::Rel& ElfFileImpl<ElfTypes>::GetRel(Elf_Shdr& section_header, Elf_Word i) const { - CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; - CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath(); + CHECK(SHT_REL == 
section_header.sh_type) << file_path_ << " " << section_header.sh_type; + CHECK_LT(i, GetRelNum(section_header)) << file_path_; return *(GetRelSectionStart(section_header) + i); } template <typename ElfTypes> typename ElfTypes::Rela* ElfFileImpl<ElfTypes>::GetRelaSectionStart(Elf_Shdr& section_header) const { - CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; + CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type; return reinterpret_cast<Elf_Rela*>(Begin() + section_header.sh_offset); } template <typename ElfTypes> typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelaNum(Elf_Shdr& section_header) const { - CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; + CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type; return section_header.sh_size / section_header.sh_entsize; } template <typename ElfTypes> typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header, Elf_Word i) const { - CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; - CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath(); + CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type; + CHECK_LT(i, GetRelaNum(section_header)) << file_path_; return *(GetRelaSectionStart(section_header) + i); } @@ -1037,7 +1043,7 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg) std::ostringstream oss; oss << "Program header #" << i << " has overflow in p_vaddr+p_memsz: 0x" << std::hex << program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr - << " in ELF file \"" << file_->GetPath() << "\""; + << " in ELF file \"" << file_path_ << "\""; *error_msg = oss.str(); *size = static_cast<size_t>(-1); return false; @@ -1048,13 +1054,13 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* 
error_msg) } min_vaddr = RoundDown(min_vaddr, kPageSize); max_vaddr = RoundUp(max_vaddr, kPageSize); - CHECK_LT(min_vaddr, max_vaddr) << file_->GetPath(); + CHECK_LT(min_vaddr, max_vaddr) << file_path_; Elf_Addr loaded_size = max_vaddr - min_vaddr; // Check that the loaded_size fits in size_t. if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) { std::ostringstream oss; oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x" - << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_->GetPath() << "\""; + << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_path_ << "\""; *error_msg = oss.str(); *size = static_cast<size_t>(-1); return false; @@ -1064,8 +1070,11 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg) } template <typename ElfTypes> -bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* error_msg) { - CHECK(program_header_only_) << file_->GetPath(); +bool ElfFileImpl<ElfTypes>::Load(File* file, + bool executable, + bool low_4gb, + std::string* error_msg) { + CHECK(program_header_only_) << file->GetPath(); if (executable) { InstructionSet elf_ISA = GetInstructionSetFromELF(GetHeader().e_machine, GetHeader().e_flags); @@ -1082,7 +1091,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err Elf_Phdr* program_header = GetProgramHeader(i); if (program_header == nullptr) { *error_msg = StringPrintf("No program header for entry %d in ELF file %s.", - i, file_->GetPath().c_str()); + i, file->GetPath().c_str()); return false; } @@ -1106,11 +1115,11 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err // non-zero, the segments require the specific address specified, // which either was specified in the file because we already set // base_address_ after the first zero segment). 
- int64_t temp_file_length = file_->GetLength(); + int64_t temp_file_length = file->GetLength(); if (temp_file_length < 0) { errno = -temp_file_length; *error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s", - file_->GetPath().c_str(), file_->Fd(), strerror(errno)); + file->GetPath().c_str(), file->Fd(), strerror(errno)); return false; } size_t file_length = static_cast<size_t>(temp_file_length); @@ -1122,7 +1131,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err reserve_base_override = requested_base_; } std::string reservation_name("ElfFile reservation for "); - reservation_name += file_->GetPath(); + reservation_name += file->GetPath(); size_t loaded_size; if (!GetLoadedSize(&loaded_size, error_msg)) { DCHECK(!error_msg->empty()); @@ -1178,7 +1187,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err *error_msg = StringPrintf("Invalid p_filesz > p_memsz (%" PRIu64 " > %" PRIu64 "): %s", static_cast<uint64_t>(program_header->p_filesz), static_cast<uint64_t>(program_header->p_memsz), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (program_header->p_filesz < program_header->p_memsz && @@ -1187,14 +1196,14 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err " < %" PRIu64 "): %s", static_cast<uint64_t>(program_header->p_filesz), static_cast<uint64_t>(program_header->p_memsz), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (file_length < (program_header->p_offset + program_header->p_filesz)) { *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment " "%d of %" PRIu64 " bytes: '%s'", file_length, i, static_cast<uint64_t>(program_header->p_offset + program_header->p_filesz), - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } if (program_header->p_filesz != 0u) { @@ -1203,28 +1212,28 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool 
low_4gb, std::string* err program_header->p_filesz, prot, flags, - file_->Fd(), + file->Fd(), program_header->p_offset, /*low4_gb*/false, /*reuse*/true, // implies MAP_FIXED - file_->GetPath().c_str(), + file->GetPath().c_str(), error_msg)); if (segment.get() == nullptr) { *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s", - i, file_->GetPath().c_str(), error_msg->c_str()); + i, file->GetPath().c_str(), error_msg->c_str()); return false; } if (segment->Begin() != p_vaddr) { *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, " "instead mapped to %p", - i, file_->GetPath().c_str(), p_vaddr, segment->Begin()); + i, file->GetPath().c_str(), p_vaddr, segment->Begin()); return false; } segments_.push_back(segment.release()); } if (program_header->p_filesz < program_header->p_memsz) { std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s", - static_cast<uint64_t>(i), file_->GetPath().c_str()); + static_cast<uint64_t>(i), file->GetPath().c_str()); std::unique_ptr<MemMap> segment( MemMap::MapAnonymous(name.c_str(), p_vaddr + program_header->p_filesz, @@ -1232,13 +1241,13 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err prot, false, true /* reuse */, error_msg)); if (segment == nullptr) { *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s", - i, file_->GetPath().c_str(), error_msg->c_str()); + i, file->GetPath().c_str(), error_msg->c_str()); return false; } if (segment->Begin() != p_vaddr) { *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s " "at expected address %p, instead mapped to %p", - i, file_->GetPath().c_str(), p_vaddr, segment->Begin()); + i, file->GetPath().c_str(), p_vaddr, segment->Begin()); return false; } segments_.push_back(segment.release()); @@ -1249,7 +1258,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err uint8_t* 
dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr; if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) { *error_msg = StringPrintf("dynamic section address invalid in ELF file %s", - file_->GetPath().c_str()); + file->GetPath().c_str()); return false; } dynamic_section_start_ = reinterpret_cast<Elf_Dyn*>(dsptr); @@ -1261,7 +1270,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err case DT_HASH: { if (!ValidPointer(d_ptr)) { *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s", - d_ptr, file_->GetPath().c_str()); + d_ptr, file->GetPath().c_str()); return false; } hash_section_start_ = reinterpret_cast<Elf_Word*>(d_ptr); @@ -1270,7 +1279,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err case DT_STRTAB: { if (!ValidPointer(d_ptr)) { *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s", - d_ptr, file_->GetPath().c_str()); + d_ptr, file->GetPath().c_str()); return false; } dynstr_section_start_ = reinterpret_cast<char*>(d_ptr); @@ -1279,7 +1288,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err case DT_SYMTAB: { if (!ValidPointer(d_ptr)) { *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s", - d_ptr, file_->GetPath().c_str()); + d_ptr, file->GetPath().c_str()); return false; } dynsym_section_start_ = reinterpret_cast<Elf_Sym*>(d_ptr); @@ -1289,7 +1298,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err if (GetDynamicNum() != i+1) { *error_msg = StringPrintf("DT_NULL found after %d .dynamic entries, " "expected %d as implied by size of PT_DYNAMIC segment in %s", - i + 1, GetDynamicNum(), file_->GetPath().c_str()); + i + 1, GetDynamicNum(), file->GetPath().c_str()); return false; } break; @@ -1298,7 +1307,7 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* err } // Check for 
the existence of some sections. - if (!CheckSectionsExist(error_msg)) { + if (!CheckSectionsExist(file, error_msg)) { return false; } @@ -1392,7 +1401,7 @@ void ElfFileImpl<ElfTypes>::ApplyOatPatches( } template <typename ElfTypes> -bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) { +bool ElfFileImpl<ElfTypes>::Strip(File* file, std::string* error_msg) { // ELF files produced by MCLinker look roughly like this // // +------------+ @@ -1484,10 +1493,10 @@ bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) { GetHeader().e_shnum = section_headers.size(); GetHeader().e_shoff = shoff; - int result = ftruncate(file_->Fd(), offset); + int result = ftruncate(file->Fd(), offset); if (result != 0) { *error_msg = StringPrintf("Failed to truncate while stripping ELF file: '%s': %s", - file_->GetPath().c_str(), strerror(errno)); + file->GetPath().c_str(), strerror(errno)); return false; } return true; @@ -1498,32 +1507,32 @@ static const bool DEBUG_FIXUP = false; template <typename ElfTypes> bool ElfFileImpl<ElfTypes>::Fixup(Elf_Addr base_address) { if (!FixupDynamic(base_address)) { - LOG(WARNING) << "Failed to fixup .dynamic in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup .dynamic in " << file_path_; return false; } if (!FixupSectionHeaders(base_address)) { - LOG(WARNING) << "Failed to fixup section headers in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup section headers in " << file_path_; return false; } if (!FixupProgramHeaders(base_address)) { - LOG(WARNING) << "Failed to fixup program headers in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup program headers in " << file_path_; return false; } if (!FixupSymbols(base_address, true)) { - LOG(WARNING) << "Failed to fixup .dynsym in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup .dynsym in " << file_path_; return false; } if (!FixupSymbols(base_address, false)) { - LOG(WARNING) << "Failed to fixup .symtab in " << file_->GetPath(); + LOG(WARNING) << "Failed to 
fixup .symtab in " << file_path_; return false; } if (!FixupRelocations(base_address)) { - LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_path_; return false; } static_assert(sizeof(Elf_Off) >= sizeof(base_address), "Potentially losing precision."); if (!FixupDebugSections(static_cast<Elf_Off>(base_address))) { - LOG(WARNING) << "Failed to fixup debug sections in " << file_->GetPath(); + LOG(WARNING) << "Failed to fixup debug sections in " << file_path_; return false; } return true; @@ -1538,7 +1547,7 @@ bool ElfFileImpl<ElfTypes>::FixupDynamic(Elf_Addr base_address) { Elf_Addr d_ptr = elf_dyn.d_un.d_ptr; if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Dyn[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + file_path_.c_str(), i, static_cast<uint64_t>(d_ptr), static_cast<uint64_t>(d_ptr + base_address)); } @@ -1560,7 +1569,7 @@ bool ElfFileImpl<ElfTypes>::FixupSectionHeaders(Elf_Addr base_address) { } if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Shdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + file_path_.c_str(), i, static_cast<uint64_t>(sh->sh_addr), static_cast<uint64_t>(sh->sh_addr + base_address)); } @@ -1575,19 +1584,19 @@ bool ElfFileImpl<ElfTypes>::FixupProgramHeaders(Elf_Addr base_address) { for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) { Elf_Phdr* ph = GetProgramHeader(i); CHECK(ph != nullptr); - CHECK_EQ(ph->p_vaddr, ph->p_paddr) << GetFile().GetPath() << " i=" << i; + CHECK_EQ(ph->p_vaddr, ph->p_paddr) << file_path_ << " i=" << i; CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1)))) - << GetFile().GetPath() << " i=" << i; + << file_path_ << " i=" << i; if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Phdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + file_path_.c_str(), i, static_cast<uint64_t>(ph->p_vaddr), 
static_cast<uint64_t>(ph->p_vaddr + base_address)); } ph->p_vaddr += base_address; ph->p_paddr += base_address; CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1)))) - << GetFile().GetPath() << " i=" << i; + << file_path_ << " i=" << i; } return true; } @@ -1599,7 +1608,7 @@ bool ElfFileImpl<ElfTypes>::FixupSymbols(Elf_Addr base_address, bool dynamic) { Elf_Shdr* symbol_section = FindSectionByType(section_type); if (symbol_section == nullptr) { // file is missing optional .symtab - CHECK(!dynamic) << GetFile().GetPath(); + CHECK(!dynamic) << file_path_; return true; } for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) { @@ -1608,7 +1617,7 @@ bool ElfFileImpl<ElfTypes>::FixupSymbols(Elf_Addr base_address, bool dynamic) { if (symbol->st_value != 0) { if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Sym[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + file_path_.c_str(), i, static_cast<uint64_t>(symbol->st_value), static_cast<uint64_t>(symbol->st_value + base_address)); } @@ -1628,7 +1637,7 @@ bool ElfFileImpl<ElfTypes>::FixupRelocations(Elf_Addr base_address) { Elf_Rel& rel = GetRel(*sh, j); if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), j, + file_path_.c_str(), j, static_cast<uint64_t>(rel.r_offset), static_cast<uint64_t>(rel.r_offset + base_address)); } @@ -1639,7 +1648,7 @@ bool ElfFileImpl<ElfTypes>::FixupRelocations(Elf_Addr base_address) { Elf_Rela& rela = GetRela(*sh, j); if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), j, + file_path_.c_str(), j, static_cast<uint64_t>(rela.r_offset), static_cast<uint64_t>(rela.r_offset + base_address)); } @@ -1775,8 +1784,8 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e return elf32_->func(__VA_ARGS__); \ } -bool ElfFile::Load(bool 
executable, bool low_4gb, std::string* error_msg) { - DELEGATE_TO_IMPL(Load, executable, low_4gb, error_msg); +bool ElfFile::Load(File* file, bool executable, bool low_4gb, std::string* error_msg) { + DELEGATE_TO_IMPL(Load, file, executable, low_4gb, error_msg); } const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const { @@ -1795,8 +1804,8 @@ uint8_t* ElfFile::End() const { DELEGATE_TO_IMPL(End); } -const File& ElfFile::GetFile() const { - DELEGATE_TO_IMPL(GetFile); +const std::string& ElfFile::GetFilePath() const { + DELEGATE_TO_IMPL(GetFilePath); } bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, @@ -1854,10 +1863,11 @@ bool ElfFile::Strip(File* file, std::string* error_msg) { return false; } - if (elf_file->elf64_.get() != nullptr) - return elf_file->elf64_->Strip(error_msg); - else - return elf_file->elf32_->Strip(error_msg); + if (elf_file->elf64_.get() != nullptr) { + return elf_file->elf64_->Strip(file, error_msg); + } else { + return elf_file->elf32_->Strip(file, error_msg); + } } bool ElfFile::Fixup(uint64_t base_address) { diff --git a/runtime/elf_file.h b/runtime/elf_file.h index c3616f7290..b1c9395fb5 100644 --- a/runtime/elf_file.h +++ b/runtime/elf_file.h @@ -53,7 +53,7 @@ class ElfFile { ~ElfFile(); // Load segments into memory based on PT_LOAD program headers - bool Load(bool executable, bool low_4gb, std::string* error_msg); + bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg); const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const; @@ -65,7 +65,7 @@ class ElfFile { // The end of the memory map address range for this ELF file. 
uint8_t* End() const; - const File& GetFile() const; + const std::string& GetFilePath() const; bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const; diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h index 1cdbedc057..04c224387b 100644 --- a/runtime/elf_file_impl.h +++ b/runtime/elf_file_impl.h @@ -61,8 +61,8 @@ class ElfFileImpl { std::string* error_msg); ~ElfFileImpl(); - const File& GetFile() const { - return *file_; + const std::string& GetFilePath() const { + return file_path_; } uint8_t* Begin() const { @@ -119,7 +119,7 @@ class ElfFileImpl { // Load segments into memory based on PT_LOAD program headers. // executable is true at run time, false at compile time. - bool Load(bool executable, bool low_4gb, std::string* error_msg); + bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg); bool Fixup(Elf_Addr base_address); bool FixupDynamic(Elf_Addr base_address); @@ -132,14 +132,14 @@ class ElfFileImpl { static void ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end, Elf_Addr delta, uint8_t* to_patch, const uint8_t* to_patch_end); - bool Strip(std::string* error_msg); + bool Strip(File* file, std::string* error_msg); private: ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base); - bool Setup(int prot, int flags, bool low_4gb, std::string* error_msg); + bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg); - bool SetMap(MemMap* map, std::string* error_msg); + bool SetMap(File* file, MemMap* map, std::string* error_msg); uint8_t* GetProgramHeadersStart() const; uint8_t* GetSectionHeadersStart() const; @@ -163,7 +163,7 @@ class ElfFileImpl { const Elf_Sym* FindDynamicSymbol(const std::string& symbol_name) const; // Check that certain sections and their dependencies exist. 
- bool CheckSectionsExist(std::string* error_msg) const; + bool CheckSectionsExist(File* file, std::string* error_msg) const; // Check that the link of the first section links to the second section. bool CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const; @@ -191,7 +191,7 @@ class ElfFileImpl { // Lookup a string by section type. Returns null for special 0 offset. const char* GetString(Elf_Word section_type, Elf_Word) const; - const File* const file_; + const std::string file_path_; const bool writable_; const bool program_header_only_; diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 03b7713aa1..e9c71b4523 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -316,9 +316,9 @@ Heap::Heap(size_t initial_size, continue; } - space::ImageSpace::CreateMultiImageLocations(image_file_name, - boot_classpath, - &image_file_names); + space::ImageSpace::ExtractMultiImageLocations(image_file_name, + boot_classpath, + &image_file_names); } } else { LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. " diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index b7b9ffec7e..9e828107e5 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -1508,9 +1508,54 @@ void ImageSpace::Dump(std::ostream& os) const { << ",name=\"" << GetName() << "\"]"; } -void ImageSpace::CreateMultiImageLocations(const std::string& input_image_file_name, - const std::string& boot_classpath, - std::vector<std::string>* image_file_names) { +std::string ImageSpace::GetMultiImageBootClassPath( + const std::vector<const char*>& dex_locations, + const std::vector<const char*>& oat_filenames, + const std::vector<const char*>& image_filenames) { + DCHECK_GT(oat_filenames.size(), 1u); + // If the image filename was adapted (e.g., for our tests), we need to change this here, + // too, but need to strip all path components (they will be re-established when loading). 
+ std::ostringstream bootcp_oss; + bool first_bootcp = true; + for (size_t i = 0; i < dex_locations.size(); ++i) { + if (!first_bootcp) { + bootcp_oss << ":"; + } + + std::string dex_loc = dex_locations[i]; + std::string image_filename = image_filenames[i]; + + // Use the dex_loc path, but the image_filename name (without path elements). + size_t dex_last_slash = dex_loc.rfind('/'); + + // npos is max(size_t). That makes this a bit ugly. + size_t image_last_slash = image_filename.rfind('/'); + size_t image_last_at = image_filename.rfind('@'); + size_t image_last_sep = (image_last_slash == std::string::npos) + ? image_last_at + : (image_last_at == std::string::npos) + ? std::string::npos + : std::max(image_last_slash, image_last_at); + // Note: whenever image_last_sep == npos, +1 overflow means using the full string. + + if (dex_last_slash == std::string::npos) { + dex_loc = image_filename.substr(image_last_sep + 1); + } else { + dex_loc = dex_loc.substr(0, dex_last_slash + 1) + + image_filename.substr(image_last_sep + 1); + } + + // Image filenames already end with .art, no need to replace. + + bootcp_oss << dex_loc; + first_bootcp = false; + } + return bootcp_oss.str(); +} + +void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name, + const std::string& boot_classpath, + std::vector<std::string>* image_file_names) { DCHECK(image_file_names != nullptr); std::vector<std::string> images; diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index c9741d0648..d8962f47a9 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -130,10 +130,14 @@ class ImageSpace : public MemMapSpace { // Use the input image filename to adapt the names in the given boot classpath to establish // complete locations for secondary images. 
- static void CreateMultiImageLocations(const std::string& input_image_file_name, + static void ExtractMultiImageLocations(const std::string& input_image_file_name, const std::string& boot_classpath, std::vector<std::string>* image_filenames); + static std::string GetMultiImageBootClassPath(const std::vector<const char*>& dex_locations, + const std::vector<const char*>& oat_filenames, + const std::vector<const char*>& image_filenames); + // Return the end of the image which includes non-heap objects such as ArtMethods and ArtFields. uint8_t* GetImageEnd() const { return Begin() + GetImageHeader().GetImageSize(); diff --git a/runtime/gc_root.h b/runtime/gc_root.h index 3734bcc7e1..0304d0d93c 100644 --- a/runtime/gc_root.h +++ b/runtime/gc_root.h @@ -195,7 +195,8 @@ class GcRoot { return root_.IsNull(); } - ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_); + ALWAYS_INLINE GcRoot() {} + explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_); private: // Root visitors take pointers to root_ and place them in CompressedReference** arrays. 
We use a diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h index f70503d62a..5cc1de209d 100644 --- a/runtime/indirect_reference_table-inl.h +++ b/runtime/indirect_reference_table-inl.h @@ -36,21 +36,27 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const { return false; } if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) { - LOG(ERROR) << "JNI ERROR (app bug): invalid " << kind_ << " " << iref; - AbortIfNoCheckJNI(); + AbortIfNoCheckJNI(StringPrintf("JNI ERROR (app bug): invalid %s %p", + GetIndirectRefKindString(kind_), + iref)); return false; } const int topIndex = segment_state_.parts.topIndex; int idx = ExtractIndex(iref); if (UNLIKELY(idx >= topIndex)) { - LOG(ERROR) << "JNI ERROR (app bug): accessed stale " << kind_ << " " - << iref << " (index " << idx << " in a table of size " << topIndex << ")"; - AbortIfNoCheckJNI(); + std::string msg = StringPrintf( + "JNI ERROR (app bug): accessed stale %s %p (index %d in a table of size %d)", + GetIndirectRefKindString(kind_), + iref, + idx, + topIndex); + AbortIfNoCheckJNI(msg); return false; } if (UNLIKELY(table_[idx].GetReference()->IsNull())) { - LOG(ERROR) << "JNI ERROR (app bug): accessed deleted " << kind_ << " " << iref; - AbortIfNoCheckJNI(); + AbortIfNoCheckJNI(StringPrintf("JNI ERROR (app bug): accessed deleted %s %p", + GetIndirectRefKindString(kind_), + iref)); return false; } if (UNLIKELY(!CheckEntry("use", iref, idx))) { @@ -63,10 +69,13 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const { inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const { IndirectRef checkRef = ToIndirectRef(idx); if (UNLIKELY(checkRef != iref)) { - LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what - << " stale " << kind_ << " " << iref - << " (should be " << checkRef << ")"; - AbortIfNoCheckJNI(); + std::string msg = StringPrintf( + "JNI ERROR (app bug): attempt to %s stale %s %p 
(should be %p)", + what, + GetIndirectRefKindString(kind_), + iref, + checkRef); + AbortIfNoCheckJNI(msg); return false; } return true; diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index 8e49492cf4..06d376b865 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -32,6 +32,20 @@ namespace art { static constexpr bool kDumpStackOnNonLocalReference = false; +const char* GetIndirectRefKindString(const IndirectRefKind& kind) { + switch (kind) { + case kHandleScopeOrInvalid: + return "HandleScopeOrInvalid"; + case kLocal: + return "Local"; + case kGlobal: + return "Global"; + case kWeakGlobal: + return "WeakGlobal"; + } + return "IndirectRefKind Error"; +} + template<typename T> class MutatorLockedDumpable { public: @@ -58,12 +72,14 @@ std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) return os; } -void IndirectReferenceTable::AbortIfNoCheckJNI() { +void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) { // If -Xcheck:jni is on, it'll give a more detailed error before aborting. JavaVMExt* vm = Runtime::Current()->GetJavaVM(); if (!vm->IsCheckJniEnabled()) { // Otherwise, we want to abort rather than hand back a bad reference. - LOG(FATAL) << "JNI ERROR (app bug): see above."; + LOG(FATAL) << msg; + } else { + LOG(ERROR) << msg; } } diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h index 2d0ae63b22..4a671aa2d0 100644 --- a/runtime/indirect_reference_table.h +++ b/runtime/indirect_reference_table.h @@ -119,6 +119,7 @@ enum IndirectRefKind { kWeakGlobal = 3 // <<weak global reference>> }; std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs); +const char* GetIndirectRefKindString(const IndirectRefKind& kind); /* * Determine what kind of indirect reference this is. 
@@ -372,8 +373,8 @@ class IndirectReferenceTable { return reinterpret_cast<IndirectRef>(uref); } - // Abort if check_jni is not enabled. - static void AbortIfNoCheckJNI(); + // Abort if check_jni is not enabled. Otherwise, just log as an error. + static void AbortIfNoCheckJNI(const std::string& msg); /* extra debugging checks */ bool GetChecked(IndirectRef) const; diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index ae5a0f6777..74d99176c4 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -687,9 +687,6 @@ void Jit::InvokeVirtualOrInterface(Thread* thread, DCHECK(this_object != nullptr); ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*)); if (info != nullptr) { - // Since the instrumentation is marked from the declaring class we need to mark the card so - // that mod-union tables and card rescanning know about the update. - Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass()); info->AddInvokeInfo(dex_pc, this_object->GetClass()); } } diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index 6dc15787bd..1938221849 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -146,7 +146,6 @@ class JitCodeCache { // Remove all methods in our cache that were allocated by 'alloc'. 
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) REQUIRES(!lock_) - REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_); diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc index 07c8051214..216df2fc09 100644 --- a/runtime/jit/profiling_info.cc +++ b/runtime/jit/profiling_info.cc @@ -25,10 +25,33 @@ namespace art { +ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries) + : number_of_inline_caches_(entries.size()), + method_(method), + is_method_being_compiled_(false), + is_osr_method_being_compiled_(false), + current_inline_uses_(0), + saved_entry_point_(nullptr) { + memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache)); + for (size_t i = 0; i < number_of_inline_caches_; ++i) { + cache_[i].dex_pc_ = entries[i]; + } + if (method->IsCopied()) { + // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method + // appears to happen very rarely in practice. + holding_class_ = GcRoot<mirror::Class>( + Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method)); + } else { + holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass()); + } + DCHECK(!holding_class_.IsNull()); +} + bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) { // Walk over the dex instructions of the method and keep track of // instructions we are interested in profiling. DCHECK(!method->IsNative()); + const DexFile::CodeItem& code_item = *method->GetCodeItem(); const uint16_t* code_ptr = code_item.insns_; const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_; @@ -93,6 +116,14 @@ void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) { --i; } else { // We successfully set `cls`, just return. 
+ // Since the instrumentation is marked from the declaring class we need to mark the card so + // that mod-union tables and card rescanning know about the update. + // Note that the declaring class is not necessarily the holding class if the method is + // copied. We need the card mark to be in the holding class since that is from where we + // will visit the profiling info. + if (!holding_class_.IsNull()) { + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read()); + } return; } } diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h index d04d2de756..a890fbb96d 100644 --- a/runtime/jit/profiling_info.h +++ b/runtime/jit/profiling_info.h @@ -105,6 +105,7 @@ class ProfilingInfo { // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires. template<typename RootVisitorType> void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS { + visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier()); for (size_t i = 0; i < number_of_inline_caches_; ++i) { InlineCache* cache = &cache_[i]; for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) { @@ -166,18 +167,7 @@ class ProfilingInfo { } private: - ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries) - : number_of_inline_caches_(entries.size()), - method_(method), - is_method_being_compiled_(false), - is_osr_method_being_compiled_(false), - current_inline_uses_(0), - saved_entry_point_(nullptr) { - memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache)); - for (size_t i = 0; i < number_of_inline_caches_; ++i) { - cache_[i].dex_pc_ = entries[i]; - } - } + ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries); // Number of instructions we are profiling in the ArtMethod. const uint32_t number_of_inline_caches_; @@ -185,6 +175,9 @@ class ProfilingInfo { // Method this profiling info is for. ArtMethod* const method_; + // Holding class for the method in case method is a copied method. 
+ GcRoot<mirror::Class> holding_class_; + // Whether the ArtMethod is currently being compiled. This flag // is implicitly guarded by the JIT code cache lock. // TODO: Make the JIT code cache lock global. diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 5d89c21803..20c61127c0 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -157,6 +157,8 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* } return false; } + + ScopedBacktraceMapIteratorLock lock(map.get()); for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) { if ((begin >= it->start && begin < it->end) // start of new within old && (end > it->start && end <= it->end)) { // end of new within old @@ -180,6 +182,7 @@ static bool CheckNonOverlapping(uintptr_t begin, *error_msg = StringPrintf("Failed to build process map"); return false; } + ScopedBacktraceMapIteratorLock(map.get()); for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) { if ((begin >= it->start && begin < it->end) // start of new within old || (end > it->start && end < it->end) // end of new within old diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 0d95bb175b..934a73b64f 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -368,8 +368,21 @@ inline bool Class::ResolvedFieldAccessTest(Class* access_to, ArtField* field, // class rather than the declaring class itself. DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache; uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetFieldId(field_idx).class_idx_; - // The referenced class has already been resolved with the field, get it from the dex cache. - Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx); + // The referenced class has already been resolved with the field, but may not be in the dex + // cache. 
Using ResolveType here without handles in the caller should be safe since there + // should be no thread suspension due to the class being resolved. + // TODO: Clean this up to use handles in the caller. + Class* dex_access_to; + { + StackHandleScope<2> hs(Thread::Current()); + Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer_dex_cache)); + Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(access_to->GetClassLoader())); + dex_access_to = Runtime::Current()->GetClassLinker()->ResolveType( + *referrer_dex_cache->GetDexFile(), + class_idx, + h_dex_cache, + h_class_loader); + } DCHECK(dex_access_to != nullptr); if (UNLIKELY(!this->CanAccess(dex_access_to))) { if (throw_on_failure) { @@ -398,8 +411,21 @@ inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method, // class rather than the declaring class itself. DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache; uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_; - // The referenced class has already been resolved with the method, get it from the dex cache. - Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx); + // The referenced class has already been resolved with the method, but may not be in the dex + // cache. Using ResolveType here without handles in the caller should be safe since there + // should be no thread suspension due to the class being resolved. + // TODO: Clean this up to use handles in the caller. 
+ Class* dex_access_to; + { + StackHandleScope<2> hs(Thread::Current()); + Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer_dex_cache)); + Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(access_to->GetClassLoader())); + dex_access_to = Runtime::Current()->GetClassLinker()->ResolveType( + *referrer_dex_cache->GetDexFile(), + class_idx, + h_dex_cache, + h_class_loader); + } DCHECK(dex_access_to != nullptr); if (UNLIKELY(!this->CanAccess(dex_access_to))) { if (throw_on_failure) { diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index c1504de0b3..26dae7c081 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -1476,6 +1476,7 @@ class MANAGED Class FINAL : public Object { // java.lang.Class static GcRoot<Class> java_lang_Class_; + ART_FRIEND_TEST(DexCacheTest, TestResolvedFieldAccess); // For ResolvedFieldAccessTest friend struct art::ClassOffsets; // for verifying offset information friend class Object; // For VisitReferences DISALLOW_IMPLICIT_CONSTRUCTORS(Class); diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc index 48f2ca59e8..0c8a782a2d 100644 --- a/runtime/mirror/dex_cache_test.cc +++ b/runtime/mirror/dex_cache_test.cc @@ -62,5 +62,33 @@ TEST_F(DexCacheTest, LinearAlloc) { EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods())); } +TEST_F(DexCacheTest, TestResolvedFieldAccess) { + ScopedObjectAccess soa(Thread::Current()); + jobject jclass_loader(LoadDex("Packages")); + ASSERT_TRUE(jclass_loader != nullptr); + Runtime* const runtime = Runtime::Current(); + ClassLinker* const class_linker = runtime->GetClassLinker(); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass1 = + hs.NewHandle(class_linker->FindClass(soa.Self(), "Lpackage1/Package1;", class_loader)); + ASSERT_TRUE(klass1.Get() != nullptr); + Handle<mirror::Class> klass2 = + 
hs.NewHandle(class_linker->FindClass(soa.Self(), "Lpackage2/Package2;", class_loader)); + ASSERT_TRUE(klass2.Get() != nullptr); + EXPECT_EQ(klass1->GetDexCache(), klass2->GetDexCache()); + + EXPECT_NE(klass1->NumStaticFields(), 0u); + for (ArtField& field : klass2->GetSFields()) { + EXPECT_FALSE(( + klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false, + /*use_referrers_cache*/ false>(klass2.Get(), + &field, + field.GetDexFieldIndex(), + klass1->GetDexCache()))); + } +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index 76a36ac893..e1097fa7ca 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -106,7 +106,11 @@ inline uint32_t Object::GetLockOwnerThreadId() { } inline mirror::Object* Object::MonitorEnter(Thread* self) { - return Monitor::MonitorEnter(self, this); + return Monitor::MonitorEnter(self, this, /*trylock*/false); +} + +inline mirror::Object* Object::MonitorTryEnter(Thread* self) { + return Monitor::MonitorEnter(self, this, /*trylock*/true); } inline bool Object::MonitorExit(Thread* self) { diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 0ee46c3556..e174cbcadc 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -140,6 +140,11 @@ class MANAGED LOCKABLE Object { SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetLockOwnerThreadId(); + // Try to enter the monitor, returns non null if we succeeded. 
+ mirror::Object* MonitorTryEnter(Thread* self) + EXCLUSIVE_LOCK_FUNCTION() + REQUIRES(!Roles::uninterruptible_) + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* MonitorEnter(Thread* self) EXCLUSIVE_LOCK_FUNCTION() REQUIRES(!Roles::uninterruptible_) diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 0f567053e5..3771877c9d 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -314,21 +314,34 @@ std::string Monitor::PrettyContentionInfo(const std::string& owner_name, return oss.str(); } +bool Monitor::TryLockLocked(Thread* self) { + if (owner_ == nullptr) { // Unowned. + owner_ = self; + CHECK_EQ(lock_count_, 0); + // When debugging, save the current monitor holder for future + // acquisition failures to use in sampled logging. + if (lock_profiling_threshold_ != 0) { + locking_method_ = self->GetCurrentMethod(&locking_dex_pc_); + } + } else if (owner_ == self) { // Recursive. + lock_count_++; + } else { + return false; + } + AtraceMonitorLock(self, GetObject(), false /* is_wait */); + return true; +} + +bool Monitor::TryLock(Thread* self) { + MutexLock mu(self, monitor_lock_); + return TryLockLocked(self); +} + void Monitor::Lock(Thread* self) { MutexLock mu(self, monitor_lock_); while (true) { - if (owner_ == nullptr) { // Unowned. - owner_ = self; - CHECK_EQ(lock_count_, 0); - // When debugging, save the current monitor holder for future - // acquisition failures to use in sampled logging. - if (lock_profiling_threshold_ != 0) { - locking_method_ = self->GetCurrentMethod(&locking_dex_pc_); - } - break; - } else if (owner_ == self) { // Recursive. - lock_count_++; - break; + if (TryLockLocked(self)) { + return; } // Contended. const bool log_contention = (lock_profiling_threshold_ != 0); @@ -430,8 +443,6 @@ void Monitor::Lock(Thread* self) { monitor_lock_.Lock(self); // Reacquire locks in order. --num_waiters_; } - - AtraceMonitorLock(self, GetObject(), false /* is_wait */); } static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) 
@@ -852,7 +863,7 @@ static mirror::Object* FakeUnlock(mirror::Object* obj) return obj; } -mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { +mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool trylock) { DCHECK(self != nullptr); DCHECK(obj != nullptr); self->AssertThreadSuspensionIsAllowable(); @@ -898,6 +909,9 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { InflateThinLocked(self, h_obj, lock_word, 0); } } else { + if (trylock) { + return nullptr; + } // Contention. contention_count++; Runtime* runtime = Runtime::Current(); @@ -916,8 +930,12 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { } case LockWord::kFatLocked: { Monitor* mon = lock_word.FatLockMonitor(); - mon->Lock(self); - return h_obj.Get(); // Success! + if (trylock) { + return mon->TryLock(self) ? h_obj.Get() : nullptr; + } else { + mon->Lock(self); + return h_obj.Get(); // Success! + } } case LockWord::kHashCode: // Inflate with the existing hashcode. diff --git a/runtime/monitor.h b/runtime/monitor.h index 7b4b8f9467..1d829e1d68 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -62,7 +62,7 @@ class Monitor { NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy. // NO_THREAD_SAFETY_ANALYSIS for mon->Lock. - static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj) + static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj, bool trylock) EXCLUSIVE_LOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS REQUIRES(!Roles::uninterruptible_) @@ -193,6 +193,15 @@ class Monitor { !monitor_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + // Try to lock without blocking, returns true if we acquired the lock. + bool TryLock(Thread* self) + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + // Variant for already holding the monitor lock. 
+ bool TryLockLocked(Thread* self) + REQUIRES(monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + void Lock(Thread* self) REQUIRES(!monitor_lock_) SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc index 83e0c0dea9..48d256c985 100644 --- a/runtime/monitor_test.cc +++ b/runtime/monitor_test.cc @@ -26,6 +26,7 @@ #include "handle_scope-inl.h" #include "mirror/class-inl.h" #include "mirror/string-inl.h" // Strings are easiest to allocate +#include "object_lock.h" #include "scoped_thread_state_change.h" #include "thread_pool.h" @@ -374,4 +375,60 @@ TEST_F(MonitorTest, CheckExceptionsWait3) { "Monitor test thread pool 3"); } +class TryLockTask : public Task { + public: + explicit TryLockTask(Handle<mirror::Object> obj) : obj_(obj) {} + + void Run(Thread* self) { + ScopedObjectAccess soa(self); + // Lock is held by other thread, try lock should fail. + ObjectTryLock<mirror::Object> lock(self, obj_); + EXPECT_FALSE(lock.Acquired()); + } + + void Finalize() { + delete this; + } + + private: + Handle<mirror::Object> obj_; +}; + +// Test trylock in deadlock scenarios. +TEST_F(MonitorTest, TestTryLock) { + ScopedLogSeverity sls(LogSeverity::FATAL); + + Thread* const self = Thread::Current(); + ThreadPool thread_pool("the pool", 2); + ScopedObjectAccess soa(self); + StackHandleScope<3> hs(self); + Handle<mirror::Object> obj1( + hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!"))); + Handle<mirror::Object> obj2( + hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!"))); + { + ObjectLock<mirror::Object> lock1(self, obj1); + ObjectLock<mirror::Object> lock2(self, obj1); + { + ObjectTryLock<mirror::Object> trylock(self, obj1); + EXPECT_TRUE(trylock.Acquired()); + } + // Test failure case. 
+ thread_pool.AddTask(self, new TryLockTask(obj1)); + thread_pool.StartWorkers(self); + ScopedThreadSuspension sts(self, kSuspended); + thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false); + } + // Test that the trylock actually locks the object. + { + ObjectTryLock<mirror::Object> trylock(self, obj1); + EXPECT_TRUE(trylock.Acquired()); + obj1->Notify(self); + // Since we hold the lock there should be no monitor state exception. + self->AssertNoPendingException(); + } + thread_pool.StopWorkers(self); +} + + } // namespace art diff --git a/runtime/oat.h b/runtime/oat.h index 57675dc738..31e4e072c0 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '7', '9', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '8', '8', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 72c0cea1a0..e78a097174 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -898,7 +898,7 @@ bool ElfOatFile::ElfFileOpen(File* file, DCHECK(!error_msg->empty()); return false; } - bool loaded = elf_file_->Load(executable, low_4gb, error_msg); + bool loaded = elf_file_->Load(file, executable, low_4gb, error_msg); DCHECK(loaded || !error_msg->empty()); return loaded; } diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index 156d394171..e99377d834 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -87,8 +87,10 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { bool with_patch_info = true) { // Temporarily redirect the dalvik cache so dex2oat doesn't find the // relocated image file. 
- std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp"; - setenv("ANDROID_DATA", android_data_tmp.c_str(), 1); + std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA)); + std::string dalvik_cache_tmp = dalvik_cache + ".redirected"; + ASSERT_EQ(0, rename(dalvik_cache.c_str(), dalvik_cache_tmp.c_str())) << strerror(errno); + std::vector<std::string> args; args.push_back("--dex-file=" + dex_location); args.push_back("--oat-file=" + odex_location); @@ -106,7 +108,7 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { std::string error_msg; ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg; - setenv("ANDROID_DATA", android_data_.c_str(), 1); + ASSERT_EQ(0, rename(dalvik_cache_tmp.c_str(), dalvik_cache.c_str())) << strerror(errno); // Verify the odex file was generated as expected and really is // unrelocated. diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc index f7accc0f31..b8754a4093 100644 --- a/runtime/object_lock.cc +++ b/runtime/object_lock.cc @@ -47,7 +47,22 @@ void ObjectLock<T>::NotifyAll() { obj_->NotifyAll(self_); } +template <typename T> +ObjectTryLock<T>::ObjectTryLock(Thread* self, Handle<T> object) : self_(self), obj_(object) { + CHECK(object.Get() != nullptr); + acquired_ = obj_->MonitorTryEnter(self_) != nullptr; +} + +template <typename T> +ObjectTryLock<T>::~ObjectTryLock() { + if (acquired_) { + obj_->MonitorExit(self_); + } +} + template class ObjectLock<mirror::Class>; template class ObjectLock<mirror::Object>; +template class ObjectTryLock<mirror::Class>; +template class ObjectTryLock<mirror::Object>; } // namespace art diff --git a/runtime/object_lock.h b/runtime/object_lock.h index eb7cbd85d3..7f02b37258 100644 --- a/runtime/object_lock.h +++ b/runtime/object_lock.h @@ -45,6 +45,27 @@ class ObjectLock { DISALLOW_COPY_AND_ASSIGN(ObjectLock); }; +template <typename T> +class ObjectTryLock { + public: + ObjectTryLock(Thread* self, Handle<T> object) 
SHARED_REQUIRES(Locks::mutator_lock_); + + ~ObjectTryLock() SHARED_REQUIRES(Locks::mutator_lock_); + + bool Acquired() const { + return acquired_; + } + + private: + Thread* const self_; + Handle<T> const obj_; + bool acquired_; + + + DISALLOW_COPY_AND_ASSIGN(ObjectTryLock); +}; + + } // namespace art #endif // ART_RUNTIME_OBJECT_LOCK_H_ diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc index 49b6a38b01..d4bd6dd247 100644 --- a/runtime/reference_table.cc +++ b/runtime/reference_table.cc @@ -192,6 +192,13 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) { } else { StringAppendF(&extras, " \"%.16s... (%d chars)", utf8.c_str(), s->GetLength()); } + } else if (ref->IsReferenceInstance()) { + mirror::Object* referent = ref->AsReference()->GetReferent(); + if (referent == nullptr) { + extras = " (storing null)"; + } else { + extras = StringPrintf(" (storing a %s)", PrettyTypeOf(referent).c_str()); + } } os << StringPrintf(" %5d: ", idx) << ref << " " << className << extras << "\n"; } diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 66c8f87fd1..f4c28b9a53 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -26,6 +26,9 @@ #include <signal.h> #include <sys/syscall.h> #include "base/memory_tool.h" +#if defined(__APPLE__) +#include <crt_externs.h> // for _NSGetEnviron +#endif #include <cstdio> #include <cstdlib> @@ -156,6 +159,22 @@ struct TraceConfig { size_t trace_file_size; }; +namespace { +#ifdef __APPLE__ +inline char** GetEnviron() { + // When Google Test is built as a framework on MacOS X, the environ variable + // is unavailable. Apple's documentation (man environ) recommends using + // _NSGetEnviron() instead. + return *_NSGetEnviron(); +} +#else +// Some POSIX platforms expect you to declare environ. extern "C" makes +// it reside in the global namespace. 
+extern "C" char** environ; +inline char** GetEnviron() { return environ; } +#endif +} // namespace + Runtime::Runtime() : resolution_method_(nullptr), imt_conflict_method_(nullptr), @@ -387,7 +406,7 @@ struct AbortState { } }; -void Runtime::Abort() { +void Runtime::Abort(const char* msg) { gAborting++; // set before taking any locks // Ensure that we don't have multiple threads trying to abort at once, @@ -402,6 +421,12 @@ void Runtime::Abort() { AbortState state; LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state); + // Sometimes we dump long messages, and the Android abort message only retains the first line. + // In those cases, just log the message again, to avoid logcat limits. + if (msg != nullptr && strchr(msg, '\n') != nullptr) { + LOG(INTERNAL_FATAL) << msg; + } + // Call the abort hook if we have one. if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) { LOG(INTERNAL_FATAL) << "Calling abort hook..."; @@ -826,7 +851,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location, return false; } std::string error_msg; - std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), + std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(), false, false, /*low_4gb*/false, @@ -859,9 +884,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location, const OatHeader& boot_oat_header = oat_file->GetOatHeader(); const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey); if (boot_cp != nullptr) { - gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0], - boot_cp, - &image_locations); + gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0], + boot_cp, + &image_locations); } } @@ -905,6 +930,10 @@ void Runtime::SetSentinel(mirror::Object* sentinel) { } bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { + // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc. 
+ // Take a snapshot of the environment at the time the runtime was created, for use by Exec, etc. + env_snapshot_.TakeSnapshot(); + RuntimeArgumentMap runtime_options(std::move(runtime_options_in)); ScopedTrace trace(__FUNCTION__); CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize); @@ -2015,4 +2044,22 @@ bool Runtime::SaveProfileInfo() const { return (jit_ != nullptr) && jit_->SaveProfilingInfo(); } +void Runtime::EnvSnapshot::TakeSnapshot() { + char** env = GetEnviron(); + for (size_t i = 0; env[i] != nullptr; ++i) { + name_value_pairs_.emplace_back(new std::string(env[i])); + } + // The strings in name_value_pairs_ retain ownership of the c_str, but we assign pointers + // for quick use by GetSnapshot. This avoids allocation and copying cost at Exec. + c_env_vector_.reset(new char*[name_value_pairs_.size() + 1]); + for (size_t i = 0; env[i] != nullptr; ++i) { + c_env_vector_[i] = const_cast<char*>(name_value_pairs_[i]->c_str()); + } + c_env_vector_[name_value_pairs_.size()] = nullptr; +} + +char** Runtime::EnvSnapshot::GetSnapshot() const { + return c_env_vector_.get(); +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index 1394462fd1..3b72aa7b38 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -228,7 +228,7 @@ class Runtime { // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most // callers should prefer. - NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_); + NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -648,6 +648,16 @@ class Runtime { return zygote_no_threads_; } + // Returns if the code can be deoptimized. Code may be compiled with some + // optimization that makes it impossible to deoptimize. + bool IsDeoptimizeable(uintptr_t code) const SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns a saved copy of the environment (getenv/setenv values). 
+ // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc. + char** GetEnvSnapshot() const { + return env_snapshot_.GetSnapshot(); + } + private: static void InitPlatformSignalHandlers(); @@ -872,6 +882,20 @@ class Runtime { // Whether zygote code is in a section that should not start threads. bool zygote_no_threads_; + // Saved environment. + class EnvSnapshot { + public: + EnvSnapshot() = default; + void TakeSnapshot(); + char** GetSnapshot() const; + + private: + std::unique_ptr<char*[]> c_env_vector_; + std::vector<std::unique_ptr<std::string>> name_value_pairs_; + + DISALLOW_COPY_AND_ASSIGN(EnvSnapshot); + } env_snapshot_; + DISALLOW_COPY_AND_ASSIGN(Runtime); }; std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs); diff --git a/runtime/utils.cc b/runtime/utils.cc index 6a50b8eee2..0eb3a4ddf6 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -1416,8 +1416,15 @@ int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_m // change process groups, so we don't get reaped by ProcessManager setpgid(0, 0); - execv(program, &args[0]); - PLOG(ERROR) << "Failed to execv(" << command_line << ")"; + // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc. + // Use the snapshot of the environment from the time the runtime was created. + char** envp = (Runtime::Current() == nullptr) ? nullptr : Runtime::Current()->GetEnvSnapshot(); + if (envp == nullptr) { + execv(program, &args[0]); + } else { + execve(program, &args[0], envp); + } + PLOG(ERROR) << "Failed to execve(" << command_line << ")"; // _exit to avoid atexit handlers in child. 
_exit(1); } else { diff --git a/runtime/utils.h b/runtime/utils.h index c1e88a4feb..fe915f2c6b 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -291,6 +291,9 @@ std::string GetDalvikCacheFilenameOrDie(const char* file_location, std::string GetSystemImageFilename(const char* location, InstructionSet isa); // Wrapper on fork/execv to run a command in a subprocess. +// Both of these spawn child processes using the environment as it was set when the single instance +// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it +// will use the current environment settings. bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg); int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg); diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc index f00edffab8..f7d9fba7e7 100644 --- a/runtime/utils_test.cc +++ b/runtime/utils_test.cc @@ -16,6 +16,8 @@ #include "utils.h" +#include <stdlib.h> + #include "class_linker-inl.h" #include "common_runtime_test.h" #include "mirror/array.h" @@ -375,8 +377,57 @@ TEST_F(UtilsTest, ExecError) { if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks. EXPECT_FALSE(Exec(command, &error_msg)); - EXPECT_NE(0U, error_msg.size()); + EXPECT_FALSE(error_msg.empty()); + } +} + +TEST_F(UtilsTest, EnvSnapshotAdditionsAreNotVisible) { + static constexpr const char* kModifiedVariable = "EXEC_SHOULD_NOT_EXPORT_THIS"; + static constexpr int kOverwrite = 1; + // Set a variable in the current environment. + EXPECT_EQ(setenv(kModifiedVariable, "NEVER", kOverwrite), 0); + // Test that it is not exported. 
+ std::vector<std::string> command; + if (kIsTargetBuild) { + std::string android_root(GetAndroidRoot()); + command.push_back(android_root + "/bin/printenv"); + } else { + command.push_back("/usr/bin/printenv"); + } + command.push_back(kModifiedVariable); + std::string error_msg; + if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { + // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks. + EXPECT_FALSE(Exec(command, &error_msg)); + EXPECT_NE(0U, error_msg.size()) << error_msg; + } +} + +TEST_F(UtilsTest, EnvSnapshotDeletionsAreNotVisible) { + static constexpr const char* kDeletedVariable = "PATH"; + static constexpr int kOverwrite = 1; + // Save the variable's value. + const char* save_value = getenv(kDeletedVariable); + EXPECT_NE(save_value, nullptr); + // Delete the variable. + EXPECT_EQ(unsetenv(kDeletedVariable), 0); + // Test that it is not exported. + std::vector<std::string> command; + if (kIsTargetBuild) { + std::string android_root(GetAndroidRoot()); + command.push_back(android_root + "/bin/printenv"); + } else { + command.push_back("/usr/bin/printenv"); + } + command.push_back(kDeletedVariable); + std::string error_msg; + if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { + // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks. + EXPECT_TRUE(Exec(command, &error_msg)); + EXPECT_EQ(0U, error_msg.size()) << error_msg; } + // Restore the variable's value. + EXPECT_EQ(setenv(kDeletedVariable, save_value, kOverwrite), 0); } TEST_F(UtilsTest, IsValidDescriptor) { |