-rw-r--r--  runtime/base/mutex.cc                    |   8
-rw-r--r--  runtime/base/mutex.h                     |  12
-rw-r--r--  runtime/cha.cc                           |  74
-rw-r--r--  runtime/gc/heap.cc                       |   8
-rw-r--r--  runtime/gc/space/image_space.cc          | 490
-rw-r--r--  runtime/gc/space/image_space.h           |  26
-rw-r--r--  runtime/interpreter/mterp/mterp.cc       | 101
-rw-r--r--  runtime/jit/jit_code_cache.cc            | 109
-rw-r--r--  runtime/jit/jit_code_cache.h             |   6
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.cc  |  63
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.h   |   1
11 files changed, 474 insertions, 424 deletions
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7b888b18d9..044c4c2f78 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1142,10 +1142,6 @@ void Locks::Init() {
     DCHECK(subtype_check_lock_ == nullptr);
     subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
 
-    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
-    DCHECK(cha_lock_ == nullptr);
-    cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
     UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
     DCHECK(classlinker_classes_lock_ == nullptr);
     classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
@@ -1226,6 +1222,10 @@ void Locks::Init() {
     DCHECK(custom_tls_lock_ == nullptr);
     custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+    DCHECK(cha_lock_ == nullptr);
+    cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
     DCHECK(native_debug_interface_lock_ == nullptr);
     native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index af2e7b2763..fba209a0b6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -72,6 +72,7 @@ enum LockLevel : uint8_t {
   kJdwpSocketLock,
   kRegionSpaceRegionLock,
   kMarkSweepMarkStackLock,
+  kCHALock,
   kJitCodeCacheLock,
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
@@ -109,7 +110,6 @@ enum LockLevel : uint8_t {
   kMonitorPoolLock,
   kClassLinkerClassesLock,  // TODO rename.
   kDexToDexCompilerLock,
-  kCHALock,
   kSubtypeCheckLock,
   kBreakpointLock,
   kMonitorLock,
@@ -661,14 +661,11 @@ class Locks {
   // TODO: improve name, perhaps instrumentation_update_lock_.
   static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
 
-  // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
   // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
   // This lock is used in SubtypeCheck methods which are the interface for
   // any SubtypeCheck-mutating methods.
   // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
-  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(cha_lock_);
+  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
 
   // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
   // attaching and detaching.
@@ -745,11 +742,14 @@ class Locks {
   // GetThreadLocalStorage.
   static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
 
+  // Guards Class Hierarchy Analysis (CHA).
+  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
   // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
   // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
   // actually only encodes the mutex being below jni_function_table_lock_ although having
   // kGenericBottomLock level is lower than this.
-  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::custom_tls_lock_)
+  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
 
   // Have an exclusive aborting thread.
   static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ccbe066ed6..ce84e8ce2e 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -636,38 +636,54 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
   // We do this under cha_lock_.
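  //
  // An illustrative sketch, not part of the patch, assuming the usual MutexLock
  // RAII helper: because kCHALock now sits below kJitCodeCacheLock in LockLevel,
  // a thread already holding the JIT code cache lock_ may still acquire
  // cha_lock_, but never in the reverse order.
  //
  //   MutexLock mu(self, lock_);                  // level kJitCodeCacheLock
  //   MutexLock cha_mu(self, *Locks::cha_lock_);  // lower level kCHALock: allowed
  //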
  // Committing code also grabs this lock to make sure the code is only
  // committed when all single-implementation assumptions are still true.
-  MutexLock cha_mu(self, *Locks::cha_lock_);
-  // Invalidate compiled methods that assume some virtual calls have only
-  // single implementations.
-  for (ArtMethod* invalidated : invalidated_single_impl_methods) {
-    if (!invalidated->HasSingleImplementation()) {
-      // It might have been invalidated already when other class linking is
-      // going on.
-      continue;
-    }
-    invalidated->SetHasSingleImplementation(false);
-    if (invalidated->IsAbstract()) {
-      // Clear the single implementation method.
-      invalidated->SetSingleImplementation(nullptr, image_pointer_size);
-    }
+  std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>> headers;
+  {
+    MutexLock cha_mu(self, *Locks::cha_lock_);
+    // Invalidate compiled methods that assume some virtual calls have only
+    // single implementations.
+    for (ArtMethod* invalidated : invalidated_single_impl_methods) {
+      if (!invalidated->HasSingleImplementation()) {
+        // It might have been invalidated already when other class linking is
+        // going on.
+        continue;
+      }
+      invalidated->SetHasSingleImplementation(false);
+      if (invalidated->IsAbstract()) {
+        // Clear the single implementation method.
+        invalidated->SetSingleImplementation(nullptr, image_pointer_size);
+      }
 
-    if (runtime->IsAotCompiler()) {
-      // No need to invalidate any compiled code as the AotCompiler doesn't
-      // run any code.
-      continue;
-    }
+      if (runtime->IsAotCompiler()) {
+        // No need to invalidate any compiled code as the AotCompiler doesn't
+        // run any code.
+        continue;
+      }
 
-    // Invalidate all dependents.
-    for (const auto& dependent : GetDependents(invalidated)) {
-      ArtMethod* method = dependent.first;;
-      OatQuickMethodHeader* method_header = dependent.second;
-      VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
-      DCHECK(runtime->UseJitCompilation());
-      runtime->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
-          method, method_header);
-      dependent_method_headers.insert(method_header);
+      // Invalidate all dependents.
+      for (const auto& dependent : GetDependents(invalidated)) {
+        ArtMethod* method = dependent.first;;
+        OatQuickMethodHeader* method_header = dependent.second;
+        VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
+        DCHECK(runtime->UseJitCompilation());
+        // We need to call JitCodeCache::InvalidateCompiledCodeFor but we cannot do it here
+        // since it would run into problems with lock-ordering. We don't want to re-order the
+        // locks since that would make code-commit racy.
+        headers.push_back({method, method_header});
+        dependent_method_headers.insert(method_header);
+      }
+      RemoveAllDependenciesFor(invalidated);
+    }
+  }
+  // Since we are still loading the class that invalidated the code it's fine we have this after
+  // getting rid of the dependency. Any calls would need to be with the old version (since the
+  // new one isn't loaded yet) which still works fine. We will deoptimize just after this to
+  // ensure everything gets the new state.
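  //
  // The shape of the fix as a self-contained sketch; the type and helper names
  // here are illustrative, not from the patch:
  //
  //   std::vector<Work> deferred;
  //   {
  //     MutexLock cha_mu(self, *Locks::cha_lock_);  // must not take the JIT lock here
  //     CollectInvalidatedWork(&deferred);
  //   }
  //   for (const Work& w : deferred) {
  //     InvalidateUnderJitLock(w);  // takes the higher-level JIT code cache lock_
  //   }
  //
  // The added lines below are the concrete second phase for the collected headers.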
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr) {
+    jit::JitCodeCache* code_cache = jit->GetCodeCache();
+    for (const auto& pair : headers) {
+      code_cache->InvalidateCompiledCodeFor(pair.first, pair.second);
     }
-    RemoveAllDependenciesFor(invalidated);
   }
 }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 58becb1d09..222be142a1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -318,12 +318,14 @@ Heap::Heap(size_t initial_size,
   }
 
   // Load image space(s).
+  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
   if (space::ImageSpace::LoadBootImage(image_file_name,
                                        image_instruction_set,
-                                       &boot_image_spaces_,
+                                       &boot_image_spaces,
                                        &requested_alloc_space_begin)) {
-    for (auto space : boot_image_spaces_) {
-      AddSpace(space);
+    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
+      boot_image_spaces_.push_back(space.get());
+      AddSpace(space.release());
     }
   }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 826f382f72..985eff3025 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -181,18 +181,19 @@ static bool FindImageFilenameImpl(const char* image_location,
   bool have_android_data = false;
   *dalvik_cache_exists = false;
   GetDalvikCache(GetInstructionSetString(image_isa),
-                 true,
+                 /* create_if_absent */ true,
                  dalvik_cache,
                  &have_android_data,
                  dalvik_cache_exists,
                  is_global_cache);
 
-  if (have_android_data && *dalvik_cache_exists) {
+  if (*dalvik_cache_exists) {
+    DCHECK(have_android_data);
     // Always set output location even if it does not exist,
     // so that the caller knows where to create the image.
     //
     // image_location = /system/framework/boot.art
-    // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
+    // *image_filename = /data/dalvik-cache/<image_isa>/system@framework@boot.art
     std::string error_msg;
     if (!GetDalvikCacheFilename(image_location,
                                 dalvik_cache->c_str(),
@@ -381,33 +382,6 @@ ImageHeader* ImageSpace::ReadImageHeader(const char* image_location,
   return nullptr;
 }
 
-static bool ChecksumsMatch(const char* image_a, const char* image_b, std::string* error_msg) {
-  DCHECK(error_msg != nullptr);
-
-  ImageHeader hdr_a;
-  ImageHeader hdr_b;
-
-  if (!ReadSpecificImageHeader(image_a, &hdr_a)) {
-    *error_msg = StringPrintf("Cannot read header of %s", image_a);
-    return false;
-  }
-  if (!ReadSpecificImageHeader(image_b, &hdr_b)) {
-    *error_msg = StringPrintf("Cannot read header of %s", image_b);
-    return false;
-  }
-
-  if (hdr_a.GetOatChecksum() != hdr_b.GetOatChecksum()) {
-    *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
-                              hdr_a.GetOatChecksum(),
-                              image_a,
-                              hdr_b.GetOatChecksum(),
-                              image_b);
-    return false;
-  }
-
-  return true;
-}
-
 static bool CanWriteToDalvikCache(const InstructionSet isa) {
   const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
   if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
@@ -507,9 +481,9 @@ std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
 
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
 // friend class), but not declare functions in the header.
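// An illustrative sketch of the mechanism introduced below: the helpers become
// nested classes that are only forward-declared in the header, so they can reach
// ImageSpace's private members without adding any functions to the header:
//
//   // image_space.h
//   class ImageSpace : public MemMapSpace {
//    private:
//     class Loader;           // defined only in image_space.cc
//     class BootImageLoader;  // defined only in image_space.cc
//   };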
-class ImageSpaceLoader {
+class ImageSpace::Loader {
  public:
-  static std::unique_ptr<ImageSpace> Load(const char* image_location,
+  static std::unique_ptr<ImageSpace> Load(const std::string& image_location,
                                           const std::string& image_filename,
                                           bool is_zygote,
                                           bool is_global_cache,
@@ -541,7 +515,7 @@ class ImageSpaceLoader {
     // Since we are the boot image, pass null since we load the oat file from the boot image oat
     // file name.
     return Init(image_filename.c_str(),
-                image_location,
+                image_location.c_str(),
                 validate_oat_file,
                 /* oat_file */nullptr,
                 error_msg);
@@ -1471,6 +1445,187 @@ class ImageSpaceLoader {
   }
 };
 
+class ImageSpace::BootImageLoader {
+ public:
+  BootImageLoader(const std::string& image_location, InstructionSet image_isa)
+      : image_location_(image_location),
+        image_isa_(image_isa),
+        is_zygote_(Runtime::Current()->IsZygote()),
+        has_system_(false),
+        has_cache_(false),
+        is_global_cache_(true),
+        dalvik_cache_(),
+        cache_filename_() {
+  }
+
+  bool IsZygote() const { return is_zygote_; }
+
+  void FindImageFiles() {
+    std::string system_filename;
+    bool dalvik_cache_exists = false;
+    bool found_image = FindImageFilenameImpl(image_location_.c_str(),
+                                             image_isa_,
+                                             &has_system_,
+                                             &system_filename,
+                                             &dalvik_cache_exists,
+                                             &dalvik_cache_,
+                                             &is_global_cache_,
+                                             &has_cache_,
+                                             &cache_filename_);
+    DCHECK_EQ(dalvik_cache_exists, !dalvik_cache_.empty());
+    DCHECK_EQ(found_image, has_system_ || has_cache_);
+  }
+
+  bool HasSystem() const { return has_system_; }
+  bool HasCache() const { return has_cache_; }
+
+  bool DalvikCacheExists() const { return !dalvik_cache_.empty(); }
+  bool IsGlobalCache() const { return is_global_cache_; }
+
+  const std::string& GetDalvikCache() const {
+    DCHECK(DalvikCacheExists());
+    return dalvik_cache_;
+  }
+
+  const std::string& GetCacheFilename() const {
+    DCHECK(DalvikCacheExists());
+    return cache_filename_;
+  }
+
+  bool LoadFromSystem(/*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+                      /*out*/ uint8_t** oat_file_end,
+                      /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
+    std::vector<std::string> locations;
+    if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
+      return false;
+    }
+    std::vector<std::unique_ptr<ImageSpace>> spaces;
+    spaces.reserve(locations.size());
+    for (const std::string& location : locations) {
+      filename = GetSystemImageFilename(location.c_str(), image_isa_);
+      spaces.push_back(Loader::Load(location,
+                                    filename,
+                                    is_zygote_,
+                                    is_global_cache_,
+                                    /* validate_oat_file */ false,
+                                    error_msg));
+      if (spaces.back() == nullptr) {
+        return false;
+      }
+    }
+    *oat_file_end = GetOatFileEnd(spaces);
+    boot_image_spaces->swap(spaces);
+    return true;
+  }
+
+  bool LoadFromDalvikCache(
+      bool validate_system_checksums,
+      bool validate_oat_file,
+      /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/ uint8_t** oat_file_end,
+      /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(DalvikCacheExists());
+    std::vector<std::string> locations;
+    if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
+      return false;
+    }
+    std::vector<std::unique_ptr<ImageSpace>> spaces;
+    spaces.reserve(locations.size());
+    for (const std::string& location : locations) {
+      std::string filename;
+      if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
+        return false;
+      }
+      spaces.push_back(Loader::Load(location,
+                                    filename,
+                                    is_zygote_,
+                                    is_global_cache_,
+                                    validate_oat_file,
+                                    error_msg));
+      if (spaces.back() == nullptr) {
+        return false;
+      }
+      if (validate_system_checksums) {
+        ImageHeader system_hdr;
+        std::string system_filename = GetSystemImageFilename(location.c_str(), image_isa_);
+        if (!ReadSpecificImageHeader(system_filename.c_str(), &system_hdr)) {
+          *error_msg = StringPrintf("Cannot read header of %s", system_filename.c_str());
+          return false;
+        }
+        if (spaces.back()->GetImageHeader().GetOatChecksum() != system_hdr.GetOatChecksum()) {
+          *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
+                                    spaces.back()->GetImageHeader().GetOatChecksum(),
+                                    filename.c_str(),
+                                    system_hdr.GetOatChecksum(),
+                                    system_filename.c_str());
+          return false;
+        }
+      }
+    }
+    *oat_file_end = GetOatFileEnd(spaces);
+    boot_image_spaces->swap(spaces);
+    return true;
+  }
+
+ private:
+  // Extract boot class path from oat file associated with `image_filename`
+  // and list all associated image locations.
+  static bool GetBootClassPathImageLocations(const std::string& image_location,
+                                             const std::string& image_filename,
+                                             /*out*/ std::vector<std::string>* all_locations,
+                                             /*out*/ std::string* error_msg) {
+    std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
+    std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+                                                    oat_filename,
+                                                    oat_filename,
+                                                    /* requested_base */ nullptr,
+                                                    /* oat_file_begin */ nullptr,
+                                                    /* executable */ false,
+                                                    /* low_4gb */ false,
+                                                    /* abs_dex_location */ nullptr,
+                                                    error_msg));
+    if (oat_file == nullptr) {
+      *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
+                                oat_filename.c_str(),
+                                image_filename.c_str(),
+                                error_msg->c_str());
+      return false;
+    }
+    const OatHeader& oat_header = oat_file->GetOatHeader();
+    const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+    if (boot_classpath == nullptr || boot_classpath[0] == 0) {
+      *error_msg = StringPrintf("No boot class path in oat file '%s' for image file %s",
+                                oat_filename.c_str(),
+                                image_filename.c_str());
+      return false;
+    }
+
+    all_locations->push_back(image_location);
+    ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
+    return true;
+  }
+
+  uint8_t* GetOatFileEnd(const std::vector<std::unique_ptr<ImageSpace>>& spaces) {
+    DCHECK(std::is_sorted(
+        spaces.begin(),
+        spaces.end(),
+        [](const std::unique_ptr<ImageSpace>& lhs, const std::unique_ptr<ImageSpace>& rhs) {
+          return lhs->GetOatFileEnd() < rhs->GetOatFileEnd();
+        }));
+    return AlignUp(spaces.back()->GetOatFileEnd(), kPageSize);
+  }
+
+  const std::string& image_location_;
+  InstructionSet image_isa_;
+  bool is_zygote_;
+  bool has_system_;
+  bool has_cache_;
+  bool is_global_cache_;
+  std::string dalvik_cache_;
+  std::string cache_filename_;
+};
+
 static constexpr uint64_t kLowSpaceValue = 50 * MB;
 static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
 
@@ -1506,70 +1661,56 @@ static bool CheckSpace(const std::string& cache_filename, std::string* error_msg
   return true;
 }
 
-std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_location,
-                                                        const InstructionSet image_isa,
-                                                        bool secondary_image,
-                                                        std::string* error_msg) {
+bool ImageSpace::LoadBootImage(
+    const std::string& image_location,
+    const InstructionSet image_isa,
+    /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+    /*out*/ uint8_t** oat_file_end) {
   ScopedTrace trace(__FUNCTION__);
+  DCHECK(boot_image_spaces != nullptr);
+  DCHECK(boot_image_spaces->empty());
+  DCHECK(oat_file_end != nullptr);
+  DCHECK_NE(image_isa, InstructionSet::kNone);
+
+  if (image_location.empty()) {
+    return false;
+  }
+
+  BootImageLoader loader(image_location, image_isa);
+
   // Step 0: Extra zygote work.
 
   // Step 0.a: If we're the zygote, mark boot.
-  const bool is_zygote = Runtime::Current()->IsZygote();
-  if (is_zygote && !secondary_image && CanWriteToDalvikCache(image_isa)) {
+  if (loader.IsZygote() && CanWriteToDalvikCache(image_isa)) {
     MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
   }
 
+  loader.FindImageFiles();
+
   // Step 0.b: If we're the zygote, check for free space, and prune the cache preemptively,
   //           if necessary. While the runtime may be fine (it is pretty tolerant to
   //           out-of-disk-space situations), other parts of the platform are not.
   //
   //           The advantage of doing this proactively is that the later steps are simplified,
   //           i.e., we do not need to code retries.
-  std::string system_filename;
-  bool has_system = false;
-  std::string cache_filename;
-  bool has_cache = false;
-  bool dalvik_cache_exists = false;
-  bool is_global_cache = true;
-  std::string dalvik_cache;
-  bool found_image = FindImageFilenameImpl(image_location,
-                                           image_isa,
-                                           &has_system,
-                                           &system_filename,
-                                           &dalvik_cache_exists,
-                                           &dalvik_cache,
-                                           &is_global_cache,
-                                           &has_cache,
-                                           &cache_filename);
 
   bool dex2oat_enabled = Runtime::Current()->IsImageDex2OatEnabled();
 
-  if (is_zygote && dalvik_cache_exists && !secondary_image) {
+  if (loader.IsZygote() && loader.DalvikCacheExists()) {
     // Extra checks for the zygote. These only apply when loading the first image, explained below.
+    const std::string& dalvik_cache = loader.GetDalvikCache();
     DCHECK(!dalvik_cache.empty());
     std::string local_error_msg;
    // All secondary images are verified when the primary image is verified.
-    bool verified = VerifyImage(image_location, dalvik_cache.c_str(), image_isa, &local_error_msg);
-    // If we prune for space at a secondary image, we may end up in a crash loop with the _exit
-    // path.
+    bool verified =
+        VerifyImage(image_location.c_str(), dalvik_cache.c_str(), image_isa, &local_error_msg);
     bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
     if (!verified || !check_space) {
-      // Note: it is important to only prune for space on the primary image, or we will hit the
-      // restart path.
      LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
       PruneDalvikCache(image_isa);
 
       // Re-evaluate the image.
-      found_image = FindImageFilenameImpl(image_location,
-                                          image_isa,
-                                          &has_system,
-                                          &system_filename,
-                                          &dalvik_cache_exists,
-                                          &dalvik_cache,
-                                          &is_global_cache,
-                                          &has_cache,
-                                          &cache_filename);
+      loader.FindImageFiles();
     }
     if (!check_space) {
       // Disable compilation/patching - we do not want to fill up the space again.
@@ -1580,39 +1721,16 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
   // Collect all the errors.
   std::vector<std::string> error_msgs;
 
-  // Step 1: Check if we have an existing and relocated image.
-
-  // Step 1.a: Have files in system and cache. Then they need to match.
-  if (found_image && has_system && has_cache) {
+  // Step 1: Check if we have an existing image in the dalvik cache.
+  if (loader.HasCache()) {
     std::string local_error_msg;
-    // Check that the files are matching.
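    // What the deleted ChecksumsMatch() helper verified is now done per space in
    // LoadFromDalvikCache() when validate_system_checksums is true; in sketch form:
    //
    //   if (cache_space->GetImageHeader().GetOatChecksum() != system_hdr.GetOatChecksum()) {
    //     // reject the cached copy; fall through to the relocate/compile steps
    //   }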
-    if (ChecksumsMatch(system_filename.c_str(), cache_filename.c_str(), &local_error_msg)) {
-      std::unique_ptr<ImageSpace> relocated_space =
-          ImageSpaceLoader::Load(image_location,
-                                 cache_filename,
-                                 is_zygote,
-                                 is_global_cache,
-                                 /* validate_oat_file */ false,
-                                 &local_error_msg);
-      if (relocated_space != nullptr) {
-        return relocated_space;
-      }
-    }
-    error_msgs.push_back(local_error_msg);
-  }
-
-  // Step 1.b: Only have a cache file.
-  if (found_image && !has_system && has_cache) {
-    std::string local_error_msg;
-    std::unique_ptr<ImageSpace> cache_space =
-        ImageSpaceLoader::Load(image_location,
-                               cache_filename,
-                               is_zygote,
-                               is_global_cache,
-                               /* validate_oat_file */ true,
-                               &local_error_msg);
-    if (cache_space != nullptr) {
-      return cache_space;
+    // If we have system image, validate system image checksums, otherwise validate the oat file.
+    if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
+                                   /* validate_oat_file */ !loader.HasSystem(),
+                                   boot_image_spaces,
+                                   oat_file_end,
+                                   &local_error_msg)) {
+      return true;
     }
     error_msgs.push_back(local_error_msg);
   }
@@ -1622,83 +1740,64 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
   // Step 2.a: We are not required to relocate it. Then we can use it directly.
   bool relocate = Runtime::Current()->ShouldRelocate();
 
-  if (found_image && has_system && !relocate) {
+  if (loader.HasSystem() && !relocate) {
     std::string local_error_msg;
-    std::unique_ptr<ImageSpace> system_space =
-        ImageSpaceLoader::Load(image_location,
-                               system_filename,
-                               is_zygote,
-                               is_global_cache,
-                               /* validate_oat_file */ false,
-                               &local_error_msg);
-    if (system_space != nullptr) {
-      return system_space;
+    if (loader.LoadFromSystem(boot_image_spaces, oat_file_end, &local_error_msg)) {
+      return true;
     }
     error_msgs.push_back(local_error_msg);
   }
 
-  // Step 2.b: We require a relocated image. Then we must patch it. This step fails if this is a
-  // secondary image.
-  if (found_image && has_system && relocate) {
+  // Step 2.b: We require a relocated image. Then we must patch it.
+  if (loader.HasSystem() && relocate) {
     std::string local_error_msg;
     if (!dex2oat_enabled) {
       local_error_msg = "Patching disabled.";
-    } else if (secondary_image) {
-      // We really want a working image. Prune and restart.
-      PruneDalvikCache(image_isa);
-      _exit(1);
-    } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
-      bool patch_success =
-          RelocateImage(image_location, dalvik_cache.c_str(), image_isa, &local_error_msg);
+    } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
+      bool patch_success = RelocateImage(
+          image_location.c_str(), loader.GetDalvikCache().c_str(), image_isa, &local_error_msg);
       if (patch_success) {
-        std::unique_ptr<ImageSpace> patched_space =
-            ImageSpaceLoader::Load(image_location,
-                                   cache_filename,
-                                   is_zygote,
-                                   is_global_cache,
-                                   /* validate_oat_file */ false,
-                                   &local_error_msg);
-        if (patched_space != nullptr) {
-          return patched_space;
+        if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
+                                       /* validate_oat_file */ false,
+                                       boot_image_spaces,
+                                       oat_file_end,
+                                       &local_error_msg)) {
+          return true;
         }
       }
     }
     error_msgs.push_back(StringPrintf("Cannot relocate image %s to %s: %s",
-                                      image_location,
-                                      cache_filename.c_str(),
+                                      image_location.c_str(),
+                                      loader.GetCacheFilename().c_str(),
                                       local_error_msg.c_str()));
   }
 
-  // Step 3: We do not have an existing image in /system, so generate an image into the dalvik
-  // cache. This step fails if this is a secondary image.
-  if (!has_system) {
+  // Step 3: We do not have an existing image in /system,
+  // so generate an image into the dalvik cache.
+  if (!loader.HasSystem()) {
     std::string local_error_msg;
     if (!dex2oat_enabled) {
       local_error_msg = "Image compilation disabled.";
-    } else if (secondary_image) {
-      local_error_msg = "Cannot compile a secondary image.";
-    } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
-      bool compilation_success = GenerateImage(cache_filename, image_isa, &local_error_msg);
+    } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
+      bool compilation_success =
+          GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
       if (compilation_success) {
-        std::unique_ptr<ImageSpace> compiled_space =
-            ImageSpaceLoader::Load(image_location,
-                                   cache_filename,
-                                   is_zygote,
-                                   is_global_cache,
-                                   /* validate_oat_file */ false,
-                                   &local_error_msg);
-        if (compiled_space != nullptr) {
-          return compiled_space;
+        if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
+                                       /* validate_oat_file */ false,
+                                       boot_image_spaces,
+                                       oat_file_end,
+                                       &local_error_msg)) {
+          return true;
         }
       }
     }
     error_msgs.push_back(StringPrintf("Cannot compile image to %s: %s",
-                                      cache_filename.c_str(),
+                                      loader.GetCacheFilename().c_str(),
                                       local_error_msg.c_str()));
   }
 
-  // We failed. Prune the cache the free up space, create a compound error message and return no
-  // image.
+  // We failed. Prune the cache the free up space, create a compound error message
+  // and return false.
   PruneDalvikCache(image_isa);
 
   std::ostringstream oss;
@@ -1709,84 +1808,11 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
     }
     oss << msg;
   }
-  *error_msg = oss.str();
-
-  return nullptr;
-}
-
-bool ImageSpace::LoadBootImage(const std::string& image_file_name,
-                               const InstructionSet image_instruction_set,
-                               std::vector<space::ImageSpace*>* boot_image_spaces,
-                               uint8_t** oat_file_end) {
-  DCHECK(boot_image_spaces != nullptr);
-  DCHECK(boot_image_spaces->empty());
-  DCHECK(oat_file_end != nullptr);
-  DCHECK_NE(image_instruction_set, InstructionSet::kNone);
-
-  if (image_file_name.empty()) {
-    return false;
-  }
-
-  // For code reuse, handle this like a work queue.
-  std::vector<std::string> image_file_names;
-  image_file_names.push_back(image_file_name);
-
-  bool error = false;
-  uint8_t* oat_file_end_tmp = *oat_file_end;
-
-  for (size_t index = 0; index < image_file_names.size(); ++index) {
-    std::string& image_name = image_file_names[index];
-    std::string error_msg;
-    std::unique_ptr<space::ImageSpace> boot_image_space_uptr = CreateBootImage(
-        image_name.c_str(),
-        image_instruction_set,
-        index > 0,
-        &error_msg);
-    if (boot_image_space_uptr != nullptr) {
-      space::ImageSpace* boot_image_space = boot_image_space_uptr.release();
-      boot_image_spaces->push_back(boot_image_space);
-      // Oat files referenced by image files immediately follow them in memory, ensure alloc space
-      // isn't going to get in the middle
-      uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
-      CHECK_GT(oat_file_end_addr, boot_image_space->End());
-      oat_file_end_tmp = AlignUp(oat_file_end_addr, kPageSize);
-
-      if (index == 0) {
-        // If this was the first space, check whether there are more images to load.
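        // The replacement for this work queue is GetBootClassPathImageLocations()
        // shown above: read OatHeader::kBootClassPathKey once from the primary
        // image's oat file and expand every location up front, roughly:
        //
        //   all_locations->push_back(image_location);
        //   ExtractMultiImageLocations(image_location, boot_classpath, all_locations);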
-        const OatFile* boot_oat_file = boot_image_space->GetOatFile();
-        if (boot_oat_file == nullptr) {
-          continue;
-        }
-
-        const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
-        const char* boot_classpath =
-            boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
-        if (boot_classpath == nullptr) {
-          continue;
-        }
-
-        ExtractMultiImageLocations(image_file_name, boot_classpath, &image_file_names);
-      }
-    } else {
-      error = true;
-      LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
-                 << "Attempting to fall back to imageless running. Error was: " << error_msg
-                 << "\nAttempted image: " << image_name;
-      break;
-    }
-  }
-  if (error) {
-    // Remove already loaded spaces.
-    for (space::Space* loaded_space : *boot_image_spaces) {
-      delete loaded_space;
-    }
-    boot_image_spaces->clear();
-    return false;
-  }
+  LOG(ERROR) << "Could not create image space with image file '" << image_location << "'. "
+             << "Attempting to fall back to imageless running. Error was: " << oss.str();
 
-  *oat_file_end = oat_file_end_tmp;
-  return true;
+  return false;
 }
 
 ImageSpace::~ImageSpace() {
@@ -1815,11 +1841,7 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
                                                            const OatFile* oat_file,
                                                            std::string* error_msg) {
-  return ImageSpaceLoader::Init(image,
-                                image,
-                                /*validate_oat_file*/false,
-                                oat_file,
-                                /*out*/error_msg);
+  return Loader::Init(image, image, /*validate_oat_file*/false, oat_file, /*out*/error_msg);
 }
 
 const OatFile* ImageSpace::GetOatFile() const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 3383d6b383..771ba2acb8 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -41,11 +41,11 @@ class ImageSpace : public MemMapSpace {
   // On successful return, the loaded spaces are added to boot_image_spaces (which must be
   // empty on entry) and oat_file_end is updated with the (page-aligned) end of the last
   // oat file.
-  static bool LoadBootImage(const std::string& image_file_name,
-                            const InstructionSet image_instruction_set,
-                            std::vector<space::ImageSpace*>* boot_image_spaces,
-                            uint8_t** oat_file_end)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  static bool LoadBootImage(
+      const std::string& image_location,
+      const InstructionSet image_isa,
+      /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/ uint8_t** oat_file_end) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to open an existing app image space.
   static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
@@ -197,23 +197,11 @@ class ImageSpace : public MemMapSpace {
 
   const std::string image_location_;
 
-  friend class ImageSpaceLoader;
   friend class Space;
 
  private:
-  // Create a boot image space from an image file for a specified instruction
-  // set. Cannot be used for future allocation or collected.
-  //
-  // Create also opens the OatFile associated with the image file so
-  // that it be contiguously allocated with the image before the
-  // creation of the alloc space. The ReleaseOatFile will later be
-  // used to transfer ownership of the OatFile to the ClassLinker when
-  // it is initialized.
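  // A usage sketch of the batch API that replaces the declaration removed below,
  // mirroring the heap.cc hunk above:
  //
  //   std::vector<std::unique_ptr<space::ImageSpace>> spaces;
  //   uint8_t* oat_file_end = nullptr;
  //   if (space::ImageSpace::LoadBootImage(location, isa, &spaces, &oat_file_end)) {
  //     // either every boot image space loaded, or none did
  //   }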
-  static std::unique_ptr<ImageSpace> CreateBootImage(const char* image,
-                                                     InstructionSet image_isa,
-                                                     bool secondary_image,
-                                                     std::string* error_msg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  class Loader;
+  class BootImageLoader;
 
   DISALLOW_COPY_AND_ASSIGN(ImageSpace);
 };
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 4c06e12af9..abbc50936d 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -681,57 +681,80 @@ extern "C" size_t MterpSuspendCheck(Thread* self)
   return MterpShouldSwitchInterpreters();
 }
 
-// Helper function to do a null check after trying to resolve the field. Not for statics since obj
-// does not exist there. There is a suspend check, object is a double pointer to update the value
-// in the caller in case it moves.
-template<FindFieldType type, bool kAccessCheck>
-ALWAYS_INLINE static inline ArtField* FindInstanceField(uint32_t field_idx,
-                                                        ArtMethod* referrer,
-                                                        Thread* self,
-                                                        size_t size,
-                                                        mirror::Object** obj)
-    REQUIRES(!Roles::uninterruptible_)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+template<typename PrimType, typename RetType, typename Getter, FindFieldType kType>
+NO_INLINE RetType artGetInstanceFromMterp(uint32_t field_idx,
+                                          mirror::Object* obj,
+                                          ArtMethod* referrer,
+                                          Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   StackHandleScope<1> hs(self);
-  HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
-  ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
-  if (LIKELY(field != nullptr) && UNLIKELY(h == nullptr)) {
-    ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/true);
-    return nullptr;
+  HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj));  // GC might move the object.
+  ArtField* field = FindFieldFromCode<kType, /* access_checks */ false>(
+      field_idx, referrer, self, sizeof(PrimType));
+  if (UNLIKELY(field == nullptr)) {
+    return 0;  // Will throw exception by checking with Thread::Current.
   }
-  return field;
+  if (UNLIKELY(h == nullptr)) {
+    ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/ true);
+    return 0;  // Will throw exception by checking with Thread::Current.
+  }
+  return Getter::Get(obj, field);
+}
+
+template<typename PrimType, typename RetType, typename Getter>
+ALWAYS_INLINE RetType artGetInstanceFromMterpFast(uint32_t field_idx,
+                                                  mirror::Object* obj,
+                                                  ArtMethod* referrer,
+                                                  Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  constexpr bool kIsObject = std::is_same<RetType, mirror::Object*>::value;
+  constexpr FindFieldType kType = kIsObject ? InstanceObjectRead : InstancePrimitiveRead;
+
+  // This effectively inlines the fast path from ArtMethod::GetDexCache.
+  // It avoids non-inlined call which in turn allows elimination of the prologue and epilogue.
+  if (LIKELY(!referrer->IsObsolete())) {
+    // Avoid read barriers, since we need only the pointer to the native (non-movable)
+    // DexCache field array which we can get even through from-space objects.
+    ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
+    mirror::DexCache* dex_cache = klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();
+    // Try to find the desired field in DexCache.
+    ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
+    if (LIKELY(field != nullptr & obj != nullptr)) {
+      if (kIsDebugBuild) {
+        // Compare the fast path and slow path.
+        StackHandleScope<1> hs(self);
+        HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj));  // GC might move the object.
+        DCHECK_EQ(field, (FindFieldFromCode<kType, /* access_checks */ false>(
+            field_idx, referrer, self, sizeof(PrimType))));
+      }
+      return Getter::Get(obj, field);
+    }
+  }
+  // Slow path. Last and with identical arguments so that it becomes single instruction tail call.
+  return artGetInstanceFromMterp<PrimType, RetType, Getter, kType>(field_idx, obj, referrer, self);
 }
 
-#define ART_GET_FIELD_FROM_MTERP(Kind, PrimitiveType, RetType, SetType, \
-                                 PrimitiveOrObject, IsObject, Ptr) \
+#define ART_GET_FIELD_FROM_MTERP(Kind, PrimType, RetType, Ptr) \
 extern "C" RetType artGet ## Kind ## InstanceFromMterp(uint32_t field_idx, \
                                                        mirror::Object* obj, \
                                                        ArtMethod* referrer, \
                                                        Thread* self) \
     REQUIRES_SHARED(Locks::mutator_lock_) { \
-  constexpr FindFieldType kType = Instance ## PrimitiveOrObject ## Read; \
-  constexpr size_t kSize = sizeof(PrimitiveType); \
-  mirror::DexCache* dex_cache = referrer->GetDexCache(); \
-  ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize); \
-  if (LIKELY(field != nullptr && obj != nullptr)) { \
-    return field->Get ## Kind (obj)Ptr; /* NOLINT */ \
-  } \
-  field = FindInstanceField<kType, true>(field_idx, referrer, self, kSize, &obj); \
-  if (LIKELY(field != nullptr)) { \
-    return field->Get ## Kind (obj)Ptr; /* NOLINT */ \
+  struct Getter { /* Specialize the field load depending on the field type */ \
+    static RetType Get(mirror::Object* o, ArtField* f) REQUIRES_SHARED(Locks::mutator_lock_) { \
+      return f->Get##Kind(o)Ptr; \
     } \
-  /* Will throw exception by checking with Thread::Current. */ \
-  return 0; \
+  }; \
+  return artGetInstanceFromMterpFast<PrimType, RetType, Getter>(field_idx, obj, referrer, self); \
 } \
 
-ART_GET_FIELD_FROM_MTERP(Byte, int8_t, ssize_t, uint32_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(Boolean, int8_t, size_t, uint32_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(Short, int16_t, ssize_t, uint16_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(Char, int16_t, size_t, uint16_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(32, int32_t, size_t, uint32_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(64, int64_t, uint64_t, uint64_t, Primitive, false, )
-ART_GET_FIELD_FROM_MTERP(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*,
-                         mirror::Object*, Object, true, .Ptr())
+ART_GET_FIELD_FROM_MTERP(Byte, int8_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(Boolean, uint8_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(Short, int16_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(Char, uint16_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(32, uint32_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(64, uint64_t, uint64_t, )
+ART_GET_FIELD_FROM_MTERP(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*, .Ptr())
 
 #undef ART_GET_FIELD_FROM_MTERP
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b92affa26e..a8692a0702 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -608,17 +608,17 @@ void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
 
 void JitCodeCache::FreeAllMethodHeaders(
     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::cha_lock_);
-    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
-        ->RemoveDependentsWithMethodHeaders(method_headers);
-  }
-
   // We need to remove entries in method_headers from CHA dependencies
   // first since once we do FreeCode() below, the memory can be reused
   // so it's possible for the same method_header to start representing
   // different compile code.
   MutexLock mu(Thread::Current(), lock_);
+  {
+    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
+    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
+        ->RemoveDependentsWithMethodHeaders(method_headers);
+  }
+
   ScopedCodeCacheWrite scc(this);
   for (const OatQuickMethodHeader* method_header : method_headers) {
     FreeCodeAndData(method_header->GetCode());
@@ -742,6 +742,18 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
   method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
 }
 
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+  while (collection_in_progress_) {
+    lock_.Unlock(self);
+    {
+      ScopedThreadSuspension sts(self, kSuspended);
+      MutexLock mu(self, lock_);
+      WaitForPotentialCollectionToComplete(self);
+    }
+    lock_.Lock(self);
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -755,6 +767,13 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
     const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   DCHECK(!method->IsNative() || !osr);
+
+  if (!method->IsNative()) {
+    // We need to do this before grabbing the lock_ because it needs to be able to see the string
+    // InternTable. Native methods do not have roots.
+    DCheckRootsAreValid(roots);
+  }
+
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -763,44 +782,45 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
   uint8_t* memory = nullptr;
+  MutexLock mu(self, lock_);
+  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
+  // finish.
+  WaitForPotentialCollectionToCompleteRunnable(self);
   {
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
-    WaitForPotentialCollectionToComplete(self);
-    {
-      ScopedCodeCacheWrite scc(this);
-      memory = AllocateCode(total_size);
-      if (memory == nullptr) {
-        return nullptr;
-      }
-      code_ptr = memory + header_size;
-
-      std::copy(code, code + code_size, code_ptr);
-      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      new (method_header) OatQuickMethodHeader(
-          (stack_map != nullptr) ? code_ptr - stack_map : 0u,
-          code_size);
-      // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
-      // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
-      // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
-      // 6P) stop being supported or their kernels are fixed.
-      //
-      // For reference, this behavior is caused by this commit:
-      // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
-      FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
-                            reinterpret_cast<char*>(code_ptr + code_size));
-      DCHECK(!Runtime::Current()->IsAotCompiler());
-      if (has_should_deoptimize_flag) {
-        method_header->SetHasShouldDeoptimizeFlag();
-      }
+    ScopedCodeCacheWrite scc(this);
+    memory = AllocateCode(total_size);
+    if (memory == nullptr) {
+      return nullptr;
+    }
+    code_ptr = memory + header_size;
+
+    std::copy(code, code + code_size, code_ptr);
+    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+    new (method_header) OatQuickMethodHeader(
+        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+        code_size);
+    // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+    // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+    // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+    // 6P) stop being supported or their kernels are fixed.
+    //
+    // For reference, this behavior is caused by this commit:
+    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+    FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+                          reinterpret_cast<char*>(code_ptr + code_size));
+    DCHECK(!Runtime::Current()->IsAotCompiler());
+    if (has_should_deoptimize_flag) {
+      method_header->SetHasShouldDeoptimizeFlag();
     }
     number_of_compilations_++;
   }
   // We need to update the entry point in the runnable state for the instrumentation.
   {
-    // Need cha_lock_ for checking all single-implementation flags and register
-    // dependencies.
+    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
+    // compiled code is considered invalidated by some class linking, but below we still make the
+    // compiled code valid for the method. Need cha_lock_ for checking all single-implementation
+    // flags and register dependencies.
     MutexLock cha_mu(self, *Locks::cha_lock_);
     bool single_impl_still_valid = true;
     for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -826,16 +846,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
           single_impl, method, method_header);
     }
 
-    if (!method->IsNative()) {
-      // We need to do this before grabbing the lock_ because it needs to be able to see the string
-      // InternTable. Native methods do not have roots.
-      DCheckRootsAreValid(roots);
-    }
-
-    // The following needs to be guarded by cha_lock_ also. Otherwise it's
-    // possible that the compiled code is considered invalidated by some class linking,
-    // but below we still make the compiled code valid for the method.
-    MutexLock mu(self, lock_);
     if (UNLIKELY(method->IsNative())) {
       auto it = jni_stubs_map_.find(JniStubKey(method));
       DCHECK(it != jni_stubs_map_.end())
@@ -867,11 +877,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
             method, method_header->GetEntryPoint());
       }
     }
-    if (collection_in_progress_) {
-      // We need to update the live bitmap if there is a GC to ensure it sees this new
-      // code.
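    // This removed bitmap update is no longer needed: a sketch of the invariant
    // that replaces it, assuming collections also take lock_ before starting:
    //
    //   MutexLock mu(self, lock_);
    //   WaitForPotentialCollectionToCompleteRunnable(self);
    //   // collection_in_progress_ stays false while lock_ is held below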
-      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
-    }
     VLOG(jit) << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
               << ArtMethod::PrettyMethod(method) << "@" << method
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 29f9c9cf43..632b45bb00 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -314,6 +314,12 @@ class JitCodeCache {
       REQUIRES(lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
+  // The non-mutator lock version should be used if possible. This method will release then
+  // re-acquire the mutator lock.
+  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 1c5a5d548b..0bb3f911a2 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -164,13 +164,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
                                               std::map<size_t, TypeLinkage>& types) {
   TypeLinkage& current_type = types[current_class_type.index_];
   bool skip_next = false;
-  size_t last_start = 0u;
   for (auto inst = code_item.begin(); ; ++inst) {
-    if (!count_types && last_start != buffer_.size()) {
-      // Register the instruction blob.
-      ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + last_start, buffer_.end())];
-      last_start = buffer_.size();
-    }
     if (inst == code_item.end()) {
       break;
     }
@@ -334,31 +328,31 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
       }
     }
 
-    bool result = false;
     uint32_t type_idx = current_type.types_.Get(receiver_type.index_);
     uint32_t local_idx = types[receiver_type.index_].methods_.Get(method_idx);
+
+    // If true, we always put the return value in r0.
+    static constexpr bool kMoveToDestReg = true;
+
+    std::vector<uint32_t> new_args;
+    if (kMoveToDestReg && arg_count % 2 == 1) {
+      // Use the extra nibble to sneak in part of the type index.
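      // A worked example, assuming local_idx fits in a byte: local_idx == 0x57
      // with an odd arg_count emits 0x5 as the spare nibble and keeps 0x07 for
      // the regular encoding:
      //
      //   new_args.push_back(local_idx >> 4);  // 0x57 >> 4   == 0x5
      //   local_idx ^= local_idx & 0xF0;       // 0x57 ^ 0x50 == 0x07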
+      new_args.push_back(local_idx >> 4);
+      local_idx ^= local_idx & 0xF0;
+    }
     ExtendPrefix(&type_idx, &local_idx);
-    ExtendPrefix(&dest_reg, &local_idx);
-    if (arg_count == 0) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx});
-    } else if (arg_count == 1) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0]});
-    } else if (arg_count == 2) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
-                                    args[1]});
-    } else if (arg_count == 3) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
-                                    args[1], args[2]});
-    } else if (arg_count == 4) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
-                                    args[1], args[2], args[3]});
-    } else if (arg_count == 5) {
-      result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
-                                    args[1], args[2], args[3], args[4]});
+    new_args.push_back(type_idx);
+    new_args.push_back(local_idx);
+    if (!kMoveToDestReg) {
+      ExtendPrefix(&dest_reg, &local_idx);
+      new_args.push_back(dest_reg);
     }
-
-    if (result) {
+    new_args.insert(new_args.end(), args, args + arg_count);
+    if (InstNibbles(opcode, new_args)) {
       skip_next = next_move_result;
+      if (kMoveToDestReg && dest_reg != 0u) {
+        CHECK(InstNibbles(Instruction::MOVE, {dest_reg >> 4, dest_reg & 0xF}));
+      }
       continue;
     }
   }
@@ -466,8 +460,11 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
 
 void NewRegisterInstructions::Add(Instruction::Code opcode, const Instruction& inst) {
   const uint8_t* start = reinterpret_cast<const uint8_t*>(&inst);
+  const size_t buffer_start = buffer_.size();
   buffer_.push_back(opcode);
   buffer_.insert(buffer_.end(), start + 1, start + 2 * inst.SizeInCodeUnits());
+  // Register the instruction blob.
+  ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
 }
 
 void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
@@ -500,17 +497,6 @@ void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
   *value2 &= 0XF;
 }
 
-bool NewRegisterInstructions::InstNibblesAndIndex(uint8_t opcode,
-                                                  uint16_t idx,
-                                                  const std::vector<uint32_t>& args) {
-  if (!InstNibbles(opcode, args)) {
-    return false;
-  }
-  buffer_.push_back(static_cast<uint8_t>(idx >> 8));
-  buffer_.push_back(static_cast<uint8_t>(idx));
-  return true;
-}
-
 bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) {
   if (verbose_level_ >= VerboseLevel::kEverything) {
     std::cout << " ==> " << Instruction::Name(static_cast<Instruction::Code>(opcode)) << " ";
@@ -526,6 +512,7 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
       return false;
     }
   }
+  const size_t buffer_start = buffer_.size();
   buffer_.push_back(opcode);
   for (size_t i = 0; i < args.size(); i += 2) {
     buffer_.push_back(args[i] << 4);
@@ -536,6 +523,8 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
   while (buffer_.size() % alignment_ != 0) {
     buffer_.push_back(0);
   }
+  // Register the instruction blob.
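  // instruction_freq_ keys each emitted byte blob to an occurrence count, so
  // counting at emission time registers every encoding exactly once; the idiom
  // in sketch form (the map type is an assumption, not from this diff):
  //
  //   std::map<std::vector<uint8_t>, size_t> instruction_freq_;
  //   std::vector<uint8_t> blob(buffer_.begin() + buffer_start, buffer_.end());
  //   ++instruction_freq_[blob];  // value-initialized to 0 on first lookup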
+  ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
   return true;
 }
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index ed40ba7d9b..db009b03b8 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -64,7 +64,6 @@ class NewRegisterInstructions : public Experiment {
                        bool count_types,
                        std::map<size_t, TypeLinkage>& types);
   void Add(Instruction::Code opcode, const Instruction& inst);
-  bool InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args);
   bool InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args);
   void ExtendPrefix(uint32_t* value1, uint32_t* value2);
   bool Enabled(BytecodeExperiment experiment) const {