From 7940e44f4517de5e2634a7e07d58d0fb26160513 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Fri, 12 Jul 2013 13:46:57 -0700 Subject: Create separate Android.mk for main build targets The runtime, compiler, dex2oat, and oatdump now are in separate trees to prevent dependency creep. They can now be individually built without rebuilding the rest of the art projects. dalvikvm and jdwpspy were already this way. Builds in the art directory should behave as before, building everything including tests. Change-Id: Ic6b1151e5ed0f823c3dd301afd2b13eb2d8feb81 --- compiler/driver/compiler_driver.cc | 2404 ++++++++++++++++++++++++++++++++++++ 1 file changed, 2404 insertions(+) create mode 100644 compiler/driver/compiler_driver.cc (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc new file mode 100644 index 0000000000..c99d103c17 --- /dev/null +++ b/compiler/driver/compiler_driver.cc @@ -0,0 +1,2404 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiler_driver.h" + +#include + +#include + +#include "base/stl_util.h" +#include "base/timing_logger.h" +#include "class_linker.h" +#include "dex_compilation_unit.h" +#include "dex_file-inl.h" +#include "jni_internal.h" +#include "oat_file.h" +#include "object_utils.h" +#include "runtime.h" +#include "gc/accounting/card_table-inl.h" +#include "gc/accounting/heap_bitmap.h" +#include "gc/space/space.h" +#include "mirror/class_loader.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/field-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" +#include "stubs/stubs.h" +#include "thread.h" +#include "thread_pool.h" +#include "verifier/method_verifier.h" + +#if defined(ART_USE_PORTABLE_COMPILER) +#include "elf_writer_mclinker.h" +#else +#include "elf_writer_quick.h" +#endif + +namespace art { + +static double Percentage(size_t x, size_t y) { + return 100.0 * (static_cast(x)) / (static_cast(x + y)); +} + +static void DumpStat(size_t x, size_t y, const char* str) { + if (x == 0 && y == 0) { + return; + } + LOG(INFO) << Percentage(x, y) << "% of " << str << " for " << (x + y) << " cases"; +} + +class AOTCompilationStats { + public: + AOTCompilationStats() + : stats_lock_("AOT compilation statistics lock"), + types_in_dex_cache_(0), types_not_in_dex_cache_(0), + strings_in_dex_cache_(0), strings_not_in_dex_cache_(0), + resolved_types_(0), unresolved_types_(0), + resolved_instance_fields_(0), unresolved_instance_fields_(0), + resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0), + type_based_devirtualization_(0), + safe_casts_(0), not_safe_casts_(0) { + for (size_t i = 0; i <= kMaxInvokeType; i++) { + resolved_methods_[i] = 0; + unresolved_methods_[i] = 0; + virtual_made_direct_[i] = 0; + direct_calls_to_boot_[i] = 0; + 
direct_methods_to_boot_[i] = 0; + } + } + + void Dump() { + DumpStat(types_in_dex_cache_, types_not_in_dex_cache_, "types known to be in dex cache"); + DumpStat(strings_in_dex_cache_, strings_not_in_dex_cache_, "strings known to be in dex cache"); + DumpStat(resolved_types_, unresolved_types_, "types resolved"); + DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved"); + DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_, + "static fields resolved"); + DumpStat(resolved_local_static_fields_, resolved_static_fields_ + unresolved_static_fields_, + "static fields local to a class"); + DumpStat(safe_casts_, not_safe_casts_, "check-casts removed based on type information"); + // Note, the code below subtracts the stat value so that when added to the stat value we have + // 100% of samples. TODO: clean this up. + DumpStat(type_based_devirtualization_, + resolved_methods_[kVirtual] + unresolved_methods_[kVirtual] + + resolved_methods_[kInterface] + unresolved_methods_[kInterface] - + type_based_devirtualization_, + "virtual/interface calls made direct based on type information"); + + for (size_t i = 0; i <= kMaxInvokeType; i++) { + std::ostringstream oss; + oss << static_cast(i) << " methods were AOT resolved"; + DumpStat(resolved_methods_[i], unresolved_methods_[i], oss.str().c_str()); + if (virtual_made_direct_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast(i) << " methods made direct"; + DumpStat(virtual_made_direct_[i], + resolved_methods_[i] + unresolved_methods_[i] - virtual_made_direct_[i], + oss2.str().c_str()); + } + if (direct_calls_to_boot_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast(i) << " method calls are direct into boot"; + DumpStat(direct_calls_to_boot_[i], + resolved_methods_[i] + unresolved_methods_[i] - direct_calls_to_boot_[i], + oss2.str().c_str()); + } + if (direct_methods_to_boot_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast(i) << 
" method calls have methods in boot"; + DumpStat(direct_methods_to_boot_[i], + resolved_methods_[i] + unresolved_methods_[i] - direct_methods_to_boot_[i], + oss2.str().c_str()); + } + } + } + +// Allow lossy statistics in non-debug builds. +#ifndef NDEBUG +#define STATS_LOCK() MutexLock mu(Thread::Current(), stats_lock_) +#else +#define STATS_LOCK() +#endif + + void TypeInDexCache() { + STATS_LOCK(); + types_in_dex_cache_++; + } + + void TypeNotInDexCache() { + STATS_LOCK(); + types_not_in_dex_cache_++; + } + + void StringInDexCache() { + STATS_LOCK(); + strings_in_dex_cache_++; + } + + void StringNotInDexCache() { + STATS_LOCK(); + strings_not_in_dex_cache_++; + } + + void TypeDoesntNeedAccessCheck() { + STATS_LOCK(); + resolved_types_++; + } + + void TypeNeedsAccessCheck() { + STATS_LOCK(); + unresolved_types_++; + } + + void ResolvedInstanceField() { + STATS_LOCK(); + resolved_instance_fields_++; + } + + void UnresolvedInstanceField() { + STATS_LOCK(); + unresolved_instance_fields_++; + } + + void ResolvedLocalStaticField() { + STATS_LOCK(); + resolved_local_static_fields_++; + } + + void ResolvedStaticField() { + STATS_LOCK(); + resolved_static_fields_++; + } + + void UnresolvedStaticField() { + STATS_LOCK(); + unresolved_static_fields_++; + } + + // Indicate that type information from the verifier led to devirtualization. + void PreciseTypeDevirtualization() { + STATS_LOCK(); + type_based_devirtualization_++; + } + + // Indicate that a method of the given type was resolved at compile time. + void ResolvedMethod(InvokeType type) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + resolved_methods_[type]++; + } + + // Indicate that a method of the given type was unresolved at compile time as it was in an + // unknown dex file. 
+ void UnresolvedMethod(InvokeType type) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + unresolved_methods_[type]++; + } + + // Indicate that a type of virtual method dispatch has been converted into a direct method + // dispatch. + void VirtualMadeDirect(InvokeType type) { + DCHECK(type == kVirtual || type == kInterface || type == kSuper); + STATS_LOCK(); + virtual_made_direct_[type]++; + } + + // Indicate that a method of the given type was able to call directly into boot. + void DirectCallsToBoot(InvokeType type) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + direct_calls_to_boot_[type]++; + } + + // Indicate that a method of the given type was able to be resolved directly from boot. + void DirectMethodsToBoot(InvokeType type) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + direct_methods_to_boot_[type]++; + } + + // A check-cast could be eliminated due to verifier type analysis. + void SafeCast() { + STATS_LOCK(); + safe_casts_++; + } + + // A check-cast couldn't be eliminated due to verifier type analysis. + void NotASafeCast() { + STATS_LOCK(); + not_safe_casts_++; + } + + private: + Mutex stats_lock_; + + size_t types_in_dex_cache_; + size_t types_not_in_dex_cache_; + + size_t strings_in_dex_cache_; + size_t strings_not_in_dex_cache_; + + size_t resolved_types_; + size_t unresolved_types_; + + size_t resolved_instance_fields_; + size_t unresolved_instance_fields_; + + size_t resolved_local_static_fields_; + size_t resolved_static_fields_; + size_t unresolved_static_fields_; + // Type based devirtualization for invoke interface and virtual. 
+ size_t type_based_devirtualization_; + + size_t resolved_methods_[kMaxInvokeType + 1]; + size_t unresolved_methods_[kMaxInvokeType + 1]; + size_t virtual_made_direct_[kMaxInvokeType + 1]; + size_t direct_calls_to_boot_[kMaxInvokeType + 1]; + size_t direct_methods_to_boot_[kMaxInvokeType + 1]; + + size_t safe_casts_; + size_t not_safe_casts_; + + DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats); +}; + +extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver); +extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& compiler); + +extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver); +extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler); + +extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint32_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); +extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint32_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); + +extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint32_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); + +extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint32_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); + +extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver, + uint32_t access_flags, uint32_t method_idx, + const 
art::DexFile& dex_file); + +extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler, + uint32_t access_flags, uint32_t method_idx, + const art::DexFile& dex_file); + +extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver, + std::string const& filename); + +CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set, + bool image, DescriptorSet* image_classes, + size_t thread_count, bool support_debugging, + bool dump_stats, bool dump_timings) + : compiler_backend_(compiler_backend), + instruction_set_(instruction_set), + freezing_constructor_lock_("freezing constructor lock"), + compiled_classes_lock_("compiled classes lock"), + compiled_methods_lock_("compiled method lock"), + image_(image), + image_classes_(image_classes), + thread_count_(thread_count), + support_debugging_(support_debugging), + start_ns_(0), + stats_(new AOTCompilationStats), + dump_stats_(dump_stats), + dump_timings_(dump_timings), + compiler_library_(NULL), + compiler_(NULL), + compiler_context_(NULL), + jni_compiler_(NULL), + compiler_enable_auto_elf_loading_(NULL), + compiler_get_method_code_addr_(NULL), + support_boot_image_fixup_(true) +{ + CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key"); + + // TODO: more work needed to combine initializations and allow per-method backend selection + typedef void (*InitCompilerContextFn)(CompilerDriver&); + InitCompilerContextFn init_compiler_context; + if (compiler_backend_ == kPortable){ + // Initialize compiler_context_ + init_compiler_context = reinterpret_cast(ArtInitCompilerContext); + compiler_ = reinterpret_cast(ArtCompileMethod); + } else { + init_compiler_context = reinterpret_cast(ArtInitQuickCompilerContext); + compiler_ = reinterpret_cast(ArtQuickCompileMethod); + } + + dex_to_dex_compiler_ = reinterpret_cast(ArtCompileDEX); + +#ifdef ART_SEA_IR_MODE + sea_ir_compiler_ = NULL; + if (Runtime::Current()->IsSeaIRMode()) { + 
sea_ir_compiler_ = reinterpret_cast(SeaIrCompileMethod); + } +#endif + + init_compiler_context(*this); + + if (compiler_backend_ == kPortable) { + jni_compiler_ = reinterpret_cast(ArtLLVMJniCompileMethod); + } else { + jni_compiler_ = reinterpret_cast(ArtQuickJniCompileMethod); + } + + CHECK(!Runtime::Current()->IsStarted()); + if (!image_) { + CHECK(image_classes_.get() == NULL); + } +} + +CompilerDriver::~CompilerDriver() { + Thread* self = Thread::Current(); + { + MutexLock mu(self, compiled_classes_lock_); + STLDeleteValues(&compiled_classes_); + } + { + MutexLock mu(self, compiled_methods_lock_); + STLDeleteValues(&compiled_methods_); + } + { + MutexLock mu(self, compiled_methods_lock_); + STLDeleteElements(&code_to_patch_); + } + { + MutexLock mu(self, compiled_methods_lock_); + STLDeleteElements(&methods_to_patch_); + } + CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key"); + typedef void (*UninitCompilerContextFn)(CompilerDriver&); + UninitCompilerContextFn uninit_compiler_context; + // Uninitialize compiler_context_ + // TODO: rework to combine initialization/uninitialization + if (compiler_backend_ == kPortable) { + uninit_compiler_context = reinterpret_cast(ArtUnInitCompilerContext); + } else { + uninit_compiler_context = reinterpret_cast(ArtUnInitQuickCompilerContext); + } + uninit_compiler_context(*this); +} + +CompilerTls* CompilerDriver::GetTls() { + // Lazily create thread-local storage + CompilerTls* res = static_cast(pthread_getspecific(tls_key_)); + if (res == NULL) { + res = new CompilerTls(); + CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls"); + } + return res; +} + +const std::vector* CompilerDriver::CreatePortableResolutionTrampoline() const { + switch (instruction_set_) { + case kArm: + case kThumb2: + return arm::CreatePortableResolutionTrampoline(); + case kMips: + return mips::CreatePortableResolutionTrampoline(); + case kX86: + return x86::CreatePortableResolutionTrampoline(); + default: + 
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; + return NULL; + } +} + +const std::vector* CompilerDriver::CreateQuickResolutionTrampoline() const { + switch (instruction_set_) { + case kArm: + case kThumb2: + return arm::CreateQuickResolutionTrampoline(); + case kMips: + return mips::CreateQuickResolutionTrampoline(); + case kX86: + return x86::CreateQuickResolutionTrampoline(); + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; + return NULL; + } +} + +const std::vector* CompilerDriver::CreateInterpreterToInterpreterEntry() const { + switch (instruction_set_) { + case kArm: + case kThumb2: + return arm::CreateInterpreterToInterpreterEntry(); + case kMips: + return mips::CreateInterpreterToInterpreterEntry(); + case kX86: + return x86::CreateInterpreterToInterpreterEntry(); + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; + return NULL; + } +} + +const std::vector* CompilerDriver::CreateInterpreterToQuickEntry() const { + switch (instruction_set_) { + case kArm: + case kThumb2: + return arm::CreateInterpreterToQuickEntry(); + case kMips: + return mips::CreateInterpreterToQuickEntry(); + case kX86: + return x86::CreateInterpreterToQuickEntry(); + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; + return NULL; + } +} + +void CompilerDriver::CompileAll(jobject class_loader, + const std::vector& dex_files) { + DCHECK(!Runtime::Current()->IsStarted()); + + UniquePtr thread_pool(new ThreadPool(thread_count_)); + TimingLogger timings("compiler", false); + + PreCompile(class_loader, dex_files, *thread_pool.get(), timings); + + Compile(class_loader, dex_files, *thread_pool.get(), timings); + + if (dump_timings_ && timings.GetTotalNs() > MsToNs(1000)) { + LOG(INFO) << Dumpable(timings); + } + + if (dump_stats_) { + stats_->Dump(); + } +} + +static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& class_def) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Do not allow DEX-to-DEX compilation of image classes. This is to prevent the + // verifier from passing on "quick" instruction at compilation time. It must + // only pass on quick instructions at runtime. + if (class_loader == NULL) { + return false; + } + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Class* klass = class_linker->FindClass(descriptor, class_loader); + if (klass == NULL) { + Thread* self = Thread::Current(); + CHECK(self->IsExceptionPending()); + self->ClearException(); + return false; + } + // DEX-to-DEX compilation is only allowed on preverified classes. + return klass->IsVerified(); +} + +void CompilerDriver::CompileOne(const mirror::AbstractMethod* method) { + DCHECK(!Runtime::Current()->IsStarted()); + Thread* self = Thread::Current(); + jobject jclass_loader; + const DexFile* dex_file; + uint32_t class_def_idx; + { + ScopedObjectAccessUnchecked soa(self); + ScopedLocalRef + local_class_loader(soa.Env(), + soa.AddLocalReference(method->GetDeclaringClass()->GetClassLoader())); + jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get()); + // Find the dex_file + MethodHelper mh(method); + dex_file = &mh.GetDexFile(); + class_def_idx = mh.GetClassDefIndex(); + } + self->TransitionFromRunnableToSuspended(kNative); + + std::vector dex_files; + dex_files.push_back(dex_file); + + UniquePtr thread_pool(new ThreadPool(1U)); + TimingLogger timings("CompileOne", false); + PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings); + + uint32_t method_idx = method->GetDexMethodIndex(); + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); + // Can we run DEX-to-DEX compiler on this class ? 
+ bool allow_dex_compilation; + { + ScopedObjectAccess soa(Thread::Current()); + const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); + mirror::ClassLoader* class_loader = soa.Decode(jclass_loader); + allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, *dex_file, class_def); + } + CompileMethod(code_item, method->GetAccessFlags(), method->GetInvokeType(), + class_def_idx, method_idx, jclass_loader, *dex_file, allow_dex_compilation); + + self->GetJniEnv()->DeleteGlobalRef(jclass_loader); + + self->TransitionFromSuspendedToRunnable(); +} + +void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, + ThreadPool& thread_pool, TimingLogger& timings) { + for (size_t i = 0; i != dex_files.size(); ++i) { + const DexFile* dex_file = dex_files[i]; + CHECK(dex_file != NULL); + ResolveDexFile(class_loader, *dex_file, thread_pool, timings); + } +} + +void CompilerDriver::PreCompile(jobject class_loader, const std::vector& dex_files, + ThreadPool& thread_pool, TimingLogger& timings) { + LoadImageClasses(timings); + + Resolve(class_loader, dex_files, thread_pool, timings); + + Verify(class_loader, dex_files, thread_pool, timings); + + InitializeClasses(class_loader, dex_files, thread_pool, timings); + + UpdateImageClasses(timings); +} + +bool CompilerDriver::IsImageClass(const char* descriptor) const { + DCHECK(descriptor != NULL); + if (image_classes_.get() == NULL) { + return true; + } + return image_classes_->find(descriptor) != image_classes_->end(); +} + +static void ResolveExceptionsForMethod(MethodHelper* mh, + std::set >& exceptions_to_resolve) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = mh->GetCodeItem(); + if (code_item == NULL) { + return; // native or abstract method + } + if (code_item->tries_size_ == 0) { + return; // nothing to process + } + const byte* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); + size_t num_encoded_catch_handlers = 
DecodeUnsignedLeb128(&encoded_catch_handler_list); + for (size_t i = 0; i < num_encoded_catch_handlers; i++) { + int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); + bool has_catch_all = false; + if (encoded_catch_handler_size <= 0) { + encoded_catch_handler_size = -encoded_catch_handler_size; + has_catch_all = true; + } + for (int32_t j = 0; j < encoded_catch_handler_size; j++) { + uint16_t encoded_catch_handler_handlers_type_idx = + DecodeUnsignedLeb128(&encoded_catch_handler_list); + // Add to set of types to resolve if not already in the dex cache resolved types + if (!mh->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { + exceptions_to_resolve.insert( + std::pair(encoded_catch_handler_handlers_type_idx, + &mh->GetDexFile())); + } + // ignore address associated with catch handler + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + if (has_catch_all) { + // ignore catch all address + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + } +} + +static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + std::set >* exceptions_to_resolve = + reinterpret_cast >*>(arg); + MethodHelper mh; + for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { + mirror::AbstractMethod* m = c->GetVirtualMethod(i); + mh.ChangeMethod(m); + ResolveExceptionsForMethod(&mh, *exceptions_to_resolve); + } + for (size_t i = 0; i < c->NumDirectMethods(); ++i) { + mirror::AbstractMethod* m = c->GetDirectMethod(i); + mh.ChangeMethod(m); + ResolveExceptionsForMethod(&mh, *exceptions_to_resolve); + } + return true; +} + +static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CompilerDriver::DescriptorSet* image_classes = + reinterpret_cast(arg); + image_classes->insert(ClassHelper(klass).GetDescriptor()); + return true; +} + +// Make a list of descriptors for classes to include in the image +void 
CompilerDriver::LoadImageClasses(TimingLogger& timings) + LOCKS_EXCLUDED(Locks::mutator_lock_) { + if (image_classes_.get() == NULL) { + return; + } + + // Make a first class to load all classes explicitly listed in the file + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + typedef DescriptorSet::iterator It; // TODO: C++0x auto + for (It it = image_classes_->begin(), end = image_classes_->end(); it != end;) { + std::string descriptor(*it); + SirtRef klass(self, class_linker->FindSystemClass(descriptor.c_str())); + if (klass.get() == NULL) { + image_classes_->erase(it++); + LOG(WARNING) << "Failed to find class " << descriptor; + Thread::Current()->ClearException(); + } else { + ++it; + } + } + + // Resolve exception classes referenced by the loaded classes. The catch logic assumes + // exceptions are resolved by the verifier when there is a catch block in an interested method. + // Do this here so that exception classes appear to have been specified image classes. 
+ std::set > unresolved_exception_types; + SirtRef java_lang_Throwable(self, + class_linker->FindSystemClass("Ljava/lang/Throwable;")); + do { + unresolved_exception_types.clear(); + class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor, + &unresolved_exception_types); + typedef std::set >::const_iterator It; // TODO: C++0x auto + for (It it = unresolved_exception_types.begin(), + end = unresolved_exception_types.end(); + it != end; ++it) { + uint16_t exception_type_idx = it->first; + const DexFile* dex_file = it->second; + mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file); + mirror:: ClassLoader* class_loader = NULL; + SirtRef klass(self, class_linker->ResolveType(*dex_file, exception_type_idx, + dex_cache, class_loader)); + if (klass.get() == NULL) { + const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx); + const char* descriptor = dex_file->GetTypeDescriptor(type_id); + LOG(FATAL) << "Failed to resolve class " << descriptor; + } + DCHECK(java_lang_Throwable->IsAssignableFrom(klass.get())); + } + // Resolving exceptions may load classes that reference more exceptions, iterate until no + // more are found + } while (!unresolved_exception_types.empty()); + + // We walk the roots looking for classes so that we'll pick up the + // above classes plus any classes them depend on such super + // classes, interfaces, and the required ClassLinker roots. 
+ class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get()); + + CHECK_NE(image_classes_->size(), 0U); + timings.AddSplit("LoadImageClasses"); +} + +static void MaybeAddToImageClasses(mirror::Class* klass, CompilerDriver::DescriptorSet* image_classes) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + while (!klass->IsObjectClass()) { + ClassHelper kh(klass); + const char* descriptor = kh.GetDescriptor(); + std::pair result = + image_classes->insert(descriptor); + if (result.second) { + LOG(INFO) << "Adding " << descriptor << " to image classes"; + } else { + return; + } + for (size_t i = 0; i < kh.NumDirectInterfaces(); ++i) { + MaybeAddToImageClasses(kh.GetDirectInterface(i), image_classes); + } + if (klass->IsArrayClass()) { + MaybeAddToImageClasses(klass->GetComponentType(), image_classes); + } + klass = klass->GetSuperClass(); + } +} + +void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void* arg) { + DCHECK(object != NULL); + DCHECK(arg != NULL); + CompilerDriver* compiler_driver = reinterpret_cast(arg); + MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get()); +} + +void CompilerDriver::UpdateImageClasses(TimingLogger& timings) { + if (image_classes_.get() == NULL) { + return; + } + + // Update image_classes_ with classes for objects created by methods. + Thread* self = Thread::Current(); + const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter"); + gc::Heap* heap = Runtime::Current()->GetHeap(); + // TODO: Image spaces only? 
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); + heap->FlushAllocStack(); + heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this); + self->EndAssertNoThreadSuspension(old_cause); + timings.AddSplit("UpdateImageClasses"); +} + +void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compiled_class) { + MutexLock mu(Thread::Current(), CompilerDriver::compiled_classes_lock_); + compiled_classes_.Put(ref, compiled_class); +} + +bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, + uint32_t type_idx) { + if (IsImage() && IsImageClass(dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)))) { + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + CHECK(resolved_class != NULL); + } + stats_->TypeInDexCache(); + return true; + } else { + stats_->TypeNotInDexCache(); + return false; + } +} + +bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, + uint32_t string_idx) { + // See also Compiler::ResolveDexFile + + bool result = false; + if (IsImage()) { + // We resolve all const-string strings when building for the image. 
+ ScopedObjectAccess soa(Thread::Current()); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache); + result = true; + } + if (result) { + stats_->StringInDexCache(); + } else { + stats_->StringNotInDexCache(); + } + return result; +} + +bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, + uint32_t type_idx, + bool* type_known_final, bool* type_known_abstract, + bool* equals_referrers_class) { + if (type_known_final != NULL) { + *type_known_final = false; + } + if (type_known_abstract != NULL) { + *type_known_abstract = false; + } + if (equals_referrers_class != NULL) { + *equals_referrers_class = false; + } + ScopedObjectAccess soa(Thread::Current()); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + // Get type from dex cache assuming it was populated by the verifier + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + if (resolved_class == NULL) { + stats_->TypeNeedsAccessCheck(); + return false; // Unknown class needs access checks. + } + const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx); + if (equals_referrers_class != NULL) { + *equals_referrers_class = (method_id.class_idx_ == type_idx); + } + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + if (referrer_class == NULL) { + stats_->TypeNeedsAccessCheck(); + return false; // Incomplete referrer knowledge needs access check. + } + // Perform access check, will return true if access is ok or false if we're going to have to + // check this at runtime (for example for class loaders). 
+ bool result = referrer_class->CanAccess(resolved_class); + if (result) { + stats_->TypeDoesntNeedAccessCheck(); + if (type_known_final != NULL) { + *type_known_final = resolved_class->IsFinal() && !resolved_class->IsArrayClass(); + } + if (type_known_abstract != NULL) { + *type_known_abstract = resolved_class->IsAbstract() && !resolved_class->IsArrayClass(); + } + } else { + stats_->TypeNeedsAccessCheck(); + } + return result; +} + +bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, + const DexFile& dex_file, + uint32_t type_idx) { + ScopedObjectAccess soa(Thread::Current()); + mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file); + // Get type from dex cache assuming it was populated by the verifier. + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + if (resolved_class == NULL) { + stats_->TypeNeedsAccessCheck(); + return false; // Unknown class needs access checks. + } + const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx); + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + if (referrer_class == NULL) { + stats_->TypeNeedsAccessCheck(); + return false; // Incomplete referrer knowledge needs access check. + } + // Perform access and instantiable checks, will return true if access is ok or false if we're + // going to have to check this at runtime (for example for class loaders). + bool result = referrer_class->CanAccess(resolved_class) && resolved_class->IsInstantiable(); + if (result) { + stats_->TypeDoesntNeedAccessCheck(); + } else { + stats_->TypeNeedsAccessCheck(); + } + return result; +} + +static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, + mirror::DexCache* dex_cache, + const DexCompilationUnit* mUnit) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // The passed dex_cache is a hint, sanity check before asking the class linker that will take a + // lock. 
+ if (dex_cache->GetDexFile() != mUnit->GetDexFile()) { + dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()); + } + mirror::ClassLoader* class_loader = soa.Decode(mUnit->GetClassLoader()); + const DexFile::MethodId& referrer_method_id = mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex()); + return mUnit->GetClassLinker()->ResolveType(*mUnit->GetDexFile(), referrer_method_id.class_idx_, + dex_cache, class_loader); +} + +static mirror::Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa, + const DexCompilationUnit* mUnit, + uint32_t field_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()); + mirror::ClassLoader* class_loader = soa.Decode(mUnit->GetClassLoader()); + return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache, + class_loader, false); +} + +static mirror::AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa, + const DexCompilationUnit* mUnit, + uint32_t method_idx, + InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()); + mirror::ClassLoader* class_loader = soa.Decode(mUnit->GetClassLoader()); + return mUnit->GetClassLinker()->ResolveMethod(*mUnit->GetDexFile(), method_idx, dex_cache, + class_loader, NULL, type); +} + +bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, + int& field_offset, bool& is_volatile, bool is_put) { + ScopedObjectAccess soa(Thread::Current()); + // Conservative defaults. + field_offset = -1; + is_volatile = true; + // Try to resolve field and ignore if an Incompatible Class Change Error (ie is static). 
+ mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); + if (resolved_field != NULL && !resolved_field->IsStatic()) { + mirror::Class* referrer_class = + ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(), + mUnit); + if (referrer_class != NULL) { + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + bool access_ok = referrer_class->CanAccess(fields_class) && + referrer_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()); + if (!access_ok) { + // The referring class can't access the resolved field, this may occur as a result of a + // protected field being made public by a sub-class. Resort to the dex file to determine + // the correct class for the access check. + const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile(); + mirror::Class* dex_fields_class = mUnit->GetClassLinker()->ResolveType(dex_file, + dex_file.GetFieldId(field_idx).class_idx_, + referrer_class); + access_ok = referrer_class->CanAccess(dex_fields_class) && + referrer_class->CanAccessMember(dex_fields_class, + resolved_field->GetAccessFlags()); + } + bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() && + fields_class != referrer_class; + if (access_ok && !is_write_to_final_from_wrong_class) { + field_offset = resolved_field->GetOffset().Int32Value(); + is_volatile = resolved_field->IsVolatile(); + stats_->ResolvedInstanceField(); + return true; // Fast path. + } + } + } + // Clean up any exception left by field/type resolution + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); + } + stats_->UnresolvedInstanceField(); + return false; // Incomplete knowledge needs slow path. 
+} + +bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, + int& field_offset, int& ssb_index, + bool& is_referrers_class, bool& is_volatile, + bool is_put) { + ScopedObjectAccess soa(Thread::Current()); + // Conservative defaults. + field_offset = -1; + ssb_index = -1; + is_referrers_class = false; + is_volatile = true; + // Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static). + mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx); + if (resolved_field != NULL && resolved_field->IsStatic()) { + mirror::Class* referrer_class = + ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(), + mUnit); + if (referrer_class != NULL) { + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + if (fields_class == referrer_class) { + is_referrers_class = true; // implies no worrying about class initialization + field_offset = resolved_field->GetOffset().Int32Value(); + is_volatile = resolved_field->IsVolatile(); + stats_->ResolvedLocalStaticField(); + return true; // fast path + } else { + bool access_ok = referrer_class->CanAccess(fields_class) && + referrer_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()); + if (!access_ok) { + // The referring class can't access the resolved field, this may occur as a result of a + // protected field being made public by a sub-class. Resort to the dex file to determine + // the correct class for the access check. Don't change the field's class as that is + // used to identify the SSB. 
+ const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile(); + mirror::Class* dex_fields_class = + mUnit->GetClassLinker()->ResolveType(dex_file, + dex_file.GetFieldId(field_idx).class_idx_, + referrer_class); + access_ok = referrer_class->CanAccess(dex_fields_class) && + referrer_class->CanAccessMember(dex_fields_class, + resolved_field->GetAccessFlags()); + } + bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal(); + if (access_ok && !is_write_to_final_from_wrong_class) { + // We have the resolved field, we must make it into a ssbIndex for the referrer + // in its static storage base (which may fail if it doesn't have a slot for it) + // TODO: for images we can elide the static storage base null check + // if we know there's a non-null entry in the image + mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()); + if (fields_class->GetDexCache() == dex_cache) { + // common case where the dex cache of both the referrer and the field are the same, + // no need to search the dex file + ssb_index = fields_class->GetDexTypeIndex(); + field_offset = resolved_field->GetOffset().Int32Value(); + is_volatile = resolved_field->IsVolatile(); + stats_->ResolvedStaticField(); + return true; + } + // Search dex file for localized ssb index, may fail if field's class is a parent + // of the class mentioned in the dex file and there is no dex cache entry. 
+ const DexFile::StringId* string_id = + mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor()); + if (string_id != NULL) { + const DexFile::TypeId* type_id = + mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id)); + if (type_id != NULL) { + // medium path, needs check of static storage base being initialized + ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id); + field_offset = resolved_field->GetOffset().Int32Value(); + is_volatile = resolved_field->IsVolatile(); + stats_->ResolvedStaticField(); + return true; + } + } + } + } + } + } + // Clean up any exception left by field/type resolution + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); + } + stats_->UnresolvedStaticField(); + return false; // Incomplete knowledge needs slow path. +} + +void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, + mirror::Class* referrer_class, + mirror::AbstractMethod* method, + uintptr_t& direct_code, + uintptr_t& direct_method, + bool update_stats) { + // For direct and static methods compute possible direct_code and direct_method values, ie + // an address for the Method* being invoked and an address of the code for that Method*. + // For interface calls compute a value for direct_method that is the interface method being + // invoked, so this can be passed to the out-of-line runtime support code. 
+ direct_code = 0; + direct_method = 0; + if (compiler_backend_ == kPortable) { + if (sharp_type != kStatic && sharp_type != kDirect) { + return; + } + } else { + if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) { + return; + } + } + bool method_code_in_boot = method->GetDeclaringClass()->GetClassLoader() == NULL; + if (!method_code_in_boot) { + return; + } + bool has_clinit_trampoline = method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); + if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) { + // Ensure we run the clinit trampoline unless we are invoking a static method in the same class. + return; + } + if (update_stats) { + if (sharp_type != kInterface) { // Interfaces always go via a trampoline. + stats_->DirectCallsToBoot(type); + } + stats_->DirectMethodsToBoot(type); + } + bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1; + if (compiling_boot) { + if (support_boot_image_fixup_) { + MethodHelper mh(method); + if (IsImageClass(mh.GetDeclaringClassDescriptor())) { + // We can only branch directly to Methods that are resolved in the DexCache. + // Otherwise we won't invoke the resolution trampoline. 
+ direct_method = -1; + direct_code = -1; + } + } + } else { + if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) { + direct_method = reinterpret_cast(method); + } + direct_code = reinterpret_cast(method->GetEntryPointFromCompiledCode()); + } +} + +bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc, + InvokeType& invoke_type, + MethodReference& target_method, + int& vtable_idx, + uintptr_t& direct_code, uintptr_t& direct_method, + bool update_stats) { + ScopedObjectAccess soa(Thread::Current()); + vtable_idx = -1; + direct_code = 0; + direct_method = 0; + mirror::AbstractMethod* resolved_method = + ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method.dex_method_index, + invoke_type); + if (resolved_method != NULL) { + // Don't try to fast-path if we don't understand the caller's class or this appears to be an + // Incompatible Class Change Error. + mirror::Class* referrer_class = + ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(), + mUnit); + bool icce = resolved_method->CheckIncompatibleClassChange(invoke_type); + if (referrer_class != NULL && !icce) { + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + if (!referrer_class->CanAccess(methods_class) || + !referrer_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags())) { + // The referring class can't access the resolved method, this may occur as a result of a + // protected method being made public by implementing an interface that re-declares the + // method public. Resort to the dex file to determine the correct class for the access + // check. 
+ uint16_t class_idx = + target_method.dex_file->GetMethodId(target_method.dex_method_index).class_idx_; + methods_class = mUnit->GetClassLinker()->ResolveType(*target_method.dex_file, + class_idx, referrer_class); + } + if (referrer_class->CanAccess(methods_class) && + referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) { + const bool kEnableFinalBasedSharpening = true; + // Sharpen a virtual call into a direct call when the target is known not to have been + // overridden (ie is final). + bool can_sharpen_virtual_based_on_type = + (invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal()); + // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of + // the super class. + bool can_sharpen_super_based_on_type = (invoke_type == kSuper) && + (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) && + resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() && + (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method); + + if (kEnableFinalBasedSharpening && (can_sharpen_virtual_based_on_type || + can_sharpen_super_based_on_type)) { + // Sharpen a virtual call into a direct call. The method_idx is into referrer's + // dex cache, check that this resolved method is where we expect it. 
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method.dex_method_index) == + resolved_method) << PrettyMethod(resolved_method); + if (update_stats) { + stats_->ResolvedMethod(invoke_type); + stats_->VirtualMadeDirect(invoke_type); + } + GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, resolved_method, + direct_code, direct_method, update_stats); + invoke_type = kDirect; + return true; + } + const bool kEnableVerifierBasedSharpening = true; + if (kEnableVerifierBasedSharpening && (invoke_type == kVirtual || + invoke_type == kInterface)) { + // Did the verifier record a more precise invoke target based on its type information? + const MethodReference caller_method(mUnit->GetDexFile(), mUnit->GetDexMethodIndex()); + const MethodReference* devirt_map_target = + verifier::MethodVerifier::GetDevirtMap(caller_method, dex_pc); + if (devirt_map_target != NULL) { + mirror::DexCache* target_dex_cache = + mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file); + mirror::ClassLoader* class_loader = + soa.Decode(mUnit->GetClassLoader()); + mirror::AbstractMethod* called_method = + mUnit->GetClassLinker()->ResolveMethod(*devirt_map_target->dex_file, + devirt_map_target->dex_method_index, + target_dex_cache, class_loader, NULL, + kVirtual); + CHECK(called_method != NULL); + CHECK(!called_method->IsAbstract()); + GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, called_method, + direct_code, direct_method, update_stats); + bool compiler_needs_dex_cache = + (GetCompilerBackend() == kPortable) || + (GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) || + (direct_code == 0) || (direct_code == static_cast(-1)) || + (direct_method == 0) || (direct_method == static_cast(-1)); + if ((devirt_map_target->dex_file != target_method.dex_file) && + compiler_needs_dex_cache) { + // We need to use the dex cache to find either the method or code, and the dex file + // containing the method isn't the one expected for the 
target method. Try to find + // the method within the expected target dex file. + // TODO: the -1 could be handled as direct code if the patching new the target dex + // file. + // TODO: quick only supports direct pointers with Thumb2. + // TODO: the following should be factored into a common helper routine to find + // one dex file's method within another. + const DexFile* dexfile = target_method.dex_file; + const DexFile* cm_dexfile = + called_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); + const DexFile::MethodId& cm_method_id = + cm_dexfile->GetMethodId(called_method->GetDexMethodIndex()); + const char* cm_descriptor = cm_dexfile->StringByTypeIdx(cm_method_id.class_idx_); + const DexFile::StringId* descriptor = dexfile->FindStringId(cm_descriptor); + if (descriptor != NULL) { + const DexFile::TypeId* type_id = + dexfile->FindTypeId(dexfile->GetIndexForStringId(*descriptor)); + if (type_id != NULL) { + const char* cm_name = cm_dexfile->GetMethodName(cm_method_id); + const DexFile::StringId* name = dexfile->FindStringId(cm_name); + if (name != NULL) { + uint16_t return_type_idx; + std::vector param_type_idxs; + bool success = dexfile->CreateTypeList(&return_type_idx, ¶m_type_idxs, + cm_dexfile->GetMethodSignature(cm_method_id)); + if (success) { + const DexFile::ProtoId* sig = + dexfile->FindProtoId(return_type_idx, param_type_idxs); + if (sig != NULL) { + const DexFile::MethodId* method_id = dexfile->FindMethodId(*type_id, + *name, *sig); + if (method_id != NULL) { + if (update_stats) { + stats_->ResolvedMethod(invoke_type); + stats_->VirtualMadeDirect(invoke_type); + stats_->PreciseTypeDevirtualization(); + } + target_method.dex_method_index = dexfile->GetIndexForMethodId(*method_id); + invoke_type = kDirect; + return true; + } + } + } + } + } + } + // TODO: the stats for direct code and method are off as we failed to find the direct + // method in the referring method's dex cache/file. 
+ } else { + if (update_stats) { + stats_->ResolvedMethod(invoke_type); + stats_->VirtualMadeDirect(invoke_type); + stats_->PreciseTypeDevirtualization(); + } + target_method = *devirt_map_target; + invoke_type = kDirect; + return true; + } + } + } + if (invoke_type == kSuper) { + // Unsharpened super calls are suspicious so go slow-path. + } else { + // Sharpening failed so generate a regular resolved method dispatch. + if (update_stats) { + stats_->ResolvedMethod(invoke_type); + } + if (invoke_type == kVirtual || invoke_type == kSuper) { + vtable_idx = resolved_method->GetMethodIndex(); + } + GetCodeAndMethodForDirectCall(invoke_type, invoke_type, referrer_class, resolved_method, + direct_code, direct_method, update_stats); + return true; + } + } + } + } + // Clean up any exception left by method/invoke_type resolution + if (soa.Self()->IsExceptionPending()) { + soa.Self()->ClearException(); + } + if (update_stats) { + stats_->UnresolvedMethod(invoke_type); + } + return false; // Incomplete knowledge needs slow path. 
+} + +bool CompilerDriver::IsSafeCast(const MethodReference& mr, uint32_t dex_pc) { + bool result = verifier::MethodVerifier::IsSafeCast(mr, dex_pc); + if (result) { + stats_->SafeCast(); + } else { + stats_->NotASafeCast(); + } + return result; +} + + +void CompilerDriver::AddCodePatch(const DexFile* dex_file, + uint32_t referrer_method_idx, + InvokeType referrer_invoke_type, + uint32_t target_method_idx, + InvokeType target_invoke_type, + size_t literal_offset) { + MutexLock mu(Thread::Current(), compiled_methods_lock_); + code_to_patch_.push_back(new PatchInformation(dex_file, + referrer_method_idx, + referrer_invoke_type, + target_method_idx, + target_invoke_type, + literal_offset)); +} +void CompilerDriver::AddMethodPatch(const DexFile* dex_file, + uint32_t referrer_method_idx, + InvokeType referrer_invoke_type, + uint32_t target_method_idx, + InvokeType target_invoke_type, + size_t literal_offset) { + MutexLock mu(Thread::Current(), compiled_methods_lock_); + methods_to_patch_.push_back(new PatchInformation(dex_file, + referrer_method_idx, + referrer_invoke_type, + target_method_idx, + target_invoke_type, + literal_offset)); +} + +class ParallelCompilationManager { + public: + typedef void Callback(const ParallelCompilationManager* manager, size_t index); + + ParallelCompilationManager(ClassLinker* class_linker, + jobject class_loader, + CompilerDriver* compiler, + const DexFile* dex_file, + ThreadPool& thread_pool) + : class_linker_(class_linker), + class_loader_(class_loader), + compiler_(compiler), + dex_file_(dex_file), + thread_pool_(&thread_pool) {} + + ClassLinker* GetClassLinker() const { + CHECK(class_linker_ != NULL); + return class_linker_; + } + + jobject GetClassLoader() const { + return class_loader_; + } + + CompilerDriver* GetCompiler() const { + CHECK(compiler_ != NULL); + return compiler_; + } + + const DexFile* GetDexFile() const { + CHECK(dex_file_ != NULL); + return dex_file_; + } + + void ForAll(size_t begin, size_t end, Callback 
callback, size_t work_units) { + Thread* self = Thread::Current(); + self->AssertNoPendingException(); + CHECK_GT(work_units, 0U); + + std::vector closures(work_units); + for (size_t i = 0; i < work_units; ++i) { + closures[i] = new ForAllClosure(this, begin + i, end, callback, work_units); + thread_pool_->AddTask(self, closures[i]); + } + thread_pool_->StartWorkers(self); + + // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker + // thread destructor's called below perform join). + CHECK_NE(self->GetState(), kRunnable); + + // Wait for all the worker threads to finish. + thread_pool_->Wait(self, true, false); + } + + private: + + class ForAllClosure : public Task { + public: + ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback, + size_t stripe) + : manager_(manager), + begin_(begin), + end_(end), + callback_(callback), + stripe_(stripe) + { + + } + + virtual void Run(Thread* self) { + for (size_t i = begin_; i < end_; i += stripe_) { + callback_(manager_, i); + self->AssertNoPendingException(); + } + } + + virtual void Finalize() { + delete this; + } + private: + const ParallelCompilationManager* const manager_; + const size_t begin_; + const size_t end_; + const Callback* const callback_; + const size_t stripe_; + }; + + ClassLinker* const class_linker_; + const jobject class_loader_; + CompilerDriver* const compiler_; + const DexFile* const dex_file_; + ThreadPool* const thread_pool_; +}; + +// Return true if the class should be skipped during compilation. We +// never skip classes in the boot class loader. However, if we have a +// non-boot class loader and we can resolve the class in the boot +// class loader, we do skip the class. This happens if an app bundles +// classes found in the boot classpath. Since at runtime we will +// select the class from the boot classpath, do not attempt to resolve +// or compile it now. 
+static bool SkipClass(mirror::ClassLoader* class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& class_def) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (class_loader == NULL) { + return false; + } + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Class* klass = class_linker->FindClass(descriptor, NULL); + if (klass == NULL) { + Thread* self = Thread::Current(); + CHECK(self->IsExceptionPending()); + self->ClearException(); + return false; + } + return true; +} + +static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager, size_t class_def_index) + LOCKS_EXCLUDED(Locks::mutator_lock_) { + ScopedObjectAccess soa(Thread::Current()); + mirror::ClassLoader* class_loader = soa.Decode(manager->GetClassLoader()); + const DexFile& dex_file = *manager->GetDexFile(); + + // Method and Field are the worst. We can't resolve without either + // context from the code use (to disambiguate virtual vs direct + // method and instance vs static field) or from class + // definitions. While the compiler will resolve what it can as it + // needs it, here we try to resolve fields and methods used in class + // definitions, since many of them many never be referenced by + // generated code. + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + if (SkipClass(class_loader, dex_file, class_def)) { + return; + } + + // Note the class_data pointer advances through the headers, + // static fields, instance fields, direct methods, and virtual + // methods. 
+ const byte* class_data = dex_file.GetClassData(class_def); + if (class_data == NULL) { + // empty class such as a marker interface + return; + } + Thread* self = Thread::Current(); + ClassLinker* class_linker = manager->GetClassLinker(); + mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file); + ClassDataItemIterator it(dex_file, class_data); + while (it.HasNextStaticField()) { + mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, + class_loader, true); + if (field == NULL) { + CHECK(self->IsExceptionPending()); + self->ClearException(); + } + it.Next(); + } + // If an instance field is final then we need to have a barrier on the return, static final + // fields are assigned within the lock held for class initialization. + bool requires_constructor_barrier = false; + while (it.HasNextInstanceField()) { + if ((it.GetMemberAccessFlags() & kAccFinal) != 0) { + requires_constructor_barrier = true; + } + + mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, + class_loader, false); + if (field == NULL) { + CHECK(self->IsExceptionPending()); + self->ClearException(); + } + it.Next(); + } + if (requires_constructor_barrier) { + manager->GetCompiler()->AddRequiresConstructorBarrier(soa.Self(), manager->GetDexFile(), + class_def_index); + } + while (it.HasNextDirectMethod()) { + mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, NULL, + it.GetMethodInvokeType(class_def)); + if (method == NULL) { + CHECK(self->IsExceptionPending()); + self->ClearException(); + } + it.Next(); + } + while (it.HasNextVirtualMethod()) { + mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, NULL, + it.GetMethodInvokeType(class_def)); + if (method == NULL) { + CHECK(self->IsExceptionPending()); + self->ClearException(); + } + it.Next(); + } + DCHECK(!it.HasNext()); +} + 
+static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx) + LOCKS_EXCLUDED(Locks::mutator_lock_) { + // Class derived values are more complicated, they require the linker and loader. + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = manager->GetClassLinker(); + const DexFile& dex_file = *manager->GetDexFile(); + mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file); + mirror::ClassLoader* class_loader = soa.Decode(manager->GetClassLoader()); + mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + + if (klass == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + Thread::Current()->ClearException(); + } +} + +void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, + ThreadPool& thread_pool, TimingLogger& timings) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + + // TODO: we could resolve strings here, although the string table is largely filled with class + // and method names. 
+ + ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); + context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_); + timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types"); + + context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); + timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields"); +} + +void CompilerDriver::Verify(jobject class_loader, const std::vector& dex_files, + ThreadPool& thread_pool, TimingLogger& timings) { + for (size_t i = 0; i != dex_files.size(); ++i) { + const DexFile* dex_file = dex_files[i]; + CHECK(dex_file != NULL); + VerifyDexFile(class_loader, *dex_file, thread_pool, timings); + } +} + +static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index) + LOCKS_EXCLUDED(Locks::mutator_lock_) { + ScopedObjectAccess soa(Thread::Current()); + const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index); + const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def); + mirror::Class* klass = + manager->GetClassLinker()->FindClass(descriptor, + soa.Decode(manager->GetClassLoader())); + if (klass == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + + /* + * At compile time, we can still structurally verify the class even if FindClass fails. + * This is to ensure the class is structurally sound for compilation. An unsound class + * will be rejected by the verifier and later skipped during compilation in the compiler. 
+ */ + mirror::DexCache* dex_cache = manager->GetClassLinker()->FindDexCache(*manager->GetDexFile()); + std::string error_msg; + if (verifier::MethodVerifier::VerifyClass(manager->GetDexFile(), + dex_cache, + soa.Decode(manager->GetClassLoader()), + class_def_index, error_msg, true) == + verifier::MethodVerifier::kHardFailure) { + const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index); + LOG(ERROR) << "Verification failed on class " + << PrettyDescriptor(manager->GetDexFile()->GetClassDescriptor(class_def)) + << " because: " << error_msg; + } + return; + } + CHECK(klass->IsResolved()) << PrettyClass(klass); + manager->GetClassLinker()->VerifyClass(klass); + + if (klass->IsErroneous()) { + // ClassLinker::VerifyClass throws, which isn't useful in the compiler. + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + } + + CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) + << PrettyDescriptor(klass) << ": state=" << klass->GetStatus(); + soa.Self()->AssertNoPendingException(); +} + +void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, + ThreadPool& thread_pool, TimingLogger& timings) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); + context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_); + timings.AddSplit("Verify " + dex_file.GetLocation()); +} + +static const char* class_initializer_black_list[] = { + "Landroid/app/ActivityThread;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/bluetooth/BluetoothAudioGateway;", // Calls android.bluetooth.BluetoothAudioGateway.classInitNative(). + "Landroid/bluetooth/HeadsetBase;", // Calls android.bluetooth.HeadsetBase.classInitNative(). + "Landroid/content/res/CompatibilityInfo;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int. 
+ "Landroid/content/res/CompatibilityInfo$1;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int. + "Landroid/content/UriMatcher;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/database/CursorWindow;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int. + "Landroid/database/sqlite/SQLiteConnection;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/database/sqlite/SQLiteConnection$Operation;", // Requires SimpleDateFormat -> java.util.Locale. + "Landroid/database/sqlite/SQLiteDatabaseConfiguration;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/database/sqlite/SQLiteDebug;", // Calls android.util.Log.isLoggable. + "Landroid/database/sqlite/SQLiteOpenHelper;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex. + "Landroid/database/sqlite/SQLiteQueryBuilder;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/drm/DrmManagerClient;", // Calls System.loadLibrary. + "Landroid/graphics/drawable/AnimatedRotateDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/AnimationDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/BitmapDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/ClipDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/ColorDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/Drawable;", // Requires android.graphics.Rect. + "Landroid/graphics/drawable/DrawableContainer;", // Sub-class of Drawable. + "Landroid/graphics/drawable/GradientDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/LayerDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/NinePatchDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/RotateDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/ScaleDrawable;", // Sub-class of Drawable. 
+ "Landroid/graphics/drawable/ShapeDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/StateListDrawable;", // Sub-class of Drawable. + "Landroid/graphics/drawable/TransitionDrawable;", // Sub-class of Drawable. + "Landroid/graphics/Matrix;", // Calls android.graphics.Matrix.native_create. + "Landroid/graphics/Matrix$1;", // Requires Matrix. + "Landroid/graphics/PixelFormat;", // Calls android.graphics.PixelFormat.nativeClassInit(). + "Landroid/graphics/Rect;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/graphics/SurfaceTexture;", // Calls android.graphics.SurfaceTexture.nativeClassInit(). + "Landroid/graphics/Typeface;", // Calls android.graphics.Typeface.nativeCreate. + "Landroid/inputmethodservice/ExtractEditText;", // Requires android.widget.TextView. + "Landroid/media/AmrInputStream;", // Calls OsConstants.initConstants. + "Landroid/media/CamcorderProfile;", // Calls OsConstants.initConstants. + "Landroid/media/CameraProfile;", // Calls System.loadLibrary. + "Landroid/media/DecoderCapabilities;", // Calls System.loadLibrary. + "Landroid/media/EncoderCapabilities;", // Calls OsConstants.initConstants. + "Landroid/media/ExifInterface;", // Calls OsConstants.initConstants. + "Landroid/media/MediaCodec;", // Calls OsConstants.initConstants. + "Landroid/media/MediaCodecList;", // Calls OsConstants.initConstants. + "Landroid/media/MediaCrypto;", // Calls OsConstants.initConstants. + "Landroid/media/MediaDrm;", // Calls OsConstants.initConstants. + "Landroid/media/MediaExtractor;", // Calls OsConstants.initConstants. + "Landroid/media/MediaFile;", // Requires DecoderCapabilities. + "Landroid/media/MediaMetadataRetriever;", // Calls OsConstants.initConstants. + "Landroid/media/MediaMuxer;", // Calls OsConstants.initConstants. + "Landroid/media/MediaPlayer;", // Calls System.loadLibrary. + "Landroid/media/MediaRecorder;", // Calls System.loadLibrary. + "Landroid/media/MediaScanner;", // Calls System.loadLibrary. 
+ "Landroid/media/ResampleInputStream;", // Calls OsConstants.initConstants. + "Landroid/media/SoundPool;", // Calls OsConstants.initConstants. + "Landroid/media/videoeditor/MediaArtistNativeHelper;", // Calls OsConstants.initConstants. + "Landroid/media/videoeditor/VideoEditorProfile;", // Calls OsConstants.initConstants. + "Landroid/mtp/MtpDatabase;", // Calls OsConstants.initConstants. + "Landroid/mtp/MtpDevice;", // Calls OsConstants.initConstants. + "Landroid/mtp/MtpServer;", // Calls OsConstants.initConstants. + "Landroid/net/NetworkInfo;", // Calls java.util.EnumMap. -> java.lang.Enum.getSharedConstants -> System.identityHashCode. + "Landroid/net/Proxy;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/net/SSLCertificateSocketFactory;", // Requires javax.net.ssl.HttpsURLConnection. + "Landroid/net/Uri;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex. + "Landroid/net/Uri$AbstractHierarchicalUri;", // Requires Uri. + "Landroid/net/Uri$HierarchicalUri;", // Requires Uri. + "Landroid/net/Uri$OpaqueUri;", // Requires Uri. + "Landroid/net/Uri$StringUri;", // Requires Uri. + "Landroid/net/WebAddress;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/nfc/NdefRecord;", // Calls String.getBytes -> java.nio.charset.Charset. + "Landroid/opengl/EGL14;", // Calls android.opengl.EGL14._nativeClassInit. + "Landroid/opengl/GLES10;", // Calls android.opengl.GLES10._nativeClassInit. + "Landroid/opengl/GLES10Ext;", // Calls android.opengl.GLES10Ext._nativeClassInit. + "Landroid/opengl/GLES11;", // Requires GLES10. + "Landroid/opengl/GLES11Ext;", // Calls android.opengl.GLES11Ext._nativeClassInit. + "Landroid/opengl/GLES20;", // Calls android.opengl.GLES20._nativeClassInit. + "Landroid/opengl/GLUtils;", // Calls android.opengl.GLUtils.nativeClassInit. + "Landroid/os/Build;", // Calls -..-> android.os.SystemProperties.native_get. + "Landroid/os/Build$VERSION;", // Requires Build. 
+ "Landroid/os/Debug;", // Requires android.os.Environment. + "Landroid/os/Environment;", // Calls System.getenv. + "Landroid/os/FileUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/os/StrictMode;", // Calls android.util.Log.isLoggable. + "Landroid/os/StrictMode$VmPolicy;", // Requires StrictMode. + "Landroid/os/Trace;", // Calls android.os.Trace.nativeGetEnabledTags. + "Landroid/os/UEventObserver;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex. + "Landroid/provider/ContactsContract;", // Calls OsConstants.initConstants. + "Landroid/provider/Settings$Global;", // Calls OsConstants.initConstants. + "Landroid/provider/Settings$Secure;", // Requires android.net.Uri. + "Landroid/provider/Settings$System;", // Requires android.net.Uri. + "Landroid/renderscript/RenderScript;", // Calls System.loadLibrary. + "Landroid/server/BluetoothService;", // Calls android.server.BluetoothService.classInitNative. + "Landroid/server/BluetoothEventLoop;", // Calls android.server.BluetoothEventLoop.classInitNative. + "Landroid/telephony/PhoneNumberUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/telephony/TelephonyManager;", // Calls OsConstants.initConstants. + "Landroid/text/AutoText;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int. + "Landroid/text/Layout;", // Calls com.android.internal.util.ArrayUtils.emptyArray -> System.identityHashCode. + "Landroid/text/BoringLayout;", // Requires Layout. + "Landroid/text/DynamicLayout;", // Requires Layout. + "Landroid/text/Html$HtmlParser;", // Calls -..-> String.toLowerCase -> java.util.Locale. + "Landroid/text/StaticLayout;", // Requires Layout. + "Landroid/text/TextUtils;", // Requires android.util.DisplayMetrics. + "Landroid/util/DisplayMetrics;", // Calls SystemProperties.native_get_int. + "Landroid/util/Patterns;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. 
+ "Landroid/view/Choreographer;", // Calls SystemProperties.native_get_boolean. + "Landroid/util/Patterns;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/view/GLES20Canvas;", // Calls GLES20Canvas.nIsAvailable(). + "Landroid/view/GLES20RecordingCanvas;", // Requires android.view.GLES20Canvas. + "Landroid/view/GestureDetector;", // Calls android.view.GLES20Canvas.nIsAvailable. + "Landroid/view/HardwareRenderer$Gl20Renderer;", // Requires SystemProperties.native_get. + "Landroid/view/HardwareRenderer$GlRenderer;", // Requires SystemProperties.native_get. + "Landroid/view/InputEventConsistencyVerifier;", // Requires android.os.Build. + "Landroid/view/Surface;", // Requires SystemProperties.native_get. + "Landroid/view/SurfaceControl;", // Calls OsConstants.initConstants. + "Landroid/view/animation/AlphaAnimation;", // Requires Animation. + "Landroid/view/animation/Animation;", // Calls SystemProperties.native_get_boolean. + "Landroid/view/animation/AnimationSet;", // Calls OsConstants.initConstants. + "Landroid/view/textservice/SpellCheckerSubtype;", // Calls Class.getDex(). + "Landroid/webkit/JniUtil;", // Calls System.loadLibrary. + "Landroid/webkit/PluginManager;", // // Calls OsConstants.initConstants. + "Landroid/webkit/WebViewCore;", // Calls System.loadLibrary. + "Landroid/webkit/WebViewFactory$Preloader;", // Calls to Class.forName. + "Landroid/webkit/WebViewInputDispatcher;", // Calls Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/webkit/URLUtil;", // Calls Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Landroid/widget/AutoCompleteTextView;", // Requires TextView. + "Landroid/widget/Button;", // Requires TextView. + "Landroid/widget/CheckBox;", // Requires TextView. + "Landroid/widget/CheckedTextView;", // Requires TextView. + "Landroid/widget/CompoundButton;", // Requires TextView. + "Landroid/widget/EditText;", // Requires TextView. 
+ "Landroid/widget/NumberPicker;", // Requires java.util.Locale. + "Landroid/widget/ScrollBarDrawable;", // Sub-class of Drawable. + "Landroid/widget/SearchView$SearchAutoComplete;", // Requires TextView. + "Landroid/widget/Switch;", // Requires TextView. + "Landroid/widget/TextView;", // Calls Paint. -> Paint.native_init. + "Lcom/android/i18n/phonenumbers/AsYouTypeFormatter;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Lcom/android/i18n/phonenumbers/MetadataManager;", // Calls OsConstants.initConstants. + "Lcom/android/i18n/phonenumbers/PhoneNumberMatcher;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Lcom/android/i18n/phonenumbers/PhoneNumberUtil;", // Requires java.util.logging.LogManager. + "Lcom/android/i18n/phonenumbers/geocoding/AreaCodeMap;", // Calls OsConstants.initConstants. + "Lcom/android/i18n/phonenumbers/geocoding/PhoneNumberOfflineGeocoder;", // Calls OsConstants.initConstants. + "Lcom/android/internal/os/SamplingProfilerIntegration;", // Calls SystemProperties.native_get_int. + "Lcom/android/internal/policy/impl/PhoneWindow;", // Calls android.os.Binder.init. + "Lcom/android/internal/view/menu/ActionMenuItemView;", // Requires TextView. + "Lcom/android/internal/widget/DialogTitle;", // Requires TextView. + "Lcom/android/org/bouncycastle/asn1/StreamUtil;", // Calls Runtime.getRuntime().maxMemory(). + "Lcom/android/org/bouncycastle/asn1/pkcs/MacData;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/asn1/pkcs/RSASSAPSSparams;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/asn1/cms/SignedData;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/asn1/x509/GeneralSubtree;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/asn1/x9/X9ECParameters;", // Calls native ... -> java.math.NativeBN.BN_new(). 
+ "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$MD5;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA1;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA256;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA384;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA512;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/bouncycastle/crypto/engines/RSABlindedEngine;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/crypto/generators/DHKeyGeneratorHelper;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/crypto/generators/DHParametersGenerator;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/crypto/generators/DHParametersHelper;", // Calls System.getenv -> OsConstants.initConstants. + "Lcom/android/org/bouncycastle/crypto/generators/DSAKeyPairGenerator;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/crypto/generators/DSAParametersGenerator;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/crypto/generators/RSAKeyPairGenerator;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/dh/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/dsa/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$EC;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDH;", // Calls OsConstants.initConstants. 
+ "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDHC;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDSA;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECMQV;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/BCRSAPrivateCrtKey;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/BCRSAPrivateKey;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jcajce/provider/keystore/pkcs12/PKCS12KeyStoreSpi$BCPKCS12KeyStore;", // Calls Thread.currentThread. + "Lcom/android/org/bouncycastle/jcajce/provider/keystore/pkcs12/PKCS12KeyStoreSpi;", // Calls Thread.currentThread. + "Lcom/android/org/bouncycastle/jce/PKCS10CertificationRequest;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jce/provider/CertBlacklist;", // Calls System.getenv -> OsConstants.initConstants. + "Lcom/android/org/bouncycastle/jce/provider/JCERSAPrivateCrtKey;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jce/provider/JCERSAPrivateKey;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/jce/provider/PKIXCertPathValidatorSpi;", // Calls System.getenv -> OsConstants.initConstants. + "Lcom/android/org/bouncycastle/math/ec/ECConstants;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/math/ec/Tnaf;", // Calls native ... -> java.math.NativeBN.BN_new(). 
+ "Lcom/android/org/bouncycastle/util/BigIntegers;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/bouncycastle/x509/X509Util;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lcom/android/org/conscrypt/CipherSuite;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/FileClientSessionCache$CacheFile;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/HandshakeIODataStream;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/Logger;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/NativeCrypto;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLECKeyPairGenerator;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/OpenSSLEngine;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLMac$HmacMD5;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA1;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA256;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA384;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA512;", // Calls native NativeCrypto.clinit(). + "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$MD5;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA1;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA256;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA384;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA512;", // Requires com.android.org.conscrypt.NativeCrypto. + "Lcom/android/org/conscrypt/OpenSSLX509CertPath;", // Calls OsConstants.initConstants. 
+ "Lcom/android/org/conscrypt/OpenSSLX509CertificateFactory;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/PRF;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/SSLSessionImpl;", // Calls OsConstants.initConstants. + "Lcom/android/org/conscrypt/TrustedCertificateStore;", // Calls System.getenv -> OsConstants.initConstants. + "Lcom/android/okhttp/ConnectionPool;", // Calls OsConstants.initConstants. + "Lcom/android/okhttp/OkHttpClient;", // Calls OsConstants.initConstants. + "Lcom/android/okhttp/internal/DiskLruCache;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Lcom/android/okhttp/internal/Util;", // Calls OsConstants.initConstants. + "Lcom/android/okhttp/internal/http/HttpsURLConnectionImpl;", // Calls VMClassLoader.getBootClassPathSize. + "Lcom/android/okhttp/internal/spdy/SpdyConnection;", // Calls OsConstants.initConstants. + "Lcom/android/okhttp/internal/spdy/SpdyReader;", // Calls OsConstants.initConstants. + "Lcom/android/okhttp/internal/tls/OkHostnameVerifier;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Lcom/google/android/gles_jni/EGLContextImpl;", // Calls com.google.android.gles_jni.EGLImpl._nativeClassInit. + "Lcom/google/android/gles_jni/EGLImpl;", // Calls com.google.android.gles_jni.EGLImpl._nativeClassInit. + "Lcom/google/android/gles_jni/GLImpl;", // Calls com.google.android.gles_jni.GLImpl._nativeClassInit. + "Lgov/nist/core/GenericObject;", // Calls OsConstants.initConstants. + "Lgov/nist/core/Host;", // Calls OsConstants.initConstants. + "Lgov/nist/core/HostPort;", // Calls OsConstants.initConstants. + "Lgov/nist/core/NameValue;", // Calls OsConstants.initConstants. + "Lgov/nist/core/net/DefaultNetworkLayer;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/Utils;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/AddressImpl;", // Calls OsConstants.initConstants. 
+ "Lgov/nist/javax/sip/address/Authority;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/GenericURI;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/NetObject;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/SipUri;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/TelephoneNumber;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/address/UserInfo;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Accept;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AcceptEncoding;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AcceptLanguage;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AddressParametersHeader;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AlertInfoList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AllowEvents;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AllowEventsList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/AuthenticationInfo;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Authorization;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/CSeq;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/CallIdentifier;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Challenge;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ContactList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ContentEncoding;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ContentEncodingList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ContentLanguageList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ContentType;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Credentials;", // Calls OsConstants.initConstants. 
+ "Lgov/nist/javax/sip/header/ErrorInfoList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Expires;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/From;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/MimeVersion;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/NameMap;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Priority;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Protocol;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ProxyAuthenticate;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ProxyAuthenticateList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ProxyAuthorizationList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ProxyRequire;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ProxyRequireList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/RSeq;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/RecordRoute;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ReferTo;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/RequestLine;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Require;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/RetryAfter;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/SIPETag;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/SIPHeader;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/SIPHeaderNamesCache;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/StatusLine;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/SubscriptionState;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/TimeStamp;", // Calls OsConstants.initConstants. 
+ "Lgov/nist/javax/sip/header/UserAgent;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Unsupported;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/Warning;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ViaList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/extensions/Join;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/extensions/References;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/extensions/Replaces;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PAccessNetworkInfo;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PAssertedIdentity;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PAssertedIdentityList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PAssociatedURI;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PCalledPartyID;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PChargingVector;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PPreferredIdentity;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PVisitedNetworkIDList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/PathList;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/SecurityAgree;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/SecurityClient;", // Calls OsConstants.initConstants. + "Lgov/nist/javax/sip/header/ims/ServiceRoute;", // Calls OsConstants.initConstants. + "Ljava/io/Console;", // Has FileDescriptor(s). + "Ljava/io/File;", // Calls to Random. -> System.currentTimeMillis -> OsConstants.initConstants. + "Ljava/io/FileDescriptor;", // Requires libcore.io.OsConstants. + "Ljava/io/ObjectInputStream;", // Requires java.lang.ClassLoader$SystemClassLoader. 
+ "Ljava/io/ObjectStreamClass;", // Calls to Class.forName -> java.io.FileDescriptor. + "Ljava/io/ObjectStreamConstants;", // Instance of non-image class SerializablePermission. + "Ljava/lang/ClassLoader$SystemClassLoader;", // Calls System.getProperty -> OsConstants.initConstants. + "Ljava/lang/HexStringParser;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Ljava/lang/ProcessManager;", // Calls Thread.currentThread. + "Ljava/lang/Runtime;", // Calls System.getProperty -> OsConstants.initConstants. + "Ljava/lang/System;", // Calls OsConstants.initConstants. + "Ljava/math/BigDecimal;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Ljava/math/BigInteger;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Ljava/math/Primality;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Ljava/math/Multiplication;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Ljava/net/InetAddress;", // Requires libcore.io.OsConstants. + "Ljava/net/Inet4Address;", // Sub-class of InetAddress. + "Ljava/net/Inet6Address;", // Sub-class of InetAddress. + "Ljava/net/InetUnixAddress;", // Sub-class of InetAddress. + "Ljava/nio/charset/Charset;", // Calls Charset.getDefaultCharset -> System.getProperty -> OsConstants.initConstants. + "Ljava/nio/charset/CharsetICU;", // Sub-class of Charset. + "Ljava/nio/charset/Charsets;", // Calls Charset.forName. + "Ljava/nio/charset/StandardCharsets;", // Calls OsConstants.initConstants. + "Ljava/security/AlgorithmParameterGenerator;", // Calls OsConstants.initConstants. + "Ljava/security/KeyPairGenerator$KeyPairGeneratorImpl;", // Calls OsConstants.initConstants. + "Ljava/security/KeyPairGenerator;", // Calls OsConstants.initConstants. + "Ljava/security/Security;", // Tries to do disk IO for "security.properties". + "Ljava/security/spec/RSAKeyGenParameterSpec;", // java.math.NativeBN.BN_new() + "Ljava/sql/Date;", // Calls OsConstants.initConstants. 
+ "Ljava/sql/DriverManager;", // Calls OsConstants.initConstants. + "Ljava/sql/Time;", // Calls OsConstants.initConstants. + "Ljava/sql/Timestamp;", // Calls OsConstants.initConstants. + "Ljava/util/Date;", // Calls Date. -> System.currentTimeMillis -> OsConstants.initConstants. + "Ljava/util/ListResourceBundle;", // Calls OsConstants.initConstants. + "Ljava/util/Locale;", // Calls System.getProperty -> OsConstants.initConstants. + "Ljava/util/PropertyResourceBundle;", // Calls OsConstants.initConstants. + "Ljava/util/ResourceBundle;", // Calls OsConstants.initConstants. + "Ljava/util/ResourceBundle$MissingBundle;", // Calls OsConstants.initConstants. + "Ljava/util/Scanner;", // regex.Pattern.compileImpl. + "Ljava/util/SimpleTimeZone;", // Sub-class of TimeZone. + "Ljava/util/TimeZone;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. + "Ljava/util/concurrent/ConcurrentHashMap$Segment;", // Calls Runtime.getRuntime().availableProcessors(). + "Ljava/util/concurrent/ConcurrentSkipListMap;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/Exchanger;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/ForkJoinPool;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/LinkedTransferQueue;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/Phaser;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/ScheduledThreadPoolExecutor;", // Calls AtomicLong.VMSupportsCS8() + "Ljava/util/concurrent/SynchronousQueue;", // Calls OsConstants.initConstants. + "Ljava/util/concurrent/atomic/AtomicLong;", // Calls AtomicLong.VMSupportsCS8() + "Ljava/util/logging/LogManager;", // Calls System.getProperty -> OsConstants.initConstants. + "Ljava/util/prefs/AbstractPreferences;", // Calls OsConstants.initConstants. + "Ljava/util/prefs/FilePreferencesImpl;", // Calls OsConstants.initConstants. + "Ljava/util/prefs/FilePreferencesFactoryImpl;", // Calls OsConstants.initConstants. 
+ "Ljava/util/prefs/Preferences;", // Calls OsConstants.initConstants. + "Ljavax/crypto/KeyAgreement;", // Calls OsConstants.initConstants. + "Ljavax/crypto/KeyGenerator;", // Calls OsConstants.initConstants. + "Ljavax/security/cert/X509Certificate;", // Calls VMClassLoader.getBootClassPathSize. + "Ljavax/security/cert/X509Certificate$1;", // Calls VMClassLoader.getBootClassPathSize. + "Ljavax/microedition/khronos/egl/EGL10;", // Requires EGLContext. + "Ljavax/microedition/khronos/egl/EGLContext;", // Requires com.google.android.gles_jni.EGLImpl. + "Ljavax/net/ssl/HttpsURLConnection;", // Calls SSLSocketFactory.getDefault -> java.security.Security.getProperty. + "Ljavax/xml/datatype/DatatypeConstants;", // Calls OsConstants.initConstants. + "Ljavax/xml/datatype/FactoryFinder;", // Calls OsConstants.initConstants. + "Ljavax/xml/namespace/QName;", // Calls OsConstants.initConstants. + "Ljavax/xml/validation/SchemaFactoryFinder;", // Calls OsConstants.initConstants. + "Ljavax/xml/xpath/XPathConstants;", // Calls OsConstants.initConstants. + "Ljavax/xml/xpath/XPathFactoryFinder;", // Calls OsConstants.initConstants. + "Llibcore/icu/LocaleData;", // Requires java.util.Locale. + "Llibcore/icu/TimeZoneNames;", // Requires java.util.TimeZone. + "Llibcore/io/IoUtils;", // Calls Random. -> System.currentTimeMillis -> FileDescriptor -> OsConstants.initConstants. + "Llibcore/io/OsConstants;", // Platform specific. + "Llibcore/net/MimeUtils;", // Calls libcore.net.MimeUtils.getContentTypesPropertiesStream -> System.getProperty. + "Llibcore/reflect/Types;", // Calls OsConstants.initConstants. + "Llibcore/util/ZoneInfo;", // Sub-class of TimeZone. + "Llibcore/util/ZoneInfoDB;", // Calls System.getenv -> OsConstants.initConstants. + "Lorg/apache/commons/logging/LogFactory;", // Calls System.getProperty. + "Lorg/apache/commons/logging/impl/LogFactoryImpl;", // Calls OsConstants.initConstants. 
+ "Lorg/apache/harmony/security/fortress/Services;", // Calls ClassLoader.getSystemClassLoader -> System.getProperty. + "Lorg/apache/harmony/security/provider/cert/X509CertFactoryImpl;", // Requires java.nio.charsets.Charsets. + "Lorg/apache/harmony/security/provider/crypto/RandomBitsSupplier;", // Requires java.io.File. + "Lorg/apache/harmony/security/utils/AlgNameMapper;", // Requires java.util.Locale. + "Lorg/apache/harmony/security/pkcs10/CertificationRequest;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/pkcs10/CertificationRequestInfo;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/pkcs7/AuthenticatedAttributes;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/pkcs7/SignedData;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/pkcs7/SignerInfo;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/pkcs8/PrivateKeyInfo;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/provider/crypto/SHA1PRNG_SecureRandomImpl;", // Calls OsConstants.initConstants. + "Lorg/apache/harmony/security/x501/AttributeTypeAndValue;", // Calls IntegralToString.convertInt -> Thread.currentThread. + "Lorg/apache/harmony/security/x501/DirectoryString;", // Requires BigInteger. + "Lorg/apache/harmony/security/x501/Name;", // Requires org.apache.harmony.security.x501.AttributeTypeAndValue. + "Lorg/apache/harmony/security/x509/AccessDescription;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/AuthorityKeyIdentifier;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/CRLDistributionPoints;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/Certificate;", // Requires org.apache.harmony.security.x509.TBSCertificate. + "Lorg/apache/harmony/security/x509/CertificateIssuer;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/CertificateList;", // Calls Thread.currentThread. 
+ "Lorg/apache/harmony/security/x509/DistributionPoint;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/DistributionPointName;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/EDIPartyName;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lorg/apache/harmony/security/x509/GeneralName;", // Requires org.apache.harmony.security.x501.Name. + "Lorg/apache/harmony/security/x509/GeneralNames;", // Requires GeneralName. + "Lorg/apache/harmony/security/x509/GeneralSubtree;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/GeneralSubtrees;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/InfoAccessSyntax;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/IssuingDistributionPoint;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/NameConstraints;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/TBSCertList$RevokedCertificate;", // Calls NativeBN.BN_new(). + "Lorg/apache/harmony/security/x509/TBSCertList;", // Calls Thread.currentThread. + "Lorg/apache/harmony/security/x509/TBSCertificate;", // Requires org.apache.harmony.security.x501.Name. + "Lorg/apache/harmony/security/x509/Time;", // Calls native ... -> java.math.NativeBN.BN_new(). + "Lorg/apache/harmony/security/x509/Validity;", // Requires x509.Time. + "Lorg/apache/harmony/security/x509/tsp/TSTInfo;", // Calls Thread.currentThread. + "Lorg/apache/harmony/xml/ExpatParser;", // Calls native ExpatParser.staticInitialize. + "Lorg/apache/harmony/xml/ExpatParser$EntityParser;", // Calls ExpatParser.staticInitialize. + "Lorg/apache/http/conn/params/ConnRouteParams;", // Requires java.util.Locale. + "Lorg/apache/http/conn/ssl/SSLSocketFactory;", // Calls java.security.Security.getProperty. + "Lorg/apache/http/conn/util/InetAddressUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl. 
+};
+
+// Attempts to run the class initializer (<clinit>) of the class at
+// |class_def_index| at compile time, when that is safe: only for verified
+// classes, and static-field initialization only when compiling the boot image
+// (a single continuous space) for image classes not on the black list above.
+// Records the final class status. Runs as a ParallelCompilationManager
+// worker callback, so it must not hold the mutator lock on entry.
+static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index);
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
+  const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def);
+  mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader);
+  // A single continuous space means we are compiling the boot image.
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
+  bool can_init_static_fields = compiling_boot &&
+      manager->GetCompiler()->IsImageClass(descriptor);
+  if (klass != NULL) {
+    // We don't want class initialization occurring on multiple threads due to deadlock problems.
+    // For example, a parent class is initialized (holding its lock) that refers to a sub-class
+    // in its static/class initializer causing it to try to acquire the sub-class' lock. While
+    // on a second thread the sub-class is initialized (holding its lock) after first initializing
+    // its parents, whose locks are acquired. This leads to a parent-to-child and a child-to-parent
+    // lock ordering and consequent potential deadlock.
+    // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+    // than use a special Object for the purpose we use the Class of java.lang.Class.
+    ObjectLock lock1(soa.Self(), klass->GetClass());
+    // The lock required to initialize the class.
+    ObjectLock lock2(soa.Self(), klass);
+    // Only try to initialize classes that were successfully verified.
+    if (klass->IsVerified()) {
+      // First pass: verification-driven initialization without running <clinit>.
+      manager->GetClassLinker()->EnsureInitialized(klass, false, can_init_static_fields);
+      if (soa.Self()->IsExceptionPending()) {
+        soa.Self()->GetException(NULL)->Dump();
+      }
+      if (!klass->IsInitialized()) {
+        if (can_init_static_fields) {
+          // Black-listed classes have initializers with side effects (native calls,
+          // I/O, platform state) that must not run in the compiler.
+          bool is_black_listed = false;
+          for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
+            if (StringPiece(descriptor) == class_initializer_black_list[i]) {
+              is_black_listed = true;
+              break;
+            }
+          }
+          if (!is_black_listed) {
+            LOG(INFO) << "Initializing: " << descriptor;
+            if (StringPiece(descriptor) == "Ljava/lang/Void;") {
+              // Hand initialize j.l.Void to avoid Dex file operations in un-started runtime.
+              mirror::ObjectArray<mirror::Field>* fields = klass->GetSFields();
+              CHECK_EQ(fields->GetLength(), 1);
+              fields->Get(0)->SetObj(klass, manager->GetClassLinker()->FindPrimitiveClass('V'));
+              klass->SetStatus(mirror::Class::kStatusInitialized);
+            } else {
+              // Second pass: actually run <clinit> since the class is safe to initialize.
+              manager->GetClassLinker()->EnsureInitialized(klass, true, can_init_static_fields);
+            }
+            soa.Self()->AssertNoPendingException();
+          }
+        }
+      }
+      // If successfully initialized place in SSB array.
+      if (klass->IsInitialized()) {
+        klass->GetDexCache()->GetInitializedStaticStorage()->Set(klass->GetDexTypeIndex(), klass);
+      }
+    }
+    // Record the final class status if necessary.
+    mirror::Class::Status status = klass->GetStatus();
+    ClassReference ref(manager->GetDexFile(), class_def_index);
+    CompiledClass* compiled_class = manager->GetCompiler()->GetCompiledClass(ref);
+    if (compiled_class == NULL) {
+      compiled_class = new CompiledClass(status);
+      manager->GetCompiler()->RecordClassStatus(ref, compiled_class);
+    } else {
+      DCHECK_GE(status, compiled_class->GetStatus()) << descriptor;
+    }
+  }
+  // Clear any class not found or verification exceptions.
+  soa.Self()->ClearException();
+}
+
+// Runs InitializeClass over every class_def of |dex_file| on |thread_pool|.
+// In debug builds, first sanity-checks that every black list entry is a
+// well-formed descriptor (catches typos in the table above).
+void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
+                                       ThreadPool& thread_pool, TimingLogger& timings) {
+#ifndef NDEBUG
+  for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
+    const char* descriptor = class_initializer_black_list[i];
+    CHECK(IsValidDescriptor(descriptor)) << descriptor;
+  }
+#endif
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
+  context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count_);
+  timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation());
+}
+
+// Convenience overload: initializes the classes of every dex file in turn.
+void CompilerDriver::InitializeClasses(jobject class_loader,
+                                       const std::vector<const DexFile*>& dex_files,
+                                       ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    InitializeClasses(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+// Compiles every dex file in |dex_files|, one CompileDexFile pass per file.
+void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                             ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    CompileDexFile(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+// Compiles all methods of the class at |class_def_index|, skipping classes
+// that are filtered out (SkipClass), verifier-rejected, or empty.
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+  jobject jclass_loader = manager->GetClassLoader();
+  const DexFile& dex_file = *manager->GetDexFile();
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+    if (SkipClass(class_loader, dex_file, class_def)) {
+      return;
+    }
+  }
+  ClassReference ref(&dex_file, class_def_index);
+  // Skip compiling classes with generic verifier failures since they will still fail at runtime
+  if (verifier::MethodVerifier::IsClassRejected(ref)) {
+    return;
+  }
+  const byte* class_data = dex_file.GetClassData(class_def);
+  if (class_data == NULL) {
+    // empty class, probably a marker interface
+    return;
+  }
+  // Can we run DEX-to-DEX compiler on this class ?
+  bool allow_dex_compilation;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+    allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, dex_file, class_def);
+  }
+  ClassDataItemIterator it(dex_file, class_data);
+  // Skip fields; class_data lists static fields, instance fields, then methods.
+  while (it.HasNextStaticField()) {
+    it.Next();
+  }
+  while (it.HasNextInstanceField()) {
+    it.Next();
+  }
+  // Compile direct methods
+  int64_t previous_direct_method_idx = -1;  // int64_t sentinel: no uint32_t method_idx is -1.
+  while (it.HasNextDirectMethod()) {
+    uint32_t method_idx = it.GetMemberIndex();
+    if (method_idx == previous_direct_method_idx) {
+      // smali can create dex files with two encoded_methods sharing the same method_idx
+      // http://code.google.com/p/smali/issues/detail?id=119
+      it.Next();
+      continue;
+    }
+    previous_direct_method_idx = method_idx;
+    manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+                                          it.GetMethodInvokeType(class_def), class_def_index,
+                                          method_idx, jclass_loader, dex_file, allow_dex_compilation);
+    it.Next();
+  }
+  // Compile virtual methods
+  int64_t previous_virtual_method_idx = -1;
+  while (it.HasNextVirtualMethod()) {
+    uint32_t method_idx = it.GetMemberIndex();
+    if (method_idx == previous_virtual_method_idx) {
+      // smali can create dex files with two encoded_methods sharing the same method_idx
+      // http://code.google.com/p/smali/issues/detail?id=119
+      it.Next();
+      continue;
+    }
+    previous_virtual_method_idx = method_idx;
+    manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+                                          it.GetMethodInvokeType(class_def), class_def_index,
+                                          method_idx, jclass_loader, dex_file, allow_dex_compilation);
+    it.Next();
+  }
+
DCHECK(!it.HasNext()); +} + +void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, + ThreadPool& thread_pool, TimingLogger& timings) { + ParallelCompilationManager context(NULL, class_loader, this, &dex_file, thread_pool); + context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_); + timings.AddSplit("Compile " + dex_file.GetLocation()); +} + +void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, + InvokeType invoke_type, uint32_t class_def_idx, + uint32_t method_idx, jobject class_loader, + const DexFile& dex_file, + bool allow_dex_to_dex_compilation) { + CompiledMethod* compiled_method = NULL; + uint64_t start_ns = NanoTime(); + + if ((access_flags & kAccNative) != 0) { + compiled_method = (*jni_compiler_)(*this, access_flags, method_idx, dex_file); + CHECK(compiled_method != NULL); + } else if ((access_flags & kAccAbstract) != 0) { + } else { + // In small mode we only compile image classes. + bool dont_compile = (Runtime::Current()->IsSmallMode() && + ((image_classes_.get() == NULL) || (image_classes_->size() == 0))); + + // Don't compile class initializers, ever. + if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { + dont_compile = true; + } else if (code_item->insns_size_in_code_units_ < Runtime::Current()->GetSmallModeMethodDexSizeLimit()) { + // Do compile small methods. 
+ dont_compile = false; + } + if (!dont_compile) { + CompilerFn compiler = compiler_; +#ifdef ART_SEA_IR_MODE + bool use_sea = Runtime::Current()->IsSeaIRMode(); + use_sea &&= (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci")); + if (use_sea) { + compiler = sea_ir_compiler_; + } +#endif + compiled_method = (*compiler)(*this, code_item, access_flags, invoke_type, class_def_idx, + method_idx, class_loader, dex_file); + CHECK(compiled_method != NULL) << PrettyMethod(method_idx, dex_file); + } else if (allow_dex_to_dex_compilation) { + // TODO: add a mode to disable DEX-to-DEX compilation ? + compiled_method = (*dex_to_dex_compiler_)(*this, code_item, access_flags, + invoke_type, class_def_idx, + method_idx, class_loader, dex_file); + // No native code is generated. + CHECK(compiled_method == NULL) << PrettyMethod(method_idx, dex_file); + } + } + uint64_t duration_ns = NanoTime() - start_ns; +#ifdef ART_USE_PORTABLE_COMPILER + const uint64_t kWarnMilliSeconds = 1000; +#else + const uint64_t kWarnMilliSeconds = 100; +#endif + if (duration_ns > MsToNs(kWarnMilliSeconds)) { + LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file) + << " took " << PrettyDuration(duration_ns); + } + + Thread* self = Thread::Current(); + if (compiled_method != NULL) { + MethodReference ref(&dex_file, method_idx); + CHECK(GetCompiledMethod(ref) == NULL) << PrettyMethod(method_idx, dex_file); + { + MutexLock mu(self, compiled_methods_lock_); + compiled_methods_.Put(ref, compiled_method); + } + DCHECK(GetCompiledMethod(ref) != NULL) << PrettyMethod(method_idx, dex_file); + } + + if (self->IsExceptionPending()) { + ScopedObjectAccess soa(self); + LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" + << self->GetException(NULL)->Dump(); + } +} + +CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const { + MutexLock mu(Thread::Current(), compiled_classes_lock_); + ClassTable::const_iterator it = 
compiled_classes_.find(ref); + if (it == compiled_classes_.end()) { + return NULL; + } + CHECK(it->second != NULL); + return it->second; +} + +CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const { + MutexLock mu(Thread::Current(), compiled_methods_lock_); + MethodTable::const_iterator it = compiled_methods_.find(ref); + if (it == compiled_methods_.end()) { + return NULL; + } + CHECK(it->second != NULL); + return it->second; +} + +void CompilerDriver::SetBitcodeFileName(std::string const& filename) { + typedef void (*SetBitcodeFileNameFn)(CompilerDriver&, std::string const&); + + SetBitcodeFileNameFn set_bitcode_file_name = + reinterpret_cast(compilerLLVMSetBitcodeFileName); + + set_bitcode_file_name(*this, filename); +} + + +void CompilerDriver::AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, + size_t class_def_index) { + MutexLock mu(self, freezing_constructor_lock_); + freezing_constructor_classes_.insert(ClassReference(dex_file, class_def_index)); +} + +bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, + size_t class_def_index) { + MutexLock mu(self, freezing_constructor_lock_); + return freezing_constructor_classes_.count(ClassReference(dex_file, class_def_index)) != 0; +} + +bool CompilerDriver::WriteElf(const std::string& android_root, + bool is_host, + const std::vector& dex_files, + std::vector& oat_contents, + art::File* file) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(ART_USE_PORTABLE_COMPILER) + return art::ElfWriterMclinker::Create(file, oat_contents, dex_files, android_root, is_host, *this); +#else + return art::ElfWriterQuick::Create(file, oat_contents, dex_files, android_root, is_host, *this); +#endif +} +void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set, + std::string& target_triple, + std::string& target_cpu, + std::string& target_attr) { + switch (instruction_set) { + case kThumb2: + target_triple = 
"thumb-none-linux-gnueabi"; + target_cpu = "cortex-a9"; + target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db"; + break; + + case kArm: + target_triple = "armv7-none-linux-gnueabi"; + // TODO: Fix for Nexus S. + target_cpu = "cortex-a9"; + // TODO: Fix for Xoom. + target_attr = "+v7,+neon,+neonfp,+vfp3,+db"; + break; + + case kX86: + target_triple = "i386-pc-linux-gnu"; + target_attr = ""; + break; + + case kMips: + target_triple = "mipsel-unknown-linux"; + target_attr = "mips32r2"; + break; + + default: + LOG(FATAL) << "Unknown instruction set: " << instruction_set; + } + } +} // namespace art -- cgit v1.2.3-59-g8ed1b From 56d947fbc9bc2992e2f93112fafb73e50d2aaa7a Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Mon, 15 Jul 2013 13:14:23 -0700 Subject: Add verification of boot.oat generated on device Change-Id: I069586205a9a92fc7375ccf5cdde136bbbcfc800 --- compiler/driver/compiler_driver.cc | 1 - compiler/driver/compiler_driver.h | 2 +- compiler/elf_writer.cc | 1 - compiler/llvm/compiler_llvm.cc | 1 - runtime/class_linker.cc | 55 ++++------- runtime/class_linker.h | 2 +- runtime/gc/heap.cc | 133 ++------------------------ runtime/gc/heap.h | 6 -- runtime/gc/space/image_space.cc | 188 +++++++++++++++++++++++++++++++++++-- runtime/gc/space/image_space.h | 39 +++++++- runtime/oat_file.h | 9 +- 11 files changed, 254 insertions(+), 183 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index c99d103c17..9e71dff464 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -26,7 +26,6 @@ #include "dex_compilation_unit.h" #include "dex_file-inl.h" #include "jni_internal.h" -#include "oat_file.h" #include "object_utils.h" #include "runtime.h" #include "gc/accounting/card_table-inl.h" diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index d37f494ef1..4d7f0cf7b6 100644 --- a/compiler/driver/compiler_driver.h +++ 
b/compiler/driver/compiler_driver.h @@ -29,7 +29,7 @@ #include "instruction_set.h" #include "invoke_type.h" #include "method_reference.h" -#include "oat_file.h" +#include "os.h" #include "runtime.h" #include "safe_map.h" #include "thread_pool.h" diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc index 0823a53f87..70d17de102 100644 --- a/compiler/elf_writer.cc +++ b/compiler/elf_writer.cc @@ -27,7 +27,6 @@ #include "mirror/abstract_method-inl.h" #include "mirror/object-inl.h" #include "oat.h" -#include "oat_file.h" #include "scoped_thread_state_change.h" namespace art { diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc index afca223192..4475b25043 100644 --- a/compiler/llvm/compiler_llvm.cc +++ b/compiler/llvm/compiler_llvm.cc @@ -26,7 +26,6 @@ #include "ir_builder.h" #include "jni/portable/jni_compiler.h" #include "llvm_compilation_unit.h" -#include "oat_file.h" #include "utils_llvm.h" #include "verifier/method_verifier.h" diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index e35b95c1e9..fbceb597f0 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -680,35 +680,12 @@ void ClassLinker::RegisterOatFileLocked(const OatFile& oat_file) { oat_files_.push_back(&oat_file); } -OatFile* ClassLinker::OpenOat(const gc::space::ImageSpace* space) { +OatFile& ClassLinker::GetImageOatFile(gc::space::ImageSpace* space) { + VLOG(startup) << "ClassLinker::GetImageOatFile entering"; + OatFile& oat_file = space->ReleaseOatFile(); WriterMutexLock mu(Thread::Current(), dex_lock_); - const Runtime* runtime = Runtime::Current(); - const ImageHeader& image_header = space->GetImageHeader(); - // Grab location but don't use Object::AsString as we haven't yet initialized the roots to - // check the down cast - mirror::String* oat_location = - down_cast(image_header.GetImageRoot(ImageHeader::kOatLocation)); - std::string oat_filename; - oat_filename += runtime->GetHostPrefix(); - oat_filename += 
oat_location->ToModifiedUtf8(); - runtime->GetHeap()->UnReserveOatFileAddressRange(); - OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(), - !Runtime::Current()->IsCompiler()); - VLOG(startup) << "ClassLinker::OpenOat entering oat_filename=" << oat_filename; - if (oat_file == NULL) { - LOG(ERROR) << "Failed to open oat file " << oat_filename << " referenced from image."; - return NULL; - } - uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum(); - uint32_t image_oat_checksum = image_header.GetOatChecksum(); - if (oat_checksum != image_oat_checksum) { - LOG(ERROR) << "Failed to match oat file checksum " << std::hex << oat_checksum - << " to expected oat checksum " << std::hex << image_oat_checksum - << " in image"; - return NULL; - } - RegisterOatFileLocked(*oat_file); - VLOG(startup) << "ClassLinker::OpenOat exiting"; + RegisterOatFileLocked(oat_file); + VLOG(startup) << "ClassLinker::GetImageOatFile exiting"; return oat_file; } @@ -952,13 +929,13 @@ void ClassLinker::InitFromImage() { gc::Heap* heap = Runtime::Current()->GetHeap(); gc::space::ImageSpace* space = heap->GetImageSpace(); - OatFile* oat_file = OpenOat(space); - CHECK(oat_file != NULL) << "Failed to open oat file for image"; - CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U); - CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U); - CHECK(oat_file->GetOatHeader().GetImageFileLocation().empty()); - portable_resolution_trampoline_ = oat_file->GetOatHeader().GetPortableResolutionTrampoline(); - quick_resolution_trampoline_ = oat_file->GetOatHeader().GetQuickResolutionTrampoline(); + CHECK(space != NULL); + OatFile& oat_file = GetImageOatFile(space); + CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U); + CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U); + CHECK(oat_file.GetOatHeader().GetImageFileLocation().empty()); + portable_resolution_trampoline_ = 
oat_file.GetOatHeader().GetPortableResolutionTrampoline(); + quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline(); mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches); mirror::ObjectArray* dex_caches = dex_caches_object->AsObjectArray(); @@ -971,18 +948,18 @@ void ClassLinker::InitFromImage() { // as being Strings or not mirror::String::SetClass(GetClassRoot(kJavaLangString)); - CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(), + CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(), static_cast(dex_caches->GetLength())); Thread* self = Thread::Current(); for (int i = 0; i < dex_caches->GetLength(); i++) { SirtRef dex_cache(self, dex_caches->Get(i)); const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8()); - const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location); - CHECK(oat_dex_file != NULL) << oat_file->GetLocation() << " " << dex_file_location; + const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location); + CHECK(oat_dex_file != NULL) << oat_file.GetLocation() << " " << dex_file_location; const DexFile* dex_file = oat_dex_file->OpenDexFile(); if (dex_file == NULL) { LOG(FATAL) << "Failed to open dex file " << dex_file_location - << " from within oat file " << oat_file->GetLocation(); + << " from within oat file " << oat_file.GetLocation(); } CHECK_EQ(dex_file->GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum()); diff --git a/runtime/class_linker.h b/runtime/class_linker.h index df336724fa..df1ecc6207 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -359,7 +359,7 @@ class ClassLinker { // Initialize class linker from one or more images. 
void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - OatFile* OpenOat(const gc::space::ImageSpace* space) + OatFile& GetImageOatFile(gc::space::ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void InitFromImageCallback(mirror::Object* obj, void* arg) diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index a68cc02435..53b8cd9550 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -18,8 +18,6 @@ #define ATRACE_TAG ATRACE_TAG_DALVIK #include -#include -#include #include #include @@ -66,96 +64,6 @@ static const bool kDumpGcPerformanceOnShutdown = false; static const size_t kMinConcurrentRemainingBytes = 128 * KB; const double Heap::kDefaultTargetUtilization = 0.5; -static bool GenerateImage(const std::string& image_file_name) { - const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString()); - std::vector boot_class_path; - Split(boot_class_path_string, ':', boot_class_path); - if (boot_class_path.empty()) { - LOG(FATAL) << "Failed to generate image because no boot class path specified"; - } - - std::vector arg_vector; - - std::string dex2oat_string(GetAndroidRoot()); - dex2oat_string += (kIsDebugBuild ? 
"/bin/dex2oatd" : "/bin/dex2oat"); - const char* dex2oat = dex2oat_string.c_str(); - arg_vector.push_back(strdup(dex2oat)); - - std::string image_option_string("--image="); - image_option_string += image_file_name; - const char* image_option = image_option_string.c_str(); - arg_vector.push_back(strdup(image_option)); - - arg_vector.push_back(strdup("--runtime-arg")); - arg_vector.push_back(strdup("-Xms64m")); - - arg_vector.push_back(strdup("--runtime-arg")); - arg_vector.push_back(strdup("-Xmx64m")); - - for (size_t i = 0; i < boot_class_path.size(); i++) { - std::string dex_file_option_string("--dex-file="); - dex_file_option_string += boot_class_path[i]; - const char* dex_file_option = dex_file_option_string.c_str(); - arg_vector.push_back(strdup(dex_file_option)); - } - - std::string oat_file_option_string("--oat-file="); - oat_file_option_string += image_file_name; - oat_file_option_string.erase(oat_file_option_string.size() - 3); - oat_file_option_string += "oat"; - const char* oat_file_option = oat_file_option_string.c_str(); - arg_vector.push_back(strdup(oat_file_option)); - - std::string base_option_string(StringPrintf("--base=0x%x", ART_BASE_ADDRESS)); - arg_vector.push_back(strdup(base_option_string.c_str())); - - if (kIsTargetBuild) { - arg_vector.push_back(strdup("--image-classes-zip=/system/framework/framework.jar")); - arg_vector.push_back(strdup("--image-classes=preloaded-classes")); - } else { - arg_vector.push_back(strdup("--host")); - } - - std::string command_line(Join(arg_vector, ' ')); - LOG(INFO) << command_line; - - arg_vector.push_back(NULL); - char** argv = &arg_vector[0]; - - // fork and exec dex2oat - pid_t pid = fork(); - if (pid == 0) { - // no allocation allowed between fork and exec - - // change process groups, so we don't get reaped by ProcessManager - setpgid(0, 0); - - execv(dex2oat, argv); - - PLOG(FATAL) << "execv(" << dex2oat << ") failed"; - return false; - } else { - STLDeleteElements(&arg_vector); - - // wait for dex2oat to 
finish - int status; - pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0)); - if (got_pid != pid) { - PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid; - return false; - } - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { - LOG(ERROR) << dex2oat << " failed: " << command_line; - return false; - } - } - return true; -} - -void Heap::UnReserveOatFileAddressRange() { - oat_file_map_.reset(NULL); -} - Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free, double target_utilization, size_t capacity, const std::string& original_image_file_name, bool concurrent_gc) @@ -206,45 +114,20 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max mark_bitmap_.reset(new accounting::HeapBitmap(this)); // Requested begin for the alloc space, to follow the mapped image and oat files - byte* requested_begin = NULL; + byte* requested_alloc_space_begin = NULL; std::string image_file_name(original_image_file_name); if (!image_file_name.empty()) { - space::ImageSpace* image_space = NULL; - - if (OS::FileExists(image_file_name.c_str())) { - // If the /system file exists, it should be up-to-date, don't try to generate - image_space = space::ImageSpace::Create(image_file_name); - } else { - // If the /system file didn't exist, we need to use one from the dalvik-cache. - // If the cache file exists, try to open, but if it fails, regenerate. - // If it does not exist, generate. 
- image_file_name = GetDalvikCacheFilenameOrDie(image_file_name); - if (OS::FileExists(image_file_name.c_str())) { - image_space = space::ImageSpace::Create(image_file_name); - } - if (image_space == NULL) { - CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name; - image_space = space::ImageSpace::Create(image_file_name); - } - } - - CHECK(image_space != NULL) << "Failed to create space from " << image_file_name; + space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name); + CHECK(image_space != NULL) << "Failed to create space for " << image_file_name; AddContinuousSpace(image_space); // Oat files referenced by image files immediately follow them in memory, ensure alloc space // isn't going to get in the middle byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd(); CHECK_GT(oat_file_end_addr, image_space->End()); - - // Reserve address range from image_space->End() to image_space->GetImageHeader().GetOatEnd() - uintptr_t reserve_begin = RoundUp(reinterpret_cast(image_space->End()), kPageSize); - uintptr_t reserve_end = RoundUp(reinterpret_cast(oat_file_end_addr), kPageSize); - oat_file_map_.reset(MemMap::MapAnonymous("oat file reserve", - reinterpret_cast(reserve_begin), - reserve_end - reserve_begin, PROT_NONE)); - - if (oat_file_end_addr > requested_begin) { - requested_begin = reinterpret_cast(RoundUp(reinterpret_cast(oat_file_end_addr), - kPageSize)); + if (oat_file_end_addr > requested_alloc_space_begin) { + requested_alloc_space_begin = + reinterpret_cast(RoundUp(reinterpret_cast(oat_file_end_addr), + kPageSize)); } } @@ -261,7 +144,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max alloc_space_ = space::DlMallocSpace::Create("alloc space", initial_size, growth_limit, capacity, - requested_begin); + requested_alloc_space_begin); CHECK(alloc_space_ != NULL) << "Failed to create alloc space"; alloc_space_->SetFootprintLimit(alloc_space_->Capacity()); 
AddContinuousSpace(alloc_space_); diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 790ab0216d..e6c92211d4 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -373,9 +373,6 @@ class Heap { void DumpSpaces(); - // UnReserve the address range where the oat file will be placed. - void UnReserveOatFileAddressRange(); - // GC performance measuring void DumpGcPerformanceInfo(std::ostream& os); @@ -599,9 +596,6 @@ class Heap { std::vector mark_sweep_collectors_; - // A map that we use to temporarily reserve address range for the oat file. - UniquePtr oat_file_map_; - friend class collector::MarkSweep; friend class VerifyReferenceCardVisitor; friend class VerifyReferenceVisitor; diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 46c39378d7..c279ecf1ff 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -16,11 +16,16 @@ #include "image_space.h" +#include +#include + +#include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "gc/accounting/space_bitmap-inl.h" #include "mirror/abstract_method.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" +#include "oat_file.h" #include "os.h" #include "runtime.h" #include "space-inl.h" @@ -41,13 +46,118 @@ ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map) DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index; } -ImageSpace* ImageSpace::Create(const std::string& image_file_name) { +static bool GenerateImage(const std::string& image_file_name) { + const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString()); + std::vector boot_class_path; + Split(boot_class_path_string, ':', boot_class_path); + if (boot_class_path.empty()) { + LOG(FATAL) << "Failed to generate image because no boot class path specified"; + } + + std::vector arg_vector; + + std::string dex2oat_string(GetAndroidRoot()); + dex2oat_string += (kIsDebugBuild ? 
"/bin/dex2oatd" : "/bin/dex2oat"); + const char* dex2oat = dex2oat_string.c_str(); + arg_vector.push_back(strdup(dex2oat)); + + std::string image_option_string("--image="); + image_option_string += image_file_name; + const char* image_option = image_option_string.c_str(); + arg_vector.push_back(strdup(image_option)); + + arg_vector.push_back(strdup("--runtime-arg")); + arg_vector.push_back(strdup("-Xms64m")); + + arg_vector.push_back(strdup("--runtime-arg")); + arg_vector.push_back(strdup("-Xmx64m")); + + for (size_t i = 0; i < boot_class_path.size(); i++) { + std::string dex_file_option_string("--dex-file="); + dex_file_option_string += boot_class_path[i]; + const char* dex_file_option = dex_file_option_string.c_str(); + arg_vector.push_back(strdup(dex_file_option)); + } + + std::string oat_file_option_string("--oat-file="); + oat_file_option_string += image_file_name; + oat_file_option_string.erase(oat_file_option_string.size() - 3); + oat_file_option_string += "oat"; + const char* oat_file_option = oat_file_option_string.c_str(); + arg_vector.push_back(strdup(oat_file_option)); + + std::string base_option_string(StringPrintf("--base=0x%x", ART_BASE_ADDRESS)); + arg_vector.push_back(strdup(base_option_string.c_str())); + + if (kIsTargetBuild) { + arg_vector.push_back(strdup("--image-classes-zip=/system/framework/framework.jar")); + arg_vector.push_back(strdup("--image-classes=preloaded-classes")); + } else { + arg_vector.push_back(strdup("--host")); + } + + std::string command_line(Join(arg_vector, ' ')); + LOG(INFO) << "GenerateImage: " << command_line; + + arg_vector.push_back(NULL); + char** argv = &arg_vector[0]; + + // fork and exec dex2oat + pid_t pid = fork(); + if (pid == 0) { + // no allocation allowed between fork and exec + + // change process groups, so we don't get reaped by ProcessManager + setpgid(0, 0); + + execv(dex2oat, argv); + + PLOG(FATAL) << "execv(" << dex2oat << ") failed"; + return false; + } else { + STLDeleteElements(&arg_vector); + + 
// wait for dex2oat to finish + int status; + pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0)); + if (got_pid != pid) { + PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid; + return false; + } + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + LOG(ERROR) << dex2oat << " failed: " << command_line; + return false; + } + } + return true; +} + +ImageSpace* ImageSpace::Create(const std::string& original_image_file_name) { + if (OS::FileExists(original_image_file_name.c_str())) { + // If the /system file exists, it should be up-to-date, don't try to generate + return space::ImageSpace::Init(original_image_file_name, false); + } + // If the /system file didn't exist, we need to use one from the dalvik-cache. + // If the cache file exists, try to open, but if it fails, regenerate. + // If it does not exist, generate. + std::string image_file_name(GetDalvikCacheFilenameOrDie(original_image_file_name)); + if (OS::FileExists(image_file_name.c_str())) { + space::ImageSpace* image_space = space::ImageSpace::Init(image_file_name, true); + if (image_space != NULL) { + return image_space; + } + } + CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name; + return space::ImageSpace::Init(image_file_name, true); +} + +ImageSpace* ImageSpace::Init(const std::string& image_file_name, bool validate_oat_file) { CHECK(!image_file_name.empty()); uint64_t start_time = 0; if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { start_time = NanoTime(); - LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name; + LOG(INFO) << "ImageSpace::Init entering image_file_name=" << image_file_name; } UniquePtr file(OS::OpenFile(image_file_name.c_str(), false)); @@ -86,12 +196,78 @@ ImageSpace* ImageSpace::Create(const std::string& image_file_name) { callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod); runtime->SetCalleeSaveMethod(down_cast(callee_save_method), 
Runtime::kRefsAndArgs); - ImageSpace* space = new ImageSpace(image_file_name, map.release()); + UniquePtr space(new ImageSpace(image_file_name, map.release())); + + space->oat_file_.reset(space->OpenOatFile()); + if (space->oat_file_.get() == NULL) { + LOG(ERROR) << "Failed to open oat file for image: " << image_file_name; + return NULL; + } + + if (validate_oat_file && !space->ValidateOatFile()) { + LOG(WARNING) << "Failed to validate oat file for image: " << image_file_name; + return NULL; + } + if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { - LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time) - << ") " << *space; + LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time) + << ") " << *space.get(); } - return space; + return space.release(); +} + +OatFile* ImageSpace::OpenOatFile() const { + const Runtime* runtime = Runtime::Current(); + const ImageHeader& image_header = GetImageHeader(); + // Grab location but don't use Object::AsString as we haven't yet initialized the roots to + // check the down cast + mirror::String* oat_location = + down_cast(image_header.GetImageRoot(ImageHeader::kOatLocation)); + std::string oat_filename; + oat_filename += runtime->GetHostPrefix(); + oat_filename += oat_location->ToModifiedUtf8(); + OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(), + !Runtime::Current()->IsCompiler()); + if (oat_file == NULL) { + LOG(ERROR) << "Failed to open oat file " << oat_filename << " referenced from image."; + return NULL; + } + uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum(); + uint32_t image_oat_checksum = image_header.GetOatChecksum(); + if (oat_checksum != image_oat_checksum) { + LOG(ERROR) << "Failed to match oat file checksum " << std::hex << oat_checksum + << " to expected oat checksum " << std::hex << image_oat_checksum + << " in image"; + return NULL; + } + return oat_file; +} + +bool ImageSpace::ValidateOatFile() 
const { + CHECK(oat_file_.get() != NULL); + std::vector oat_dex_files = oat_file_->GetOatDexFiles(); + for (size_t i = 0; i < oat_dex_files.size(); i++) { + const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i]; + const std::string& dex_file_location = oat_dex_file->GetDexFileLocation(); + uint32_t dex_file_location_checksum; + if (!DexFile::GetChecksum(dex_file_location.c_str(), dex_file_location_checksum)) { + LOG(WARNING) << "ValidateOatFile could not find checksum for " << dex_file_location; + return false; + } + if (dex_file_location_checksum != oat_dex_file->GetDexFileLocationChecksum()) { + LOG(WARNING) << "ValidateOatFile found checksum mismatch between oat file " + << oat_file_->GetLocation() << " and dex file " << dex_file_location + << " (" << oat_dex_file->GetDexFileLocationChecksum() << " != " + << dex_file_location_checksum << ")"; + return false; + } + } + return true; +} + +OatFile& ImageSpace::ReleaseOatFile() { + CHECK(oat_file_.get() != NULL); + return *oat_file_.release(); } void ImageSpace::RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const { diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index afec5b7305..833fb8d73a 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -20,6 +20,9 @@ #include "space.h" namespace art { + +class OatFile; + namespace gc { namespace space { @@ -34,10 +37,22 @@ class ImageSpace : public MemMapSpace { return kSpaceTypeImageSpace; } - // create a Space from an image file. cannot be used for future allocation or collected. + // Create a Space from an image file. Cannot be used for future + // allocation or collected. + // + // Create also opens the OatFile associated with the image file so + // that it be contiguously allocated with the image before the + // creation of the alloc space. The ReleaseOatFile will later be + // used to transfer ownership of the OatFile to the ClassLinker when + // it is initialized. 
static ImageSpace* Create(const std::string& image) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Releases the OatFile from the ImageSpace so it can be transfer to + // the caller, presumably the ClassLinker. + OatFile& ReleaseOatFile() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const ImageHeader& GetImageHeader() const { return *reinterpret_cast(Begin()); } @@ -63,6 +78,23 @@ class ImageSpace : public MemMapSpace { void Dump(std::ostream& os) const; private: + + // Tries to initialize an ImageSpace from the given image path, + // returning NULL on error. + // + // If validate_oat_file is false (for /system), do not verify that + // image's OatFile is up-to-date relative to its DexFile + // inputs. Otherwise (for /data), validate the inputs and generate + // the OatFile in /data/dalvik-cache if necessary. + static ImageSpace* Init(const std::string& image, bool validate_oat_file) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + OatFile* OpenOatFile() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool ValidateOatFile() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + friend class Space; static size_t bitmap_index_; @@ -71,6 +103,11 @@ class ImageSpace : public MemMapSpace { ImageSpace(const std::string& name, MemMap* mem_map); + // The OatFile associated with the image during early startup to + // reserve space contiguous to the image. It is later released to + // the ClassLinker during it's initialization. + UniquePtr oat_file_; + DISALLOW_COPY_AND_ASSIGN(ImageSpace); }; diff --git a/runtime/oat_file.h b/runtime/oat_file.h index e3fd0025f0..ecc8d0c965 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -166,18 +166,25 @@ class OatFile { class OatDexFile { public: + // Opens the DexFile referred to by this OatDexFile from within the containing OatFile. const DexFile* OpenDexFile() const; - const OatClass* GetOatClass(uint32_t class_def_index) const; + + // Returns the size of the DexFile refered to by this OatDexFile. 
size_t FileSize() const; + // Returns original path of DexFile that was the source of this OatDexFile. const std::string& GetDexFileLocation() const { return dex_file_location_; } + // Returns checksum of original DexFile that was the source of this OatDexFile; uint32_t GetDexFileLocationChecksum() const { return dex_file_location_checksum_; } + // Returns the OatClass for the class specified by the given DexFile class_def_index. + const OatClass* GetOatClass(uint32_t class_def_index) const; + ~OatDexFile(); private: -- cgit v1.2.3-59-g8ed1b From 2ce745c06271d5223d57dbf08117b20d5b60694a Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 17 Jul 2013 17:44:30 -0700 Subject: Fix cpplint whitespace/braces issues Change-Id: Ide80939faf8e8690d8842dde8133902ac725ed1a --- Android.mk | 2 +- compiler/dex/arena_allocator.h | 2 +- compiler/dex/arena_bit_vector.cc | 6 +- compiler/dex/dataflow_iterator.h | 2 +- compiler/dex/frontend.cc | 2 +- compiler/dex/local_value_numbering.cc | 3 +- compiler/dex/mir_dataflow.cc | 45 +++----- compiler/dex/mir_graph.cc | 71 ++++-------- compiler/dex/mir_graph.h | 18 +-- compiler/dex/mir_optimization.cc | 69 ++++------- compiler/dex/portable/mir_to_gbc.cc | 180 ++++++++++------------------- compiler/dex/quick/arm/assemble_arm.cc | 6 +- compiler/dex/quick/arm/call_arm.cc | 57 ++++------ compiler/dex/quick/arm/fp_arm.cc | 21 ++-- compiler/dex/quick/arm/int_arm.cc | 114 +++++++------------ compiler/dex/quick/arm/target_arm.cc | 105 ++++++----------- compiler/dex/quick/arm/utility_arm.cc | 99 ++++++---------- compiler/dex/quick/codegen_util.cc | 111 ++++++------------ compiler/dex/quick/gen_common.cc | 107 ++++++----------- compiler/dex/quick/gen_invoke.cc | 81 +++++-------- compiler/dex/quick/gen_loadstore.cc | 42 +++---- compiler/dex/quick/local_optimizations.cc | 18 +-- compiler/dex/quick/mips/assemble_mips.cc | 9 +- compiler/dex/quick/mips/call_mips.cc | 30 ++--- compiler/dex/quick/mips/fp_mips.cc | 24 ++-- 
compiler/dex/quick/mips/int_mips.cc | 99 ++++++---------- compiler/dex/quick/mips/target_mips.cc | 105 ++++++----------- compiler/dex/quick/mips/utility_mips.cc | 81 +++++-------- compiler/dex/quick/mir_to_lir.cc | 15 +-- compiler/dex/quick/mir_to_lir.h | 2 +- compiler/dex/quick/ralloc_util.cc | 183 ++++++++++-------------------- compiler/dex/quick/x86/call_x86.cc | 27 ++--- compiler/dex/quick/x86/fp_x86.cc | 6 +- compiler/dex/quick/x86/int_x86.cc | 99 ++++++---------- compiler/dex/quick/x86/target_x86.cc | 81 +++++-------- compiler/dex/quick/x86/utility_x86.cc | 60 ++++------ compiler/dex/ssa_transformation.cc | 60 ++++------ compiler/dex/vreg_analysis.cc | 9 +- compiler/driver/compiler_driver.cc | 13 +-- compiler/image_writer.cc | 2 +- compiler/jni/jni_compiler_test.cc | 2 +- compiler/llvm/runtime_support_builder.cc | 3 +- jdwpspy/Common.h | 12 +- runtime/base/mutex.cc | 2 +- runtime/common_throws.cc | 4 +- runtime/compiled_method.cc | 6 +- runtime/dex_file.cc | 2 +- runtime/dex_instruction.h | 2 +- runtime/gc/space/large_object_space.cc | 5 +- runtime/interpreter/interpreter.cc | 2 +- runtime/mirror/abstract_method.cc | 2 +- runtime/mirror/class-inl.h | 6 +- runtime/native/dalvik_system_Zygote.cc | 2 +- runtime/oat/runtime/support_jni.cc | 2 +- runtime/oat/runtime/x86/context_x86.cc | 2 +- runtime/oat_file.cc | 3 +- runtime/runtime.cc | 2 +- runtime/thread_pool.cc | 4 +- 58 files changed, 719 insertions(+), 1410 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/Android.mk b/Android.mk index 5a28723e8e..4e4928c022 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard, \ + --filter=-,+build/header_guard,+whitespace/braces \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 
23d6b9f06b..cc81e50c5b 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -86,7 +86,7 @@ struct MemStats { void Dump(std::ostream& os) const { arena_.DumpMemStats(os); } - MemStats(const ArenaAllocator &arena) : arena_(arena){}; + MemStats(const ArenaAllocator &arena) : arena_(arena) {}; private: const ArenaAllocator &arena_; }; // MemStats diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc index 1fbf7740ac..724fdf81c7 100644 --- a/compiler/dex/arena_bit_vector.cc +++ b/compiler/dex/arena_bit_vector.cc @@ -114,8 +114,7 @@ void ArenaBitVector::Union(const ArenaBitVector* src) { } // Count the number of bits that are set. -int ArenaBitVector::NumSetBits() -{ +int ArenaBitVector::NumSetBits() { unsigned int count = 0; for (unsigned int word = 0; word < storage_size_; word++) { @@ -129,8 +128,7 @@ int ArenaBitVector::NumSetBits() * since there might be unused bits - setting those to one will confuse the * iterator. */ -void ArenaBitVector::SetInitialBits(unsigned int num_bits) -{ +void ArenaBitVector::SetInitialBits(unsigned int num_bits) { DCHECK_LE(((num_bits + 31) >> 5), storage_size_); unsigned int idx; for (idx = 0; idx < (num_bits >> 5); idx++) { diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 4c112f9678..19468698f9 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -42,7 +42,7 @@ namespace art { class DataflowIterator { public: - virtual ~DataflowIterator(){} + virtual ~DataflowIterator() {} // Return the next BasicBlock* to visit. 
BasicBlock* Next() { diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 746d475a9b..2d7c973859 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -220,7 +220,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, llvm_compilation_unit)); } else #endif - { + { // NOLINT(whitespace/braces) switch (compiler.GetInstructionSet()) { case kThumb2: cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break; diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc index ec5ab5db38..b783f3ed52 100644 --- a/compiler/dex/local_value_numbering.cc +++ b/compiler/dex/local_value_numbering.cc @@ -19,8 +19,7 @@ namespace art { -uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) -{ +uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) { uint16_t res = NO_VALUE; uint16_t opcode = mir->dalvikInsn.opcode; switch (opcode) { diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc index 6c152d2fb3..9632388e19 100644 --- a/compiler/dex/mir_dataflow.cc +++ b/compiler/dex/mir_dataflow.cc @@ -849,8 +849,7 @@ int MIRGraph::SRegToVReg(int ssa_reg) const { /* Any register that is used before being defined is considered live-in */ void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v, - ArenaBitVector* live_in_v, int dalvik_reg_id) -{ + ArenaBitVector* live_in_v, int dalvik_reg_id) { use_v->SetBit(dalvik_reg_id); if (!def_v->IsBitSet(dalvik_reg_id)) { live_in_v->SetBit(dalvik_reg_id); @@ -858,8 +857,7 @@ void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v, } /* Mark a reg as being defined */ -void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) -{ +void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) { def_v->SetBit(dalvik_reg_id); } @@ -867,8 +865,7 @@ void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) * Find out live-in variables for natural loops. 
Variables that are live-in in * the main loop body are considered to be defined in the entry block. */ -bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) -{ +bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) { MIR* mir; ArenaBitVector *use_v, *def_v, *live_in_v; @@ -925,8 +922,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) return true; } -int MIRGraph::AddNewSReg(int v_reg) -{ +int MIRGraph::AddNewSReg(int v_reg) { // Compiler temps always have a subscript of 0 int subscript = (v_reg < 0) ? 0 : ++ssa_last_defs_[v_reg]; int ssa_reg = GetNumSSARegs(); @@ -938,15 +934,13 @@ int MIRGraph::AddNewSReg(int v_reg) } /* Find out the latest SSA register for a given Dalvik register */ -void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) -{ +void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) { DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers)); uses[reg_index] = vreg_to_ssa_map_[dalvik_reg]; } /* Setup a new SSA register for a given Dalvik register */ -void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) -{ +void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) { DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers)); int ssa_reg = AddNewSReg(dalvik_reg); vreg_to_ssa_map_[dalvik_reg] = ssa_reg; @@ -954,8 +948,7 @@ void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) } /* Look up new SSA names for format_35c instructions */ -void MIRGraph::DataFlowSSAFormat35C(MIR* mir) -{ +void MIRGraph::DataFlowSSAFormat35C(MIR* mir) { DecodedInstruction *d_insn = &mir->dalvikInsn; int num_uses = d_insn->vA; int i; @@ -973,8 +966,7 @@ void MIRGraph::DataFlowSSAFormat35C(MIR* mir) } /* Look up new SSA names for format_3rc instructions */ -void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) -{ +void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) { DecodedInstruction *d_insn = &mir->dalvikInsn; int num_uses = d_insn->vA; int i; @@ -992,8 +984,7 @@ void MIRGraph::DataFlowSSAFormat3RC(MIR* 
mir) } /* Entry function to convert a block into SSA representation */ -bool MIRGraph::DoSSAConversion(BasicBlock* bb) -{ +bool MIRGraph::DoSSAConversion(BasicBlock* bb) { MIR* mir; if (bb->data_flow_info == NULL) return false; @@ -1127,8 +1118,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) } /* Setup the basic data structures for SSA conversion */ -void MIRGraph::CompilerInitializeSSAConversion() -{ +void MIRGraph::CompilerInitializeSSAConversion() { size_t num_dalvik_reg = cu_->num_dalvik_registers; ssa_base_vregs_ = new (arena_) GrowableArray(arena_, num_dalvik_reg + GetDefCount() + 128, @@ -1196,8 +1186,7 @@ void MIRGraph::CompilerInitializeSSAConversion() * and attempting to do would involve more complexity than it's * worth. */ -bool MIRGraph::InvokeUsesMethodStar(MIR* mir) -{ +bool MIRGraph::InvokeUsesMethodStar(MIR* mir) { InvokeType type; Instruction::Code opcode = mir->dalvikInsn.opcode; switch (opcode) { @@ -1246,8 +1235,7 @@ bool MIRGraph::InvokeUsesMethodStar(MIR* mir) * counts explicitly used s_regs. A later phase will add implicit * counts for things such as Method*, null-checked references, etc. */ -bool MIRGraph::CountUses(struct BasicBlock* bb) -{ +bool MIRGraph::CountUses(struct BasicBlock* bb) { if (bb->block_type != kDalvikByteCode) { return false; } @@ -1286,8 +1274,7 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) return false; } -void MIRGraph::MethodUseCount() -{ +void MIRGraph::MethodUseCount() { // Now that we know, resize the lists. 
int num_ssa_regs = GetNumSSARegs(); use_counts_.Resize(num_ssa_regs + 32); @@ -1307,8 +1294,7 @@ void MIRGraph::MethodUseCount() } /* Verify if all the successor is connected with all the claimed predecessors */ -bool MIRGraph::VerifyPredInfo(BasicBlock* bb) -{ +bool MIRGraph::VerifyPredInfo(BasicBlock* bb) { GrowableArray::Iterator iter(bb->predecessors); while (true) { @@ -1343,8 +1329,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) return true; } -void MIRGraph::VerifyDataflow() -{ +void MIRGraph::VerifyDataflow() { /* Verify if all blocks are connected as claimed */ AllNodesIterator iter(this, false /* not iterative */); for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) { diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 11e100dc61..ef9955e585 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -107,8 +107,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) method_sreg_(0), attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke. 
checkstats_(NULL), - arena_(arena) - { + arena_(arena) { try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */); } @@ -129,8 +128,7 @@ bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) { /* * Parse an instruction, return the length of the instruction */ -int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) -{ +int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) { // Don't parse instruction data if (!ContentIsInsn(code_ptr)) { return 0; @@ -145,8 +143,7 @@ int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_in /* Split an existing block from the specified code offset into two */ BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset, - BasicBlock* orig_block, BasicBlock** immed_pred_block_p) -{ + BasicBlock* orig_block, BasicBlock** immed_pred_block_p) { MIR* insn = orig_block->first_mir_insn; while (insn) { if (insn->offset == code_offset) break; @@ -224,8 +221,7 @@ BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset, * Utilizes a map for fast lookup of the typical cases. 
*/ BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create, - BasicBlock** immed_pred_block_p) -{ + BasicBlock** immed_pred_block_p) { BasicBlock* bb; unsigned int i; SafeMap::iterator it; @@ -260,8 +256,7 @@ BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool creat } /* Identify code range in try blocks and set up the empty catch blocks */ -void MIRGraph::ProcessTryCatchBlocks() -{ +void MIRGraph::ProcessTryCatchBlocks() { int tries_size = current_code_item_->tries_size_; int offset; @@ -296,8 +291,7 @@ void MIRGraph::ProcessTryCatchBlocks() /* Process instructions with the kBranch flag */ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags, const uint16_t* code_ptr, - const uint16_t* code_end) -{ + const uint16_t* code_end) { int target = cur_offset; switch (insn->dalvikInsn.opcode) { case Instruction::GOTO: @@ -365,8 +359,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur /* Process instructions with the kSwitch flag */ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, - int flags) -{ + int flags) { const uint16_t* switch_data = reinterpret_cast(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB); int size; @@ -443,8 +436,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset /* Process instructions with the kThrow flag */ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags, ArenaBitVector* try_block_addr, - const uint16_t* code_ptr, const uint16_t* code_end) -{ + const uint16_t* code_ptr, const uint16_t* code_end) { bool in_try_block = try_block_addr->IsBitSet(cur_offset); /* In try block */ @@ -483,7 +475,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_ eh_block->predecessors->Insert(cur_block); } - if (insn->dalvikInsn.opcode == Instruction::THROW){ + 
if (insn->dalvikInsn.opcode == Instruction::THROW) { cur_block->explicit_throw = true; if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) { // Force creation of new block following THROW via side-effect @@ -529,8 +521,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_ /* Parse a Dex method and insert it into the MIRGraph at the current insert point. */ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_def_idx, - uint32_t method_idx, jobject class_loader, const DexFile& dex_file) -{ + uint32_t method_idx, jobject class_loader, const DexFile& dex_file) { current_code_item_ = code_item; method_stack_.push_back(std::make_pair(current_method_, current_offset_)); current_method_ = m_units_.size(); @@ -705,8 +696,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ } } -void MIRGraph::ShowOpcodeStats() -{ +void MIRGraph::ShowOpcodeStats() { DCHECK(opcode_count_ != NULL); LOG(INFO) << "Opcode Count"; for (int i = 0; i < kNumPackedOpcodes; i++) { @@ -719,8 +709,7 @@ void MIRGraph::ShowOpcodeStats() // TODO: use a configurable base prefix, and adjust callers to supply pass name. 
/* Dump the CFG into a DOT graph */ -void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) -{ +void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) { FILE* file; std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file)); ReplaceSpecialChars(fname); @@ -849,8 +838,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) } /* Insert an MIR instruction to the end of a basic block */ -void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) -{ +void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) { if (bb->first_mir_insn == NULL) { DCHECK(bb->last_mir_insn == NULL); bb->last_mir_insn = bb->first_mir_insn = mir; @@ -864,8 +852,7 @@ void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) } /* Insert an MIR instruction to the head of a basic block */ -void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) -{ +void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) { if (bb->first_mir_insn == NULL) { DCHECK(bb->last_mir_insn == NULL); bb->last_mir_insn = bb->first_mir_insn = mir; @@ -879,8 +866,7 @@ void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) } /* Insert a MIR instruction after the specified MIR */ -void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) -{ +void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) { new_mir->prev = current_mir; new_mir->next = current_mir->next; current_mir->next = new_mir; @@ -894,8 +880,7 @@ void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) } } -char* MIRGraph::GetDalvikDisassembly(const MIR* mir) -{ +char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { DecodedInstruction insn = mir->dalvikInsn; std::string str; int flags = 0; @@ -1024,8 +1009,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) } /* Turn method name into a legal Linux file name */ -void MIRGraph::ReplaceSpecialChars(std::string& str) -{ +void MIRGraph::ReplaceSpecialChars(std::string& str) { static const struct { const char before; const char after; } match[] = 
{{'/','-'}, {';','#'}, {' ','#'}, {'$','+'}, {'(','@'}, {')','@'}, {'<','='}, {'>','='}}; @@ -1034,8 +1018,7 @@ void MIRGraph::ReplaceSpecialChars(std::string& str) } } -std::string MIRGraph::GetSSAName(int ssa_reg) -{ +std::string MIRGraph::GetSSAName(int ssa_reg) { // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to // the arena. We should be smarter and just place straight into the arena, or compute the // value more lazily. @@ -1043,8 +1026,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) } // Similar to GetSSAName, but if ssa name represents an immediate show that as well. -std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) -{ +std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) { if (reg_location_ == NULL) { // Pre-SSA - just use the standard name return GetSSAName(ssa_reg); @@ -1062,8 +1044,7 @@ std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) } } -void MIRGraph::GetBlockName(BasicBlock* bb, char* name) -{ +void MIRGraph::GetBlockName(BasicBlock* bb, char* name) { switch (bb->block_type) { case kEntryBlock: snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id); @@ -1084,16 +1065,14 @@ void MIRGraph::GetBlockName(BasicBlock* bb, char* name) } } -const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) -{ +const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) { // FIXME: use current code unit for inline support. const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx); return cu_->dex_file->GetShorty(method_id.proto_idx_); } /* Debug Utility - dump a compilation unit */ -void MIRGraph::DumpMIRGraph() -{ +void MIRGraph::DumpMIRGraph() { BasicBlock* bb; const char* block_type_names[] = { "Entry Block", @@ -1135,8 +1114,7 @@ void MIRGraph::DumpMIRGraph() * MOVE_RESULT and incorporate it into the invoke. 
*/ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, - bool is_range) -{ + bool is_range) { CallInfo* info = static_cast(arena_->NewMem(sizeof(CallInfo), true, ArenaAllocator::kAllocMisc)); MIR* move_result_mir = FindMoveResult(bb, mir); @@ -1163,8 +1141,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, } // Allocate a new basic block. -BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) -{ +BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { BasicBlock* bb = static_cast(arena_->NewMem(sizeof(BasicBlock), true, ArenaAllocator::kAllocBB)); bb->block_type = block_type; diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index a40fa97ad5..f86e13016d 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -452,43 +452,37 @@ class MIRGraph { return ssa_subscripts_->Get(ssa_reg); } - RegLocation GetRawSrc(MIR* mir, int num) - { + RegLocation GetRawSrc(MIR* mir, int num) { DCHECK(num < mir->ssa_rep->num_uses); RegLocation res = reg_location_[mir->ssa_rep->uses[num]]; return res; } - RegLocation GetRawDest(MIR* mir) - { + RegLocation GetRawDest(MIR* mir) { DCHECK_GT(mir->ssa_rep->num_defs, 0); RegLocation res = reg_location_[mir->ssa_rep->defs[0]]; return res; } - RegLocation GetDest(MIR* mir) - { + RegLocation GetDest(MIR* mir) { RegLocation res = GetRawDest(mir); DCHECK(!res.wide); return res; } - RegLocation GetSrc(MIR* mir, int num) - { + RegLocation GetSrc(MIR* mir, int num) { RegLocation res = GetRawSrc(mir, num); DCHECK(!res.wide); return res; } - RegLocation GetDestWide(MIR* mir) - { + RegLocation GetDestWide(MIR* mir) { RegLocation res = GetRawDest(mir); DCHECK(res.wide); return res; } - RegLocation GetSrcWide(MIR* mir, int low) - { + RegLocation GetSrcWide(MIR* mir, int low) { RegLocation res = GetRawSrc(mir, low); DCHECK(res.wide); return res; diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 
6b8f3f0915..306dbc7b6b 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -20,27 +20,23 @@ namespace art { -static unsigned int Predecessors(BasicBlock* bb) -{ +static unsigned int Predecessors(BasicBlock* bb) { return bb->predecessors->Size(); } /* Setup a constant value for opcodes thare have the DF_SETS_CONST attribute */ -void MIRGraph::SetConstant(int32_t ssa_reg, int value) -{ +void MIRGraph::SetConstant(int32_t ssa_reg, int value) { is_constant_v_->SetBit(ssa_reg); constant_values_[ssa_reg] = value; } -void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) -{ +void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) { is_constant_v_->SetBit(ssa_reg); constant_values_[ssa_reg] = Low32Bits(value); constant_values_[ssa_reg + 1] = High32Bits(value); } -void MIRGraph::DoConstantPropogation(BasicBlock* bb) -{ +void MIRGraph::DoConstantPropogation(BasicBlock* bb) { MIR* mir; for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { @@ -96,8 +92,7 @@ void MIRGraph::DoConstantPropogation(BasicBlock* bb) /* TODO: implement code to handle arithmetic operations */ } -void MIRGraph::PropagateConstants() -{ +void MIRGraph::PropagateConstants() { is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false); constant_values_ = static_cast(arena_->NewMem(sizeof(int) * GetNumSSARegs(), true, ArenaAllocator::kAllocDFInfo)); @@ -108,8 +103,7 @@ void MIRGraph::PropagateConstants() } /* Advance to next strictly dominated MIR node in an extended basic block */ -static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) -{ +static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) { BasicBlock* bb = *p_bb; if (mir != NULL) { mir = mir->next; @@ -133,8 +127,7 @@ static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) * opcodes or incoming arcs. However, if the result of the invoke is not * used, a move-result may not be present. 
*/ -MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) -{ +MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) { BasicBlock* tbb = bb; mir = AdvanceMIR(&tbb, mir); while (mir != NULL) { @@ -154,8 +147,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) return mir; } -static BasicBlock* NextDominatedBlock(BasicBlock* bb) -{ +static BasicBlock* NextDominatedBlock(BasicBlock* bb) { if (bb->block_type == kDead) { return NULL; } @@ -169,8 +161,7 @@ static BasicBlock* NextDominatedBlock(BasicBlock* bb) return bb; } -static MIR* FindPhi(BasicBlock* bb, int ssa_name) -{ +static MIR* FindPhi(BasicBlock* bb, int ssa_name) { for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { if (static_cast(mir->dalvikInsn.opcode) == kMirOpPhi) { for (int i = 0; i < mir->ssa_rep->num_uses; i++) { @@ -183,8 +174,7 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name) return NULL; } -static SelectInstructionKind SelectKind(MIR* mir) -{ +static SelectInstructionKind SelectKind(MIR* mir) { switch (mir->dalvikInsn.opcode) { case Instruction::MOVE: case Instruction::MOVE_OBJECT: @@ -206,15 +196,13 @@ static SelectInstructionKind SelectKind(MIR* mir) return kSelectNone; } -int MIRGraph::GetSSAUseCount(int s_reg) -{ +int MIRGraph::GetSSAUseCount(int s_reg) { return raw_use_counts_.Get(s_reg); } /* Do some MIR-level extended basic block optimizations */ -bool MIRGraph::BasicBlockOpt(BasicBlock* bb) -{ +bool MIRGraph::BasicBlockOpt(BasicBlock* bb) { if (bb->block_type == kDead) { return true; } @@ -474,8 +462,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) return true; } -void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) -{ +void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) { if (bb->data_flow_info != NULL) { bb->data_flow_info->ending_null_check_v = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck); @@ -483,8 +470,7 @@ void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) } /* Collect stats on number of 
checks removed */ -void MIRGraph::CountChecks(struct BasicBlock* bb) -{ +void MIRGraph::CountChecks(struct BasicBlock* bb) { if (bb->data_flow_info != NULL) { for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { if (mir->ssa_rep == NULL) { @@ -508,8 +494,7 @@ void MIRGraph::CountChecks(struct BasicBlock* bb) } /* Try to make common case the fallthrough path */ -static bool LayoutBlocks(struct BasicBlock* bb) -{ +static bool LayoutBlocks(struct BasicBlock* bb) { // TODO: For now, just looking for direct throws. Consider generalizing for profile feedback if (!bb->explicit_throw) { return false; @@ -556,8 +541,7 @@ static bool LayoutBlocks(struct BasicBlock* bb) } /* Combine any basic blocks terminated by instructions that we now know can't throw */ -bool MIRGraph::CombineBlocks(struct BasicBlock* bb) -{ +bool MIRGraph::CombineBlocks(struct BasicBlock* bb) { // Loop here to allow combining a sequence of blocks while (true) { // Check termination conditions @@ -625,8 +609,7 @@ bool MIRGraph::CombineBlocks(struct BasicBlock* bb) } /* Eliminate unnecessary null checks for a basic block. 
*/ -bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) -{ +bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { if (bb->data_flow_info == NULL) return false; /* @@ -770,8 +753,7 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) return changed; } -void MIRGraph::NullCheckElimination() -{ +void MIRGraph::NullCheckElimination() { if (!(cu_->disable_opt & (1 << kNullCheckElimination))) { DCHECK(temp_ssa_register_v_ != NULL); AllNodesIterator iter(this, false /* not iterative */); @@ -789,8 +771,7 @@ void MIRGraph::NullCheckElimination() } } -void MIRGraph::BasicBlockCombine() -{ +void MIRGraph::BasicBlockCombine() { PreOrderDfsIterator iter(this, false /* not iterative */); for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) { CombineBlocks(bb); @@ -800,8 +781,7 @@ void MIRGraph::BasicBlockCombine() } } -void MIRGraph::CodeLayout() -{ +void MIRGraph::CodeLayout() { if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) { VerifyDataflow(); } @@ -814,8 +794,7 @@ void MIRGraph::CodeLayout() } } -void MIRGraph::DumpCheckStats() -{ +void MIRGraph::DumpCheckStats() { Checkstats* stats = static_cast(arena_->NewMem(sizeof(Checkstats), true, ArenaAllocator::kAllocDFInfo)); @@ -840,8 +819,7 @@ void MIRGraph::DumpCheckStats() } } -bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) -{ +bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) { if (bb->visited) return false; if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock))) { @@ -871,8 +849,7 @@ bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) } -void MIRGraph::BasicBlockOptimization() -{ +void MIRGraph::BasicBlockOptimization() { if (!(cu_->disable_opt & (1 << kBBOpt))) { DCHECK_EQ(cu_->num_compiler_temps, 0); ClearAllVisitedFlags(); diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index 2be1ef435b..4317d1e354 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ 
b/compiler/dex/portable/mir_to_gbc.cc @@ -42,18 +42,15 @@ const char kCatchBlock = 'C'; namespace art { -::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id) -{ +::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id) { return id_to_block_map_.Get(id); } -::llvm::Value* MirConverter::GetLLVMValue(int s_reg) -{ +::llvm::Value* MirConverter::GetLLVMValue(int s_reg) { return llvm_values_.Get(s_reg); } -void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) -{ +void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) { // Set vreg for debugging art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg; ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id); @@ -64,8 +61,7 @@ void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) } // Replace the placeholder value with the real definition -void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) -{ +void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) { ::llvm::Value* placeholder = GetLLVMValue(s_reg); if (placeholder == NULL) { // This can happen on instruction rewrite on verification failure @@ -81,14 +77,12 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) } -void MirConverter::DefineValue(::llvm::Value* val, int s_reg) -{ +void MirConverter::DefineValue(::llvm::Value* val, int s_reg) { DefineValueOnly(val, s_reg); SetVregOnValue(val, s_reg); } -::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc) -{ +::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc) { ::llvm::Type* res = NULL; if (loc.wide) { if (loc.fp) @@ -108,8 +102,7 @@ void MirConverter::DefineValue(::llvm::Value* val, int s_reg) return res; } -void MirConverter::InitIR() -{ +void MirConverter::InitIR() { if (llvm_info_ == NULL) { CompilerTls* tls = cu_->compiler_driver->GetTls(); CHECK(tls != NULL); @@ -125,16 +118,14 @@ void MirConverter::InitIR() irb_ = llvm_info_->GetIRBuilder(); } -::llvm::BasicBlock* 
MirConverter::FindCaseTarget(uint32_t vaddr) -{ +::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr) { BasicBlock* bb = mir_graph_->FindBlock(vaddr); DCHECK(bb != NULL); return GetLLVMBlock(bb->id); } void MirConverter::ConvertPackedSwitch(BasicBlock* bb, - int32_t table_offset, RegLocation rl_src) -{ + int32_t table_offset, RegLocation rl_src) { const Instruction::PackedSwitchPayload* payload = reinterpret_cast( cu_->insns + current_dalvik_offset_ + table_offset); @@ -158,8 +149,7 @@ void MirConverter::ConvertPackedSwitch(BasicBlock* bb, } void MirConverter::ConvertSparseSwitch(BasicBlock* bb, - int32_t table_offset, RegLocation rl_src) -{ + int32_t table_offset, RegLocation rl_src) { const Instruction::SparseSwitchPayload* payload = reinterpret_cast( cu_->insns + current_dalvik_offset_ + table_offset); @@ -186,8 +176,7 @@ void MirConverter::ConvertSparseSwitch(BasicBlock* bb, } void MirConverter::ConvertSget(int32_t field_index, - art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) -{ + art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) { ::llvm::Constant* field_idx = irb_->getInt32(field_index); ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::Value* res = irb_->CreateCall(intr, field_idx); @@ -195,8 +184,7 @@ void MirConverter::ConvertSget(int32_t field_index, } void MirConverter::ConvertSput(int32_t field_index, - art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src) -{ + art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src) { ::llvm::SmallVector< ::llvm::Value*, 2> args; args.push_back(irb_->getInt32(field_index)); args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -204,8 +192,7 @@ void MirConverter::ConvertSput(int32_t field_index, irb_->CreateCall(intr, args); } -void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) -{ +void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) { art::llvm::IntrinsicHelper::IntrinsicId id; id = 
art::llvm::IntrinsicHelper::HLFillArrayData; ::llvm::SmallVector< ::llvm::Value*, 2> args; @@ -216,8 +203,7 @@ void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) } ::llvm::Value* MirConverter::EmitConst(::llvm::ArrayRef< ::llvm::Value*> src, - RegLocation loc) -{ + RegLocation loc) { art::llvm::IntrinsicHelper::IntrinsicId id; if (loc.wide) { if (loc.fp) { @@ -238,16 +224,14 @@ void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) return irb_->CreateCall(intr, src); } -void MirConverter::EmitPopShadowFrame() -{ +void MirConverter::EmitPopShadowFrame() { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction( art::llvm::IntrinsicHelper::PopShadowFrame); irb_->CreateCall(intr); } ::llvm::Value* MirConverter::EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src, - RegLocation loc) -{ + RegLocation loc) { art::llvm::IntrinsicHelper::IntrinsicId id; if (loc.wide) { if (loc.fp) { @@ -268,16 +252,14 @@ void MirConverter::EmitPopShadowFrame() return irb_->CreateCall(intr, src); } -void MirConverter::ConvertMoveException(RegLocation rl_dest) -{ +void MirConverter::ConvertMoveException(RegLocation rl_dest) { ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction( art::llvm::IntrinsicHelper::GetException); ::llvm::Value* res = irb_->CreateCall(func); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertThrow(RegLocation rl_src) -{ +void MirConverter::ConvertThrow(RegLocation rl_src) { ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg); ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction( art::llvm::IntrinsicHelper::HLThrowException); @@ -286,8 +268,7 @@ void MirConverter::ConvertThrow(RegLocation rl_src) void MirConverter::ConvertMonitorEnterExit(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_src) -{ + RegLocation rl_src) { ::llvm::SmallVector< ::llvm::Value*, 2> args; args.push_back(irb_->getInt32(opt_flags)); 
args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -296,8 +277,7 @@ void MirConverter::ConvertMonitorEnterExit(int opt_flags, } void MirConverter::ConvertArrayLength(int opt_flags, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { ::llvm::SmallVector< ::llvm::Value*, 2> args; args.push_back(irb_->getInt32(opt_flags)); args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -307,8 +287,7 @@ void MirConverter::ConvertArrayLength(int opt_flags, DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::EmitSuspendCheck() -{ +void MirConverter::EmitSuspendCheck() { art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::CheckSuspend; ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -316,8 +295,7 @@ void MirConverter::EmitSuspendCheck() } ::llvm::Value* MirConverter::ConvertCompare(ConditionCode cc, - ::llvm::Value* src1, ::llvm::Value* src2) -{ + ::llvm::Value* src1, ::llvm::Value* src2) { ::llvm::Value* res = NULL; DCHECK_EQ(src1->getType(), src2->getType()); switch(cc) { @@ -333,8 +311,7 @@ void MirConverter::EmitSuspendCheck() } void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, - ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) -{ + ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) { if (bb->taken->start_offset <= mir->offset) { EmitSuspendCheck(); } @@ -349,8 +326,7 @@ void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, } void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb, - MIR* mir, ConditionCode cc, RegLocation rl_src1) -{ + MIR* mir, ConditionCode cc, RegLocation rl_src1) { if (bb->taken->start_offset <= mir->offset) { EmitSuspendCheck(); } @@ -369,8 +345,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb, } ::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long, - ::llvm::Value* src1, ::llvm::Value* src2) -{ + ::llvm::Value* src1, ::llvm::Value* src2) { 
art::llvm::IntrinsicHelper::IntrinsicId id; if (is_long) { if (is_div) { @@ -393,8 +368,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb, } ::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long, - ::llvm::Value* src1, ::llvm::Value* src2) -{ + ::llvm::Value* src1, ::llvm::Value* src2) { ::llvm::Value* res = NULL; switch(op) { case kOpAdd: res = irb_->CreateAdd(src1, src2); break; @@ -416,8 +390,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb, } void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg); ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg); ::llvm::Value* res = NULL; @@ -434,8 +407,7 @@ void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest, } void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::SmallVector< ::llvm::Value*, 2>args; args.push_back(GetLLVMValue(rl_src1.orig_sreg)); @@ -445,8 +417,7 @@ void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id, } void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_dest, RegLocation rl_src, int shift_amount) -{ + RegLocation rl_dest, RegLocation rl_src, int shift_amount) { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::SmallVector< ::llvm::Value*, 2>args; args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -456,8 +427,7 @@ void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id, } void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { 
::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg); ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg); DCHECK_EQ(src1->getType(), src2->getType()); @@ -466,8 +436,7 @@ void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest, } void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest, - RegLocation rl_src1, int32_t imm) -{ + RegLocation rl_src1, int32_t imm) { ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg); ::llvm::Value* src2 = irb_->getInt32(imm); ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2); @@ -480,8 +449,7 @@ void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest, * The requirements are similar. */ void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir, - InvokeType invoke_type, bool is_range, bool is_filled_new_array) -{ + InvokeType invoke_type, bool is_range, bool is_filled_new_array) { CallInfo* info = mir_graph_->NewMemCallInfo(bb, mir, invoke_type, is_range); ::llvm::SmallVector< ::llvm::Value*, 10> args; // Insert the invoke_type @@ -529,16 +497,14 @@ void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir, } void MirConverter::ConvertConstObject(uint32_t idx, - art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) -{ + art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::Value* index = irb_->getInt32(idx); ::llvm::Value* res = irb_->CreateCall(intr, index); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src) -{ +void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src) { art::llvm::IntrinsicHelper::IntrinsicId id; id = art::llvm::IntrinsicHelper::HLCheckCast; ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -548,8 +514,7 @@ void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src) irb_->CreateCall(intr, args); } -void MirConverter::ConvertNewInstance(uint32_t 
type_idx, RegLocation rl_dest) -{ +void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest) { art::llvm::IntrinsicHelper::IntrinsicId id; id = art::llvm::IntrinsicHelper::NewInstance; ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -559,8 +524,7 @@ void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest) } void MirConverter::ConvertNewArray(uint32_t type_idx, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { art::llvm::IntrinsicHelper::IntrinsicId id; id = art::llvm::IntrinsicHelper::NewArray; ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -573,8 +537,7 @@ void MirConverter::ConvertNewArray(uint32_t type_idx, void MirConverter::ConvertAget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index) -{ + RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index) { ::llvm::SmallVector< ::llvm::Value*, 3> args; args.push_back(irb_->getInt32(opt_flags)); args.push_back(GetLLVMValue(rl_array.orig_sreg)); @@ -586,8 +549,7 @@ void MirConverter::ConvertAget(int opt_flags, void MirConverter::ConvertAput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_src, RegLocation rl_array, RegLocation rl_index) -{ + RegLocation rl_src, RegLocation rl_array, RegLocation rl_index) { ::llvm::SmallVector< ::llvm::Value*, 4> args; args.push_back(irb_->getInt32(opt_flags)); args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -599,8 +561,7 @@ void MirConverter::ConvertAput(int opt_flags, void MirConverter::ConvertIget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_dest, RegLocation rl_obj, int field_index) -{ + RegLocation rl_dest, RegLocation rl_obj, int field_index) { ::llvm::SmallVector< ::llvm::Value*, 3> args; args.push_back(irb_->getInt32(opt_flags)); args.push_back(GetLLVMValue(rl_obj.orig_sreg)); @@ -612,8 +573,7 @@ void 
MirConverter::ConvertIget(int opt_flags, void MirConverter::ConvertIput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id, - RegLocation rl_src, RegLocation rl_obj, int field_index) -{ + RegLocation rl_src, RegLocation rl_obj, int field_index) { ::llvm::SmallVector< ::llvm::Value*, 4> args; args.push_back(irb_->getInt32(opt_flags)); args.push_back(GetLLVMValue(rl_src.orig_sreg)); @@ -624,8 +584,7 @@ void MirConverter::ConvertIput(int opt_flags, } void MirConverter::ConvertInstanceOf(uint32_t type_idx, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { art::llvm::IntrinsicHelper::IntrinsicId id; id = art::llvm::IntrinsicHelper::InstanceOf; ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -636,29 +595,25 @@ void MirConverter::ConvertInstanceOf(uint32_t type_idx, DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* res = irb_->CreateSExt(GetLLVMValue(rl_src.orig_sreg), irb_->getInt64Ty()); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg); ::llvm::Value* res = irb_->CreateTrunc(src, irb_->getInt32Ty()); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg); ::llvm::Value* res = irb_->CreateFPExt(src, irb_->getDoubleTy()); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src) { 
::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg); ::llvm::Value* res = irb_->CreateFPTrunc(src, irb_->getFloatTy()); DefineValue(res, rl_dest.orig_sreg); @@ -666,8 +621,7 @@ void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src) void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { DCHECK_EQ(rl_src1.fp, rl_src2.fp); DCHECK_EQ(rl_src1.wide, rl_src2.wide); ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); @@ -679,23 +633,20 @@ void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId } void MirConverter::ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src, - art::llvm::IntrinsicHelper::IntrinsicId id) -{ + art::llvm::IntrinsicHelper::IntrinsicId id) { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg)); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* res = irb_->CreateNeg(GetLLVMValue(rl_src.orig_sreg)); DefineValue(res, rl_dest.orig_sreg); } void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { ::llvm::Value* res = irb_->CreateSIToFP(GetLLVMValue(rl_src.orig_sreg), ty); DefineValue(res, rl_dest.orig_sreg); @@ -703,23 +654,20 @@ void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest, void MirConverter::ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id); ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg)); DefineValue(res, rl_dest.orig_sreg); } -void 
MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* res = irb_->CreateFNeg(GetLLVMValue(rl_src.orig_sreg)); DefineValue(res, rl_dest.orig_sreg); } -void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src) -{ +void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src) { ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg); ::llvm::Value* res = irb_->CreateXor(src, static_cast(-1)); DefineValue(res, rl_dest.orig_sreg); @@ -737,8 +685,7 @@ void MirConverter::EmitConstructorBarrier() { * when necessary. */ bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb, - ::llvm::BasicBlock* llvm_bb) -{ + ::llvm::BasicBlock* llvm_bb) { bool res = false; // Assume success RegLocation rl_src[3]; RegLocation rl_dest = mir_graph_->GetBadLoc(); @@ -1556,8 +1503,7 @@ bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb, return res; } -void MirConverter::SetDexOffset(int32_t offset) -{ +void MirConverter::SetDexOffset(int32_t offset) { current_dalvik_offset_ = offset; ::llvm::SmallVector< ::llvm::Value*, 1> array_ref; array_ref.push_back(irb_->getInt32(offset)); @@ -1566,8 +1512,7 @@ void MirConverter::SetDexOffset(int32_t offset) } // Attach method info as metadata to special intrinsic -void MirConverter::SetMethodInfo() -{ +void MirConverter::SetMethodInfo() { // We don't want dex offset on this irb_->SetDexOffset(NULL); art::llvm::IntrinsicHelper::IntrinsicId id; @@ -1585,8 +1530,7 @@ void MirConverter::SetMethodInfo() SetDexOffset(current_dalvik_offset_); } -void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) -{ +void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) { SetDexOffset(bb->start_offset); for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { int opcode = mir->dalvikInsn.opcode; @@ -1636,8 +1580,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, 
::llvm::BasicBlock* llvm_bb) /* Extended MIR instructions like PHI */ void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir, - ::llvm::BasicBlock* llvm_bb) -{ + ::llvm::BasicBlock* llvm_bb) { switch (static_cast(mir->dalvikInsn.opcode)) { case kMirOpPhi: { @@ -1684,8 +1627,7 @@ void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir, } /* Handle the content in each basic block */ -bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) -{ +bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { if (bb->block_type == kDead) return false; ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(bb->id); if (llvm_bb == NULL) { @@ -1901,8 +1843,7 @@ bool MirConverter::CreateFunction() { return true; } -bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb) -{ +bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb) { // Skip the exit block if ((bb->block_type == kDead) ||(bb->block_type == kExitBlock)) { id_to_block_map_.Put(bb->id, NULL); @@ -1933,8 +1874,7 @@ bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb) * o Iterate through the MIR a basic block at a time, setting arguments * to recovered ssa name. */ -void MirConverter::MethodMIR2Bitcode() -{ +void MirConverter::MethodMIR2Bitcode() { InitIR(); // Create the function diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index e804215c11..9e144579db 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1002,8 +1002,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = { * discover that pc-relative displacements may not fit the selected * instruction. 
*/ -AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) -{ +AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) { LIR* lir; AssemblerStatus res = kSuccess; // Assume success @@ -1389,8 +1388,7 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) return res; } -int ArmMir2Lir::GetInsnSize(LIR* lir) -{ +int ArmMir2Lir::GetInsnSize(LIR* lir) { return EncodingMap[lir->opcode].size; } diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index a6720ce6f2..0e813247db 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -25,8 +25,7 @@ namespace art { /* Return the position of an ssa name within the argument list */ -int ArmMir2Lir::InPosition(int s_reg) -{ +int ArmMir2Lir::InPosition(int s_reg) { int v_reg = mir_graph_->SRegToVReg(s_reg); return v_reg - cu_->num_regs; } @@ -36,8 +35,7 @@ int ArmMir2Lir::InPosition(int s_reg) * there. NOTE: all live arg registers must be locked prior to this call * to avoid having them allocated as a temp by downstream utilities. */ -RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) -{ +RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) { int arg_num = InPosition(loc.s_reg_low); if (loc.wide) { if (arg_num == 2) { @@ -66,8 +64,7 @@ RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) * the frame, we can't use the normal LoadValue() because it assumed * a proper frame - and we're frameless. 
*/ -RegLocation ArmMir2Lir::LoadArg(RegLocation loc) -{ +RegLocation ArmMir2Lir::LoadArg(RegLocation loc) { if (loc.location == kLocDalvikFrame) { int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t); loc.low_reg = AllocTemp(); @@ -82,8 +79,7 @@ RegLocation ArmMir2Lir::LoadArg(RegLocation loc) } /* Lock any referenced arguments that arrive in registers */ -void ArmMir2Lir::LockLiveArgs(MIR* mir) -{ +void ArmMir2Lir::LockLiveArgs(MIR* mir) { int first_in = cu_->num_regs; const int num_arg_regs = 3; // TODO: generalize & move to RegUtil.cc for (int i = 0; i < mir->ssa_rep->num_uses; i++) { @@ -97,8 +93,7 @@ void ArmMir2Lir::LockLiveArgs(MIR* mir) /* Find the next MIR, which may be in a following basic block */ // TODO: should this be a utility in mir_graph? -MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) -{ +MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) { BasicBlock* bb = *p_bb; MIR* orig_mir = mir; while (bb != NULL) { @@ -123,8 +118,7 @@ MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) /* Used for the "verbose" listing */ //TODO: move to common code -void ArmMir2Lir::GenPrintLabel(MIR* mir) -{ +void ArmMir2Lir::GenPrintLabel(MIR* mir) { /* Mark the beginning of a Dalvik instruction for line tracking */ char* inst_str = cu_->verbose ? 
mir_graph_->GetDalvikDisassembly(mir) : NULL; @@ -132,8 +126,7 @@ void ArmMir2Lir::GenPrintLabel(MIR* mir) } MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir, - OpSize size, bool long_or_double, bool is_object) -{ + OpSize size, bool long_or_double, bool is_object) { int field_offset; bool is_volatile; uint32_t field_idx = mir->dalvikInsn.vC; @@ -158,8 +151,7 @@ MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir, } MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir, - OpSize size, bool long_or_double, bool is_object) -{ + OpSize size, bool long_or_double, bool is_object) { int field_offset; bool is_volatile; uint32_t field_idx = mir->dalvikInsn.vC; @@ -192,8 +184,7 @@ MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir, return GetNextMir(bb, mir); } -MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) -{ +MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) { RegLocation rl_src; RegLocation rl_dest; bool wide = (mir->ssa_rep->num_uses == 2); @@ -225,8 +216,7 @@ MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) * Special-case code genration for simple non-throwing leaf methods. 
*/ void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - SpecialCaseHandler special_case) -{ + SpecialCaseHandler special_case) { current_dalvik_offset_ = mir->offset; MIR* next_mir = NULL; switch (special_case) { @@ -319,8 +309,7 @@ void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, * cbnz r_idx, lp */ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpSparseSwitchTable(table); @@ -369,8 +358,7 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpPackedSwitchTable(table); @@ -427,8 +415,7 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, * * Total size is 4+(width * size + 1)/2 16-bit code units. */ -void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) -{ +void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; // Add the table to the list - we'll process it later FillArrayData *tab_rec = @@ -480,8 +467,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) * preserved. * */ -void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) -{ +void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); DCHECK_EQ(LW_SHAPE_THIN, 0); LoadValueDirectFixed(rl_src, r0); // Get obj @@ -515,8 +501,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) * a zero recursion count, it's safe to punch it back to the * initial, unlock thin state with a store word. 
*/ -void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) -{ +void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { DCHECK_EQ(LW_SHAPE_THIN, 0); FlushAllRegs(); LoadValueDirectFixed(rl_src, r0); // Get obj @@ -541,8 +526,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) GenMemBarrier(kStoreLoad); } -void ArmMir2Lir::GenMoveException(RegLocation rl_dest) -{ +void ArmMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int reset_reg = AllocTemp(); @@ -556,8 +540,7 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) /* * Mark garbage collection card. Skip if the value we're storing is null. */ -void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) -{ +void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) { int reg_card_base = AllocTemp(); int reg_card_no = AllocTemp(); LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL); @@ -571,8 +554,7 @@ void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) FreeTemp(reg_card_no); } -void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) -{ +void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { int spill_count = num_core_spills_ + num_fp_spills_; /* * On entry, r0, r1, r2 & r3 are live. 
Let the register allocation @@ -624,8 +606,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) FreeTemp(r3); } -void ArmMir2Lir::GenExitSequence() -{ +void ArmMir2Lir::GenExitSequence() { int spill_count = num_core_spills_ + num_fp_spills_; /* * In the exit path, r0/r1 are live - make sure they aren't diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 53a5e1a6dc..2c626a0e8f 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -21,8 +21,7 @@ namespace art { void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { int op = kThumbBkpt; RegLocation rl_result; @@ -68,8 +67,7 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, } void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { int op = kThumbBkpt; RegLocation rl_result; @@ -117,8 +115,7 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, } void ArmMir2Lir::GenConversion(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { int op = kThumbBkpt; int src_reg; RegLocation rl_result; @@ -176,8 +173,7 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, } void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, - bool is_double) -{ + bool is_double) { LIR* target = &block_label_list_[bb->taken->id]; RegLocation rl_src1; RegLocation rl_src2; @@ -229,8 +225,7 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { bool is_double = false; int 
default_result = -1; RegLocation rl_result; @@ -288,8 +283,7 @@ void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, StoreValue(rl_dest, rl_result); } -void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); @@ -297,8 +291,7 @@ void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) StoreValue(rl_dest, rl_result); } -void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index feea896e9f..ee2d76c7b7 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -25,8 +25,7 @@ namespace art { LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, - int src2, LIR* target) -{ + int src2, LIR* target) { OpRegReg(kOpCmp, src1, src2); return OpCondBranch(cond, target); } @@ -41,8 +40,7 @@ LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, * met, and an "E" means the instruction is executed if the condition * is not met. 
*/ -LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) -{ +LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) { int mask; int mask3 = 0; int mask2 = 0; @@ -86,8 +84,7 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) * done: */ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LIR* target1; LIR* target2; rl_src1 = LoadValueWide(rl_src1, kCoreReg); @@ -121,8 +118,7 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, } void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, - int64_t val, ConditionCode ccode) -{ + int64_t val, ConditionCode ccode) { int32_t val_lo = Low32Bits(val); int32_t val_hi = High32Bits(val); DCHECK(ModifiedImmediate(val_lo) >= 0); @@ -180,8 +176,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, OpCmpImmBranch(ccode, low_reg, val_lo, taken); } -void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) -{ +void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { RegLocation rl_result; RegLocation rl_src = mir_graph_->GetSrc(mir, 0); // Temporary debugging code @@ -249,8 +244,7 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) StoreValue(rl_dest, rl_result); } -void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) -{ +void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0); RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2); // Normalize such that if either operand is constant, src2 will be constant. @@ -315,8 +309,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) * is responsible for setting branch target field. 
*/ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value, - LIR* target) -{ + LIR* target) { LIR* branch; int mod_imm; ArmConditionCode arm_cond = ArmConditionEncoding(cond); @@ -341,8 +334,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value, return branch; } -LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) -{ +LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) { LIR* res; int opcode; if (ARM_FPREG(r_dest) || ARM_FPREG(r_src)) @@ -362,16 +354,14 @@ LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) return res; } -LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) -{ +LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) { LIR* res = OpRegCopyNoInsert(r_dest, r_src); AppendLIR(res); return res; } void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, - int src_hi) -{ + int src_hi) { bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi); bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi); DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi)); @@ -426,8 +416,7 @@ static const MagicTable magic_table[] = { // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4) bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { if ((lit < 0) || (lit >= static_cast(sizeof(magic_table)/sizeof(magic_table[0])))) { return false; } @@ -471,28 +460,24 @@ bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, } LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, - int reg1, int base, int offset, ThrowKind kind) -{ + int reg1, int base, int offset, ThrowKind kind) { LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm"; return NULL; } RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit, - bool is_div) -{ + bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm"; return rl_dest; } RegLocation 
ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, - bool is_div) -{ + bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRem for Arm"; return rl_dest; } -bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) -{ +bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { DCHECK_EQ(cu_->instruction_set, kThumb2); RegLocation rl_src1 = info->args[0]; RegLocation rl_src2 = info->args[1]; @@ -509,13 +494,11 @@ bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) return true; } -void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) -{ +void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { LOG(FATAL) << "Unexpected use of OpLea for Arm"; } -void ArmMir2Lir::OpTlsCmp(int offset, int val) -{ +void ArmMir2Lir::OpTlsCmp(int offset, int val) { LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm"; } @@ -577,25 +560,21 @@ bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) { return true; } -LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) -{ +LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) { return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target); } -LIR* ArmMir2Lir::OpVldm(int rBase, int count) -{ +LIR* ArmMir2Lir::OpVldm(int rBase, int count) { return NewLIR3(kThumb2Vldms, rBase, fr0, count); } -LIR* ArmMir2Lir::OpVstm(int rBase, int count) -{ +LIR* ArmMir2Lir::OpVstm(int rBase, int count) { return NewLIR3(kThumb2Vstms, rBase, fr0, count); } void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) -{ + int first_bit, int second_bit) { OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg, EncodeShift(kArmLsl, second_bit - first_bit)); if (first_bit != 0) { @@ -603,8 +582,7 @@ void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, } } -void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) -{ +void 
ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { int t_reg = AllocTemp(); NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0); FreeTemp(t_reg); @@ -612,22 +590,19 @@ void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) } // Test suspend flag, return target of taken suspend branch -LIR* ArmMir2Lir::OpTestSuspend(LIR* target) -{ +LIR* ArmMir2Lir::OpTestSuspend(LIR* target) { NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1); return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target); } // Decrement register and branch on condition -LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) -{ +LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) { // Combine sub & test using sub setflags encoding here NewLIR3(kThumb2SubsRRI12, reg, reg, 1); return OpCondBranch(c_code, target); } -void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) -{ +void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { #if ANDROID_SMP != 0 int dmb_flavor; // TODO: revisit Arm barrier kinds @@ -646,8 +621,7 @@ void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) #endif } -void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) -{ +void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { rl_src = LoadValueWide(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int z_reg = AllocTemp(); @@ -672,16 +646,14 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) * is not usual for dx to generate, but it is legal (for now). In a future rev of * dex, we'll want to make this case illegal. 
*/ -bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) -{ +bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) { DCHECK(rl_src.wide); DCHECK(rl_dest.wide); return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1); } void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { /* * To pull off inline multiply, we have a worst-case requirement of 8 temporary * registers. Normally for Arm, we get 5. We can get to 6 by including @@ -754,32 +726,27 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, } void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenAddLong for Arm"; } void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenSubLong for Arm"; } void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenAndLong for Arm"; } void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenOrLong for Arm"; } void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of genXoLong for Arm"; } @@ -787,8 +754,7 @@ void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, * Generate array load */ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_dest, int scale) -{ + RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ 
-878,8 +844,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, * */ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -968,8 +933,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, * */ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); @@ -1025,8 +989,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, } void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) -{ + RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) { rl_src = LoadValueWide(rl_src, kCoreReg); // Per spec, we only care about low 6 bits of shift amount. int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f; @@ -1099,8 +1062,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, } void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) { if (!rl_src2.is_const) { // Don't bother with special handling for subtract from immediate. 
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index 4bece136bc..7021593a79 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -34,26 +34,22 @@ static int core_temps[] = {r0, r1, r2, r3, r12}; static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15}; -RegLocation ArmMir2Lir::LocCReturn() -{ +RegLocation ArmMir2Lir::LocCReturn() { RegLocation res = ARM_LOC_C_RETURN; return res; } -RegLocation ArmMir2Lir::LocCReturnWide() -{ +RegLocation ArmMir2Lir::LocCReturnWide() { RegLocation res = ARM_LOC_C_RETURN_WIDE; return res; } -RegLocation ArmMir2Lir::LocCReturnFloat() -{ +RegLocation ArmMir2Lir::LocCReturnFloat() { RegLocation res = ARM_LOC_C_RETURN_FLOAT; return res; } -RegLocation ArmMir2Lir::LocCReturnDouble() -{ +RegLocation ArmMir2Lir::LocCReturnDouble() { RegLocation res = ARM_LOC_C_RETURN_DOUBLE; return res; } @@ -85,28 +81,24 @@ int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) { // Create a double from a pair of singles. -int ArmMir2Lir::S2d(int low_reg, int high_reg) -{ +int ArmMir2Lir::S2d(int low_reg, int high_reg) { return ARM_S2D(low_reg, high_reg); } // Return mask to strip off fp reg flags and bias. -uint32_t ArmMir2Lir::FpRegMask() -{ +uint32_t ArmMir2Lir::FpRegMask() { return ARM_FP_REG_MASK; } // True if both regs single, both core or both double. -bool ArmMir2Lir::SameRegType(int reg1, int reg2) -{ +bool ArmMir2Lir::SameRegType(int reg1, int reg2) { return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2)); } /* * Decode the register id. 
*/ -uint64_t ArmMir2Lir::GetRegMaskCommon(int reg) -{ +uint64_t ArmMir2Lir::GetRegMaskCommon(int reg) { uint64_t seed; int shift; int reg_id; @@ -122,13 +114,11 @@ uint64_t ArmMir2Lir::GetRegMaskCommon(int reg) return (seed << shift); } -uint64_t ArmMir2Lir::GetPCUseDefEncoding() -{ +uint64_t ArmMir2Lir::GetPCUseDefEncoding() { return ENCODE_ARM_REG_PC; } -void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir) -{ +void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir) { DCHECK_EQ(cu_->instruction_set, kThumb2); // Thumb2 specific setup @@ -203,8 +193,7 @@ void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir) } } -ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode) -{ +ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode) { ArmConditionCode res; switch (ccode) { case kCondEq: res = kArmCondEq; break; @@ -257,8 +246,7 @@ static const char* shift_names[4] = { "ror"}; /* Decode and print a ARM register name */ -static char* DecodeRegList(int opcode, int vector, char* buf) -{ +static char* DecodeRegList(int opcode, int vector, char* buf) { int i; bool printed = false; buf[0] = 0; @@ -281,8 +269,7 @@ static char* DecodeRegList(int opcode, int vector, char* buf) return buf; } -static char* DecodeFPCSRegList(int count, int base, char* buf) -{ +static char* DecodeFPCSRegList(int count, int base, char* buf) { sprintf(buf, "s%d", base); for (int i = 1; i < count; i++) { sprintf(buf + strlen(buf), ", s%d",base + i); @@ -290,8 +277,7 @@ static char* DecodeFPCSRegList(int count, int base, char* buf) return buf; } -static int ExpandImmediate(int value) -{ +static int ExpandImmediate(int value) { int mode = (value & 0xf00) >> 8; uint32_t bits = value & 0xff; switch (mode) { @@ -316,8 +302,7 @@ const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc", * Interpret a format string and build a string no longer than size * See format key in Assemble.c. 
*/ -std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) -{ +std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) { std::string buf; int i; const char* fmt_end = &fmt[strlen(fmt)]; @@ -455,8 +440,7 @@ std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char return buf; } -void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix) -{ +void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix) { char buf[256]; buf[0] = 0; @@ -501,8 +485,7 @@ void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefi } } -bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir) -{ +bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir) { return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond)); } @@ -527,8 +510,7 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, * Alloc a pair of core registers, or a double. Low reg in low byte, * high reg in next byte. 
*/ -int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) -{ +int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) { int high_reg; int low_reg; int res = 0; @@ -544,15 +526,13 @@ int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) return res; } -int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) -{ +int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) { if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) return AllocTempFloat(); return AllocTemp(); } -void ArmMir2Lir::CompilerInitializeRegAlloc() -{ +void ArmMir2Lir::CompilerInitializeRegAlloc() { int num_regs = sizeof(core_regs)/sizeof(*core_regs); int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs); int num_temps = sizeof(core_temps)/sizeof(*core_temps); @@ -591,8 +571,7 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() } void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, - RegLocation rl_free) -{ + RegLocation rl_free) { if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { // No overlap, free both @@ -606,8 +585,7 @@ void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, * machinery is in place, always spill lr. */ -void ArmMir2Lir::AdjustSpillMask() -{ +void ArmMir2Lir::AdjustSpillMask() { core_spill_mask_ |= (1 << rARM_LR); num_core_spills_++; } @@ -618,8 +596,7 @@ void ArmMir2Lir::AdjustSpillMask() * include any holes in the mask. Associate holes with * Dalvik register INVALID_VREG (0xFFFFU). 
*/ -void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg) -{ +void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg) { DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE); reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE; // Ensure fp_vmap_table is large enough @@ -634,8 +611,7 @@ void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg) fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE; } -void ArmMir2Lir::FlushRegWide(int reg1, int reg2) -{ +void ArmMir2Lir::FlushRegWide(int reg1, int reg2) { RegisterInfo* info1 = GetRegInfo(reg1); RegisterInfo* info2 = GetRegInfo(reg2); DCHECK(info1 && info2 && info1->pair && info2->pair && @@ -657,8 +633,7 @@ void ArmMir2Lir::FlushRegWide(int reg1, int reg2) } } -void ArmMir2Lir::FlushReg(int reg) -{ +void ArmMir2Lir::FlushReg(int reg) { RegisterInfo* info = GetRegInfo(reg); if (info->live && info->dirty) { info->dirty = false; @@ -673,8 +648,7 @@ bool ArmMir2Lir::IsFpReg(int reg) { } /* Clobber all regs that might be used by an external C call */ -void ArmMir2Lir::ClobberCalleeSave() -{ +void ArmMir2Lir::ClobberCalleeSave() { Clobber(r0); Clobber(r1); Clobber(r2); @@ -699,8 +673,7 @@ void ArmMir2Lir::ClobberCalleeSave() Clobber(fr15); } -RegLocation ArmMir2Lir::GetReturnWideAlt() -{ +RegLocation ArmMir2Lir::GetReturnWideAlt() { RegLocation res = LocCReturnWide(); res.low_reg = r2; res.high_reg = r3; @@ -712,8 +685,7 @@ RegLocation ArmMir2Lir::GetReturnWideAlt() return res; } -RegLocation ArmMir2Lir::GetReturnAlt() -{ +RegLocation ArmMir2Lir::GetReturnAlt() { RegLocation res = LocCReturn(); res.low_reg = r1; Clobber(r1); @@ -721,15 +693,13 @@ RegLocation ArmMir2Lir::GetReturnAlt() return res; } -ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg) -{ +ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg) { return ARM_FPREG(reg) ? 
®_pool_->FPRegs[reg & ARM_FP_REG_MASK] : ®_pool_->core_regs[reg]; } /* To be used when explicitly managing register use */ -void ArmMir2Lir::LockCallTemps() -{ +void ArmMir2Lir::LockCallTemps() { LockTemp(r0); LockTemp(r1); LockTemp(r2); @@ -737,32 +707,27 @@ void ArmMir2Lir::LockCallTemps() } /* To be used when explicitly managing register use */ -void ArmMir2Lir::FreeCallTemps() -{ +void ArmMir2Lir::FreeCallTemps() { FreeTemp(r0); FreeTemp(r1); FreeTemp(r2); FreeTemp(r3); } -int ArmMir2Lir::LoadHelper(int offset) -{ +int ArmMir2Lir::LoadHelper(int offset) { LoadWordDisp(rARM_SELF, offset, rARM_LR); return rARM_LR; } -uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) -{ +uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) { return ArmMir2Lir::EncodingMap[opcode].flags; } -const char* ArmMir2Lir::GetTargetInstName(int opcode) -{ +const char* ArmMir2Lir::GetTargetInstName(int opcode) { return ArmMir2Lir::EncodingMap[opcode].name; } -const char* ArmMir2Lir::GetTargetInstFmt(int opcode) -{ +const char* ArmMir2Lir::GetTargetInstFmt(int opcode) { return ArmMir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index abf921f8ad..80f597d640 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -22,8 +22,7 @@ namespace art { /* This file contains codegen for the Thumb ISA. */ -static int EncodeImmSingle(int value) -{ +static int EncodeImmSingle(int value) { int res; int bit_a = (value & 0x80000000) >> 31; int not_bit_b = (value & 0x40000000) >> 30; @@ -48,8 +47,7 @@ static int EncodeImmSingle(int value) * Determine whether value can be encoded as a Thumb2 floating point * immediate. If not, return -1. If so return encoded 8-bit value. 
*/ -static int EncodeImmDouble(int64_t value) -{ +static int EncodeImmDouble(int64_t value) { int res; int bit_a = (value & 0x8000000000000000ll) >> 63; int not_bit_b = (value & 0x4000000000000000ll) >> 62; @@ -70,8 +68,7 @@ static int EncodeImmDouble(int64_t value) return res; } -LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) -{ +LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) { DCHECK(ARM_SINGLEREG(r_dest)); if (value == 0) { // TODO: we need better info about the target CPU. a vector exclusive or @@ -98,8 +95,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) return load_pc_rel; } -static int LeadingZeros(uint32_t val) -{ +static int LeadingZeros(uint32_t val) { uint32_t alt; int n; int count; @@ -121,8 +117,7 @@ static int LeadingZeros(uint32_t val) * Determine whether value can be encoded as a Thumb2 modified * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form. */ -int ArmMir2Lir::ModifiedImmediate(uint32_t value) -{ +int ArmMir2Lir::ModifiedImmediate(uint32_t value) { int z_leading; int z_trailing; uint32_t b0 = value & 0xff; @@ -151,23 +146,19 @@ int ArmMir2Lir::ModifiedImmediate(uint32_t value) return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */ } -bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) { return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0); } -bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) { return EncodeImmSingle(value) >= 0; } -bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) { return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value)); } -bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) { return EncodeImmDouble(value) >= 0; } @@ -179,8 +170,7 @@ 
bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) { LIR* res; int mod_imm; @@ -214,23 +204,20 @@ LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly*/); res->target = target; return res; } -LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */, ArmConditionEncoding(cc)); branch->target = target; return branch; } -LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) { ArmOpcode opcode = kThumbBkpt; switch (op) { case kOpBlx: @@ -243,8 +230,7 @@ LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) } LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, - int shift) -{ + int shift) { bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2)); ArmOpcode opcode = kThumbBkpt; switch (op) { @@ -358,14 +344,12 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, } } -LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { return OpRegRegShift(op, r_dest_src1, r_src2, 0); } LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1, - int r_src2, int shift) -{ + int r_src2, int shift) { ArmOpcode opcode = kThumbBkpt; bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) && ARM_LOWREG(r_src2); @@ -430,13 +414,11 @@ LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1, 
} } -LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) -{ +LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) { return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0); } -LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) -{ +LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { LIR* res; bool neg = (value < 0); int abs_value = (neg) ? -value : value; @@ -560,8 +542,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) } /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */ -LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) -{ +LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) { bool neg = (value < 0); int abs_value = (neg) ? -value : value; bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1)); @@ -605,8 +586,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) } } -LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { LIR* res = NULL; int32_t val_lo = Low32Bits(value); int32_t val_hi = High32Bits(value); @@ -656,8 +636,7 @@ int ArmMir2Lir::EncodeShift(int code, int amount) { } LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, - int scale, OpSize size) -{ + int scale, OpSize size) { bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest); LIR* load; ArmOpcode opcode = kThumbBkpt; @@ -721,8 +700,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, } LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src); LIR* store = NULL; ArmOpcode opcode = kThumbBkpt; @@ -787,8 +765,7 @@ LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, 
int r_src, * performing null check, incoming MIR can be null. */ LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, - int r_dest_hi, OpSize size, int s_reg) -{ + int r_dest_hi, OpSize size, int s_reg) { LIR* load = NULL; ArmOpcode opcode = kThumbBkpt; bool short_form = false; @@ -908,14 +885,12 @@ LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, } LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest, - OpSize size, int s_reg) -{ + OpSize size, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg); } LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, - int r_dest_hi, int s_reg) -{ + int r_dest_hi, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg); } @@ -1024,19 +999,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement, } LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src, - OpSize size) -{ + OpSize size) { return StoreBaseDispBody(rBase, displacement, r_src, -1, size); } LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong); } -LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src)); if (ARM_DOUBLEREG(r_dest)) { @@ -1056,36 +1028,31 @@ LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for Arm"; return NULL; } -LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) { LOG(FATAL) << "Unexpected use of OpMem for Arm"; return NULL; } LIR* 
ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, int r_src, int r_src_hi, OpSize size, - int s_reg) -{ + int s_reg) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm"; return NULL; } -LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) -{ +LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) { LOG(FATAL) << "Unexpected use of OpRegMem for Arm"; return NULL; } LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, int r_dest, int r_dest_hi, OpSize size, - int s_reg) -{ + int s_reg) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm"; return NULL; } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 5c10c4ce2b..e728d2769b 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -23,8 +23,7 @@ namespace art { -bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) -{ +bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) { bool res = false; if (rl_src.is_const) { if (rl_src.wide) { @@ -44,27 +43,23 @@ bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) return res; } -void Mir2Lir::MarkSafepointPC(LIR* inst) -{ +void Mir2Lir::MarkSafepointPC(LIR* inst) { inst->def_mask = ENCODE_ALL; LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC); DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL); } -bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) -{ +bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) { return cu_->compiler_driver->ComputeInstanceFieldInfo( field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put); } /* Convert an instruction to a NOP */ -void Mir2Lir::NopLIR( LIR* lir) -{ +void Mir2Lir::NopLIR( LIR* lir) { lir->flags.is_nop = true; } -void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) -{ +void 
Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) { uint64_t *mask_ptr; uint64_t mask = ENCODE_MEM;; DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE)); @@ -101,8 +96,7 @@ void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) * Mark load/store instructions that access Dalvik registers through the stack. */ void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, - bool is64bit) -{ + bool is64bit) { SetMemRefType(lir, is_load, kDalvikReg); /* @@ -118,8 +112,7 @@ void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, #define DUMP_RESOURCE_MASK(X) /* Pretty-print a LIR instruction */ -void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) -{ +void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { int offset = lir->offset; int dest = lir->operands[0]; const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops)); @@ -204,8 +197,7 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) } } -void Mir2Lir::DumpPromotionMap() -{ +void Mir2Lir::DumpPromotionMap() { int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1; for (int i = 0; i < num_regs; i++) { PromotionMap v_reg_map = promotion_map_[i]; @@ -249,8 +241,7 @@ void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descri } /* Dump instructions and constant pool contents */ -void Mir2Lir::CodegenDump() -{ +void Mir2Lir::CodegenDump() { LOG(INFO) << "Dumping LIR insns for " << PrettyMethod(cu_->method_idx, *cu_->dex_file); LIR* lir_insn; @@ -291,8 +282,7 @@ void Mir2Lir::CodegenDump() * Search the existing constants in the literal pool for an exact or close match * within specified delta (greater or equal to 0). 
*/ -LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) -{ +LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) { while (data_target) { if ((static_cast(value - data_target->operands[0])) <= delta) return data_target; @@ -302,8 +292,7 @@ LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) } /* Search the existing constants in the literal pool for an exact wide match */ -LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) -{ +LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) { bool lo_match = false; LIR* lo_target = NULL; while (data_target) { @@ -328,8 +317,7 @@ LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) */ /* Add a 32-bit constant to the constant pool */ -LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) -{ +LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) { /* Add the constant to the literal pool */ if (constant_list_p) { LIR* new_value = static_cast(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData)); @@ -342,8 +330,7 @@ LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) } /* Add a 64-bit constant to the constant pool or mixed with code */ -LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) -{ +LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) { AddWordData(constant_list_p, val_hi); return AddWordData(constant_list_p, val_lo); } @@ -362,8 +349,7 @@ static void AlignBuffer(std::vector&buf, size_t offset) { } /* Write the literal pool to the output stream */ -void Mir2Lir::InstallLiteralPools() -{ +void Mir2Lir::InstallLiteralPools() { AlignBuffer(code_buffer_, data_offset_); LIR* data_lir = literal_list_; while (data_lir != NULL) { @@ -404,8 +390,7 @@ void Mir2Lir::InstallLiteralPools() } /* Write the switch tables to the output stream */ -void Mir2Lir::InstallSwitchTables() -{ +void Mir2Lir::InstallSwitchTables() { 
GrowableArray::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable* tab_rec = iterator.Next(); @@ -462,8 +447,7 @@ void Mir2Lir::InstallSwitchTables() } /* Write the fill array dta to the output stream */ -void Mir2Lir::InstallFillArrayData() -{ +void Mir2Lir::InstallFillArrayData() { GrowableArray::Iterator iterator(&fill_array_data_); while (true) { Mir2Lir::FillArrayData *tab_rec = iterator.Next(); @@ -476,8 +460,7 @@ void Mir2Lir::InstallFillArrayData() } } -static int AssignLiteralOffsetCommon(LIR* lir, int offset) -{ +static int AssignLiteralOffsetCommon(LIR* lir, int offset) { for (;lir != NULL; lir = lir->next) { lir->offset = offset; offset += 4; @@ -486,8 +469,7 @@ static int AssignLiteralOffsetCommon(LIR* lir, int offset) } // Make sure we have a code address for every declared catch entry -bool Mir2Lir::VerifyCatchEntries() -{ +bool Mir2Lir::VerifyCatchEntries() { bool success = true; for (std::set::const_iterator it = mir_graph_->catches_.begin(); it != mir_graph_->catches_.end(); ++it) { @@ -521,8 +503,7 @@ bool Mir2Lir::VerifyCatchEntries() } -void Mir2Lir::CreateMappingTables() -{ +void Mir2Lir::CreateMappingTables() { for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { pc2dex_mapping_table_.push_back(tgt_lir->offset); @@ -650,16 +631,14 @@ void Mir2Lir::CreateNativeGcMap() { } /* Determine the offset of each literal field */ -int Mir2Lir::AssignLiteralOffset(int offset) -{ +int Mir2Lir::AssignLiteralOffset(int offset) { offset = AssignLiteralOffsetCommon(literal_list_, offset); offset = AssignLiteralOffsetCommon(code_literal_list_, offset); offset = AssignLiteralOffsetCommon(method_literal_list_, offset); return offset; } -int Mir2Lir::AssignSwitchTablesOffset(int offset) -{ +int Mir2Lir::AssignSwitchTablesOffset(int offset) { GrowableArray::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable *tab_rec = 
iterator.Next(); @@ -676,8 +655,7 @@ int Mir2Lir::AssignSwitchTablesOffset(int offset) return offset; } -int Mir2Lir::AssignFillArrayDataOffset(int offset) -{ +int Mir2Lir::AssignFillArrayDataOffset(int offset) { GrowableArray::Iterator iterator(&fill_array_data_); while (true) { Mir2Lir::FillArrayData *tab_rec = iterator.Next(); @@ -691,8 +669,7 @@ int Mir2Lir::AssignFillArrayDataOffset(int offset) } // LIR offset assignment. -int Mir2Lir::AssignInsnOffsets() -{ +int Mir2Lir::AssignInsnOffsets() { LIR* lir; int offset = 0; @@ -720,8 +697,7 @@ int Mir2Lir::AssignInsnOffsets() * Walk the compilation unit and assign offsets to instructions * and literals and compute the total size of the compiled unit. */ -void Mir2Lir::AssignOffsets() -{ +void Mir2Lir::AssignOffsets() { int offset = AssignInsnOffsets(); /* Const values have to be word aligned */ @@ -744,8 +720,7 @@ void Mir2Lir::AssignOffsets() * before sending them off to the assembler. If out-of-range branch distance is * seen rearrange the instructions a bit to correct it. */ -void Mir2Lir::AssembleLIR() -{ +void Mir2Lir::AssembleLIR() { AssignOffsets(); int assembler_retries = 0; /* @@ -791,8 +766,7 @@ void Mir2Lir::AssembleLIR() * all resource flags on this to prevent code motion across * target boundaries. KeyVal is just there for debugging. 
*/ -LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) -{ +LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) { SafeMap::iterator it; it = boundary_map_.find(vaddr); if (it == boundary_map_.end()) { @@ -806,8 +780,7 @@ LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) return new_label; } -void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) -{ +void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) { const uint16_t* table = tab_rec->table; int base_vaddr = tab_rec->vaddr; const int *targets = reinterpret_cast(&table[4]); @@ -818,8 +791,7 @@ void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) } } -void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) -{ +void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) { const uint16_t* table = tab_rec->table; int base_vaddr = tab_rec->vaddr; int entries = table[1]; @@ -830,8 +802,7 @@ void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) } } -void Mir2Lir::ProcessSwitchTables() -{ +void Mir2Lir::ProcessSwitchTables() { GrowableArray::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable *tab_rec = iterator.Next(); @@ -846,7 +817,7 @@ void Mir2Lir::ProcessSwitchTables() } } -void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) +void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) { /* * Sparse switch data format: * ushort ident = 0x0200 magic value @@ -856,7 +827,6 @@ void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) * * Total size is (2+size*4) 16-bit code units. 
*/ -{ uint16_t ident = table[0]; int entries = table[1]; const int* keys = reinterpret_cast(&table[2]); @@ -868,7 +838,7 @@ void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) } } -void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) +void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) { /* * Packed switch data format: * ushort ident = 0x0100 magic value @@ -878,7 +848,6 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) * * Total size is (4+size*2) 16-bit code units. */ -{ uint16_t ident = table[0]; const int* targets = reinterpret_cast(&table[4]); int entries = table[1]; @@ -897,8 +866,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) * which we split a single Dalvik instruction, only the first MIR op * associated with a Dalvik PC should be entered into the map. */ -LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) -{ +LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) { LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast(inst_str)); if (boundary_map_.find(offset) == boundary_map_.end()) { boundary_map_.Put(offset, res); @@ -906,8 +874,7 @@ LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) return res; } -bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) -{ +bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { bool is_taken; switch (opcode) { case Instruction::IF_EQ: is_taken = (src1 == src2); break; @@ -971,8 +938,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena core_spill_mask_(0), fp_spill_mask_(0), first_lir_insn_(NULL), - last_lir_insn_(NULL) - { + last_lir_insn_(NULL) { promotion_map_ = static_cast (arena_->NewMem((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) * sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc)); @@ -1060,8 +1026,7 @@ int Mir2Lir::ComputeFrameSize() { * Append an LIR instruction to the LIR list maintained by a compilation * 
unit */ -void Mir2Lir::AppendLIR(LIR* lir) -{ +void Mir2Lir::AppendLIR(LIR* lir) { if (first_lir_insn_ == NULL) { DCHECK(last_lir_insn_ == NULL); last_lir_insn_ = first_lir_insn_ = lir; @@ -1080,8 +1045,7 @@ void Mir2Lir::AppendLIR(LIR* lir) * * prev_lir <-> new_lir <-> current_lir */ -void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) -{ +void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) { DCHECK(current_lir->prev != NULL); LIR *prev_lir = current_lir->prev; @@ -1097,8 +1061,7 @@ void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) * * current_lir -> new_lir -> old_next */ -void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) -{ +void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) { new_lir->prev = current_lir; new_lir->next = current_lir->next; current_lir->next = new_lir; diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 865b9c5c4d..a34d2a9e76 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -33,8 +33,7 @@ namespace art { * Generate an kPseudoBarrier marker to indicate the boundary of special * blocks. 
*/ -void Mir2Lir::GenBarrier() -{ +void Mir2Lir::GenBarrier() { LIR* barrier = NewLIR0(kPseudoBarrier); /* Mark all resources as being clobbered */ barrier->def_mask = -1; @@ -42,8 +41,7 @@ void Mir2Lir::GenBarrier() // FIXME: need to do some work to split out targets with // condition codes and those without -LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) -{ +LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) { DCHECK_NE(cu_->instruction_set, kMips); LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_); LIR* branch = OpCondBranch(c_code, tgt); @@ -52,8 +50,7 @@ LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) return branch; } -LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) -{ +LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val); LIR* branch; if (c_code == kCondAl) { @@ -67,8 +64,7 @@ LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKin } /* Perform null-check on a register. 
*/ -LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) -{ +LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) { if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && opt_flags & MIR_IGNORE_NULL_CHECK) { return NULL; @@ -78,8 +74,7 @@ LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) /* Perform check on two registers */ LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2, - ThrowKind kind) -{ + ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2); LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt); // Remember branch target - will process later @@ -89,8 +84,7 @@ LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2, void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, - LIR* fall_through) -{ + LIR* fall_through) { ConditionCode cond; switch (opcode) { case Instruction::IF_EQ: @@ -143,8 +137,7 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, } void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, - LIR* fall_through) -{ + LIR* fall_through) { ConditionCode cond; rl_src = LoadValue(rl_src, kCoreReg); switch (opcode) { @@ -174,8 +167,7 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s OpUnconditionalBranch(fall_through); } -void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (rl_src.location == kLocPhysReg) { OpRegCopy(rl_result.low_reg, rl_src.low_reg); @@ -187,8 +179,7 @@ void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) } void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { rl_src = LoadValue(rl_src, kCoreReg); RegLocation rl_result = 
EvalLoc(rl_dest, kCoreReg, true); OpKind op = kOpInvalid; @@ -215,8 +206,7 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, * Note: AllocFromCode will handle checks for errNegativeArraySize. */ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { FlushAllRegs(); /* Everything to home location */ int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, @@ -236,8 +226,7 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, * code throws runtime exception "bad Filled array req" for 'D' and 'J'. * Current code also throws internal unimp if not 'L', '[' or 'I'. */ -void Mir2Lir::GenFilledNewArray(CallInfo* info) -{ +void Mir2Lir::GenFilledNewArray(CallInfo* info) { int elems = info->num_arg_words; int type_idx = info->index; FlushAllRegs(); /* Everything to home location */ @@ -342,8 +331,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) } void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; int ssb_index; bool is_volatile; @@ -428,8 +416,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do } void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, - bool is_long_or_double, bool is_object) -{ + bool is_long_or_double, bool is_object) { int field_offset; int ssb_index; bool is_volatile; @@ -510,8 +497,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } -void Mir2Lir::HandleSuspendLaunchPads() -{ +void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode); for (int i = 0; i < num_elems; i++) { @@ -527,8 +513,7 @@ void Mir2Lir::HandleSuspendLaunchPads() } } -void Mir2Lir::HandleIntrinsicLaunchPads() -{ +void Mir2Lir::HandleIntrinsicLaunchPads() { int num_elems = 
intrinsic_launchpads_.Size(); for (int i = 0; i < num_elems; i++) { ResetRegPool(); @@ -546,8 +531,7 @@ void Mir2Lir::HandleIntrinsicLaunchPads() } } -void Mir2Lir::HandleThrowLaunchPads() -{ +void Mir2Lir::HandleThrowLaunchPads() { int num_elems = throw_launchpads_.Size(); for (int i = 0; i < num_elems; i++) { ResetRegPool(); @@ -636,8 +620,7 @@ void Mir2Lir::HandleThrowLaunchPads() void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; bool is_volatile; @@ -697,8 +680,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; bool is_volatile; @@ -744,8 +726,7 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } -void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { RegLocation rl_method = LoadCurrMethod(); int res_reg = AllocTemp(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -803,8 +784,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) } } -void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { /* NOTE: Most strings should be available at compile time */ int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() + (sizeof(mirror::String*) * string_idx); @@ -860,8 +840,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) * Let helper function take care of everything. 
Will * call Class::NewInstanceFromCode(type_idx, method); */ -void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { FlushAllRegs(); /* Everything to home location */ // alloc will always check for resolution, do we also need to verify // access because the verifier was unable to? @@ -877,8 +856,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) StoreValue(rl_dest, rl_result); } -void Mir2Lir::GenThrow(RegLocation rl_src) -{ +void Mir2Lir::GenThrow(RegLocation rl_src) { FlushAllRegs(); CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); } @@ -1065,8 +1043,7 @@ void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation } } -void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) -{ +void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) { bool type_known_final, type_known_abstract, use_declaring_class; bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, @@ -1142,8 +1119,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ } void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { RegLocation rl_result; if (cu_->instruction_set == kThumb2) { /* @@ -1161,7 +1137,7 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des rl_src2 = LoadValueWide(rl_src2, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); // The longs may overlap - use intermediate temp if so - if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)){ + if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) { int t_reg = AllocTemp(); OpRegRegReg(first_op, t_reg, rl_src1.low_reg, 
rl_src2.low_reg); OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg); @@ -1190,8 +1166,7 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { int func_offset = -1; // Make gcc happy switch (opcode) { @@ -1218,8 +1193,7 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { OpKind op = kOpBkpt; bool is_div_rem = false; bool check_zero = false; @@ -1353,14 +1327,12 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, * or produce corresponding Thumb instructions directly. */ -static bool IsPowerOfTwo(int x) -{ +static bool IsPowerOfTwo(int x) { return (x & (x - 1)) == 0; } // Returns true if no more than two bits are set in 'x'. -static bool IsPopCountLE2(unsigned int x) -{ +static bool IsPopCountLE2(unsigned int x) { x &= x - 1; return (x & (x - 1)) == 0; } @@ -1382,8 +1354,7 @@ static int LowestSetBit(unsigned int x) { // Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit' // and store the result in 'rl_dest'. bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) { return false; } @@ -1435,8 +1406,7 @@ bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode, // Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit' // and store the result in 'rl_dest'. 
-bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) -{ +bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { // Can we simplify this multiplication? bool power_of_two = false; bool pop_count_le2 = false; @@ -1476,8 +1446,7 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li } void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, - int lit) -{ + int lit) { RegLocation rl_result; OpKind op = static_cast(0); /* Make gcc happy */ int shift_op = false; @@ -1613,8 +1582,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re } void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { RegLocation rl_result; OpKind first_op = kOpBkpt; OpKind second_op = kOpBkpt; @@ -1741,8 +1709,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } void Mir2Lir::GenConversionCall(int func_offset, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { /* * Don't optimize the register usage since it calls out to support * functions @@ -1767,8 +1734,7 @@ void Mir2Lir::GenConversionCall(int func_offset, } /* Check if we need to check for pending suspend request */ -void Mir2Lir::GenSuspendTest(int opt_flags) -{ +void Mir2Lir::GenSuspendTest(int opt_flags) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { return; } @@ -1782,8 +1748,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) } /* Check if we need to check for pending suspend request */ -void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) -{ +void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { OpUnconditionalBranch(target); return; diff --git a/compiler/dex/quick/gen_invoke.cc 
b/compiler/dex/quick/gen_invoke.cc index e3993e0617..14e395cdac 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -37,14 +37,12 @@ namespace art { * has a memory call operation, part 1 is a NOP for x86. For other targets, * load arguments between the two parts. */ -int Mir2Lir::CallHelperSetup(int helper_offset) -{ +int Mir2Lir::CallHelperSetup(int helper_offset) { return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset); } /* NOTE: if r_tgt is a temp, it will be freed following use */ -LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) -{ +LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) { LIR* call_inst; if (cu_->instruction_set == kX86) { call_inst = OpThreadMem(kOpBlx, helper_offset); @@ -233,8 +231,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset, * ArgLocs is an array of location records describing the incoming arguments * with one location record per word of argument. 
*/ -void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) -{ +void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { /* * Dummy up a RegLocation for the incoming Method* * It will attempt to keep kArg0 live (or copy it to home location @@ -316,8 +313,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t unused, uintptr_t direct_code, uintptr_t direct_method, - InvokeType type) -{ + InvokeType type) { Mir2Lir* cg = static_cast(cu->cg.get()); if (cu->instruction_set != kThumb2) { // Disable sharpening @@ -420,8 +416,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, - InvokeType unused3) -{ + InvokeType unused3) { Mir2Lir* cg = static_cast(cu->cg.get()); /* * This is the fast path in which the target virtual method is @@ -469,8 +464,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t unused, uintptr_t unused2, - uintptr_t direct_method, InvokeType unused4) -{ + uintptr_t direct_method, InvokeType unused4) { Mir2Lir* cg = static_cast(cu->cg.get()); if (cu->instruction_set != kThumb2) { // Disable sharpening @@ -536,8 +530,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline, int state, const MethodReference& target_method, - uint32_t method_idx) -{ + uint32_t method_idx) { Mir2Lir* cg = static_cast(cu->cg.get()); /* * This handles the case in which the base method is not fully @@ -561,8 +554,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, const MethodReference& target_method, uint32_t method_idx, uintptr_t 
unused, uintptr_t unused2, - InvokeType unused3) -{ + InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -570,8 +562,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,8 +570,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -588,8 +578,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -599,8 +588,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu, const MethodReference& target_method, uint32_t unused, uintptr_t unused2, uintptr_t unused3, - InvokeType unused4) -{ + InvokeType unused4) { int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); 
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -609,8 +597,7 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, - uintptr_t direct_method, InvokeType type, bool skip_this) -{ + uintptr_t direct_method, InvokeType type, bool skip_this) { int last_arg_reg = TargetReg(kArg3); int next_reg = TargetReg(kArg1); int next_arg = 0; @@ -649,8 +636,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, - uintptr_t direct_method, InvokeType type, bool skip_this) -{ + uintptr_t direct_method, InvokeType type, bool skip_this) { RegLocation rl_arg; /* If no arguments, just return */ @@ -749,8 +735,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, - InvokeType type, bool skip_this) -{ + InvokeType type, bool skip_this) { // If we can treat it as non-range (Jumbo ops will use range form) if (info->num_arg_words <= 5) @@ -833,8 +818,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, return call_state; } -RegLocation Mir2Lir::InlineTarget(CallInfo* info) -{ +RegLocation Mir2Lir::InlineTarget(CallInfo* info) { RegLocation res; if (info->result.location == kLocInvalid) { res = GetReturn(false); @@ -844,8 +828,7 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) return res; } -RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) -{ +RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) { RegLocation res; if (info->result.location == kLocInvalid) { res = GetReturnWide(false); @@ -855,8 +838,7 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) return res; } -bool Mir2Lir::GenInlinedCharAt(CallInfo* info) -{ 
+bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -932,8 +914,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) } // Generates an inlined String.is_empty or String.length. -bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) -{ +bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -961,8 +942,7 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) return true; } -bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -980,8 +960,7 @@ bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) return true; } -bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) -{ +bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1022,8 +1001,7 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) } } -bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1034,8 +1012,7 @@ bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) return true; } -bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1050,8 +1027,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff, * otherwise bails to standard library code. 
*/ -bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) -{ +bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1094,8 +1070,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) } /* Fast string.compareTo(Ljava/lang/string;)I. */ -bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) -{ +bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1211,8 +1186,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, return true; } -bool Mir2Lir::GenIntrinsic(CallInfo* info) -{ +bool Mir2Lir::GenIntrinsic(CallInfo* info) { if (info->opt_flags & MIR_INLINED) { return false; } @@ -1358,8 +1332,7 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info) return false; } -void Mir2Lir::GenInvoke(CallInfo* info) -{ +void Mir2Lir::GenInvoke(CallInfo* info) { if (GenIntrinsic(info)) { return; } diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 6a25c1db45..353910606e 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -27,8 +27,7 @@ namespace art { * Load an immediate value into a fixed or temp register. Target * register is clobbered, and marked in_use. */ -LIR* Mir2Lir::LoadConstant(int r_dest, int value) -{ +LIR* Mir2Lir::LoadConstant(int r_dest, int value) { if (IsTemp(r_dest)) { Clobber(r_dest); MarkInUse(r_dest); @@ -41,8 +40,7 @@ LIR* Mir2Lir::LoadConstant(int r_dest, int value) * promoted floating point register, also copy a zero into the int/ref identity of * that sreg. 
*/ -void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) -{ +void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) { if (rl_dest.fp) { int pmap_index = SRegToPMap(rl_dest.s_reg_low); if (promotion_map_[pmap_index].fp_location == kLocPhysReg) { @@ -77,14 +75,12 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) } /* Load a word at base + displacement. Displacement must be word multiple */ -LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest) -{ +LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest) { return LoadBaseDisp(rBase, displacement, r_dest, kWord, INVALID_SREG); } -LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) -{ +LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) { return StoreBaseDisp(rBase, displacement, r_src, kWord); } @@ -93,8 +89,7 @@ LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) * using this routine, as it doesn't perform any bookkeeping regarding * register liveness. That is the responsibility of the caller. */ -void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) -{ +void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) { rl_src = UpdateLoc(rl_src); if (rl_src.location == kLocPhysReg) { OpRegCopy(r_dest, rl_src.low_reg); @@ -112,8 +107,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) * register. Should be used when loading to a fixed register (for example, * loading arguments to an out of line call. */ -void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) -{ +void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) { Clobber(r_dest); MarkInUse(r_dest); LoadValueDirect(rl_src, r_dest); @@ -125,8 +119,7 @@ void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) * register liveness. That is the responsibility of the caller. 
*/ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo, - int reg_hi) -{ + int reg_hi) { rl_src = UpdateLocWide(rl_src); if (rl_src.location == kLocPhysReg) { OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg); @@ -146,8 +139,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo, * loading arguments to an out of line call. */ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, - int reg_hi) -{ + int reg_hi) { Clobber(reg_lo); Clobber(reg_hi); MarkInUse(reg_lo); @@ -155,8 +147,7 @@ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, LoadValueDirectWide(rl_src, reg_lo, reg_hi); } -RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) -{ +RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) { rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { LoadValueDirect(rl_src, rl_src.low_reg); @@ -166,8 +157,7 @@ RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) return rl_src; } -void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) { /* * Sanity checking - should never try to store to the same * ssa name during the compilation of a single instruction @@ -222,8 +212,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) } } -RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) -{ +RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) { DCHECK(rl_src.wide); rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { @@ -235,8 +224,7 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) return rl_src; } -void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) { /* * 
Sanity checking - should never try to store to the same * ssa name during the compilation of a single instruction @@ -299,13 +287,11 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) } /* Utilities to load the current Method* */ -void Mir2Lir::LoadCurrMethodDirect(int r_tgt) -{ +void Mir2Lir::LoadCurrMethodDirect(int r_tgt) { LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt); } -RegLocation Mir2Lir::LoadCurrMethod() -{ +RegLocation Mir2Lir::LoadCurrMethod() { return LoadValue(mir_graph_->GetMethodLoc(), kCoreReg); } diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index ac654d8f21..eb27bf8b5d 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -29,8 +29,7 @@ namespace art { #define LDLD_DISTANCE 4 #define LD_LATENCY 2 -static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) -{ +static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) { int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info); int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info); int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info); @@ -40,8 +39,7 @@ static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) } /* Convert a more expensive instruction (ie load) into a move */ -void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) -{ +void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) { /* Insert a move to replace the load */ LIR* move_lir; move_lir = OpRegCopyNoInsert(dest, src); @@ -72,8 +70,7 @@ void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) * 1) They are must-aliases * 2) The memory location is not written to in between */ -void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { LIR* this_lir; if (head_lir == tail_lir) return; @@ -268,8 +265,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* 
tail_lir) * Perform a pass of bottom-up walk, from the second instruction in the * superblock, to try to hoist loads to earlier slots. */ -void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { LIR* this_lir, *check_lir; /* * Store the list of independent instructions that can be hoisted past. @@ -447,8 +443,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) } } -void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) { if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) { ApplyLoadStoreElimination(head_lir, tail_lir); } @@ -462,8 +457,7 @@ void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) * Note: new redundant branches may be inserted later, and we'll * use a check in final instruction assembly to nop those out. */ -void Mir2Lir::RemoveRedundantBranches() -{ +void Mir2Lir::RemoveRedundantBranches() { LIR* this_lir; for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) { diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc index 2482aa4fbb..dcfb13f549 100644 --- a/compiler/dex/quick/mips/assemble_mips.cc +++ b/compiler/dex/quick/mips/assemble_mips.cc @@ -457,8 +457,7 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = { * NOTE: An out-of-range bal isn't supported because it should * never happen with the current PIC model. */ -void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) -{ +void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) { // For conditional branches we'll need to reverse the sense bool unconditional = false; int opcode = lir->opcode; @@ -513,8 +512,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) * instruction. In those cases we will try to substitute a new code * sequence or request that the trace be shortened and retried. 
*/ -AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) -{ +AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) { LIR *lir; AssemblerStatus res = kSuccess; // Assume success @@ -708,8 +706,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) return res; } -int MipsMir2Lir::GetInsnSize(LIR* lir) -{ +int MipsMir2Lir::GetInsnSize(LIR* lir) { return EncodingMap[lir->opcode].size; } diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index eb0302e80f..db5764336a 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -24,8 +24,7 @@ namespace art { void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - SpecialCaseHandler special_case) -{ + SpecialCaseHandler special_case) { // TODO } @@ -61,8 +60,7 @@ void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, * */ void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpSparseSwitchTable(table); @@ -142,8 +140,7 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, * done: */ void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpPackedSwitchTable(table); @@ -227,8 +224,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, * * Total size is 4+(width * size + 1)/2 16-bit code units. 
*/ -void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) -{ +void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; // Add the table to the list - we'll process it later FillArrayData *tab_rec = @@ -270,8 +266,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) /* * TODO: implement fast path to short-circuit thin-lock case */ -void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) -{ +void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rMIPS_ARG0); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -286,8 +281,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) /* * TODO: implement fast path to short-circuit thin-lock case */ -void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) -{ +void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rMIPS_ARG0); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -299,8 +293,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) MarkSafepointPC(call_inst); } -void MipsMir2Lir::GenMoveException(RegLocation rl_dest) -{ +void MipsMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int reset_reg = AllocTemp(); @@ -314,8 +307,7 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) /* * Mark garbage collection card. Skip if the value we're storing is null. 
*/ -void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) -{ +void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) { int reg_card_base = AllocTemp(); int reg_card_no = AllocTemp(); LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL); @@ -328,8 +320,7 @@ void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) FreeTemp(reg_card_base); FreeTemp(reg_card_no); } -void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) -{ +void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { int spill_count = num_core_spills_ + num_fp_spills_; /* * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register @@ -375,8 +366,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) FreeTemp(rMIPS_ARG3); } -void MipsMir2Lir::GenExitSequence() -{ +void MipsMir2Lir::GenExitSequence() { /* * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't * allocated by the register utilities as temps. 
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 8581d5beb6..2e744a2afc 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -22,8 +22,7 @@ namespace art { void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { int op = kMipsNop; RegLocation rl_result; @@ -69,8 +68,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, } void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { int op = kMipsNop; RegLocation rl_result; @@ -117,8 +115,7 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, } void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { int op = kMipsNop; int src_reg; RegLocation rl_result; @@ -175,8 +172,7 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, } void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { bool wide = true; int offset = -1; // Make gcc happy. 
@@ -215,13 +211,11 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, } void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, - bool gt_bias, bool is_double) -{ + bool gt_bias, bool is_double) { UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch"; } -void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -229,8 +223,7 @@ void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) StoreValue(rl_dest, rl_result); } -void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -239,8 +232,7 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) StoreValueWide(rl_dest, rl_result); } -bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) -{ +bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { // TODO: need Mips implementation return false; } diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 8bfc4e1f91..03a58cc958 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -41,8 +41,7 @@ namespace art { * */ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); int t0 = AllocTemp(); @@ -63,8 +62,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, } LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, - LIR* target) -{ + LIR* target) { LIR* branch; MipsOpCode slt_op; MipsOpCode br_op; @@ -131,8 
+129,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, } LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, - int check_value, LIR* target) -{ + int check_value, LIR* target) { LIR* branch; if (check_value != 0) { // TUNING: handle s16 & kCondLt/Mi case using slti @@ -164,8 +161,7 @@ LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, return branch; } -LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) { if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src)) return OpFpRegCopy(r_dest, r_src); LIR* res = RawLIR(current_dalvik_offset_, kMipsMove, @@ -176,16 +172,14 @@ LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) return res; } -LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src) { LIR *res = OpRegCopyNoInsert(r_dest, r_src); AppendLIR(res); return res; } void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, - int src_hi) -{ + int src_hi) { bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi); bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi); assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi)); @@ -215,26 +209,22 @@ void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, } } -void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) -{ +void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for select"; } -void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) -{ +void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch"; } LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code, - int reg1, int base, int offset, ThrowKind kind) -{ + int reg1, int base, int offset, ThrowKind kind) { LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm"; return NULL; } RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, - bool is_div) 
-{ + bool is_div) { NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_div) { @@ -246,8 +236,7 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, } RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit, - bool is_div) -{ + bool is_div) { int t_reg = AllocTemp(); NewLIR3(kMipsAddiu, t_reg, r_ZERO, lit); NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg); @@ -261,13 +250,11 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit, return rl_result; } -void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) -{ +void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { LOG(FATAL) << "Unexpected use of OpLea for Arm"; } -void MipsMir2Lir::OpTlsCmp(int offset, int val) -{ +void MipsMir2Lir::OpTlsCmp(int offset, int val) { LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm"; } @@ -286,22 +273,19 @@ LIR* MipsMir2Lir::OpPcRelLoad(int reg, LIR* target) { return NULL; } -LIR* MipsMir2Lir::OpVldm(int rBase, int count) -{ +LIR* MipsMir2Lir::OpVldm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVldm for Mips"; return NULL; } -LIR* MipsMir2Lir::OpVstm(int rBase, int count) -{ +LIR* MipsMir2Lir::OpVstm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVstm for Mips"; return NULL; } void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) -{ + int first_bit, int second_bit) { int t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg); @@ -311,8 +295,7 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, } } -void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) -{ +void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { int t_reg = AllocTemp(); OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi); 
GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero); @@ -320,41 +303,35 @@ void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) } // Test suspend flag, return target of taken suspend branch -LIR* MipsMir2Lir::OpTestSuspend(LIR* target) -{ +LIR* MipsMir2Lir::OpTestSuspend(LIR* target) { OpRegImm(kOpSub, rMIPS_SUSPEND, 1); return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target); } // Decrement register and branch on condition -LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) -{ +LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) { OpRegImm(kOpSub, reg, 1); return OpCmpImmBranch(c_code, reg, 0, target); } bool MipsMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips"; return false; } -LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) -{ +LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) { LOG(FATAL) << "Unexpected use of OpIT in Mips"; return NULL; } void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenMulLong for Mips"; } void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -376,8 +353,7 @@ void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, } void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -398,8 +374,7 @@ void 
MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, StoreValueWide(rl_dest, rl_result); } -void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { rl_src = LoadValueWide(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); /* @@ -420,20 +395,17 @@ void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) } void MipsMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenAndLong for Mips"; } void MipsMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenOrLong for Mips"; } void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenXorLong for Mips"; } @@ -441,8 +413,7 @@ void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, * Generate array load */ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_dest, int scale) -{ + RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -513,8 +484,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, * */ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -586,8 +556,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, * */ void 
MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); @@ -643,15 +612,13 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, } void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { // Default implementation is just to ignore the constant case. GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift); } void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { // Default - bail to non-const handler. GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2); } diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index cab2c1b53d..bd20e00404 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -36,26 +36,22 @@ static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7, static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7, r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15}; -RegLocation MipsMir2Lir::LocCReturn() -{ +RegLocation MipsMir2Lir::LocCReturn() { RegLocation res = MIPS_LOC_C_RETURN; return res; } -RegLocation MipsMir2Lir::LocCReturnWide() -{ +RegLocation MipsMir2Lir::LocCReturnWide() { RegLocation res = MIPS_LOC_C_RETURN_WIDE; return res; } -RegLocation MipsMir2Lir::LocCReturnFloat() -{ +RegLocation MipsMir2Lir::LocCReturnFloat() { RegLocation res = MIPS_LOC_C_RETURN_FLOAT; return res; } -RegLocation MipsMir2Lir::LocCReturnDouble() -{ +RegLocation MipsMir2Lir::LocCReturnDouble() { RegLocation res = 
MIPS_LOC_C_RETURN_DOUBLE; return res; } @@ -86,28 +82,24 @@ int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) { } // Create a double from a pair of singles. -int MipsMir2Lir::S2d(int low_reg, int high_reg) -{ +int MipsMir2Lir::S2d(int low_reg, int high_reg) { return MIPS_S2D(low_reg, high_reg); } // Return mask to strip off fp reg flags and bias. -uint32_t MipsMir2Lir::FpRegMask() -{ +uint32_t MipsMir2Lir::FpRegMask() { return MIPS_FP_REG_MASK; } // True if both regs single, both core or both double. -bool MipsMir2Lir::SameRegType(int reg1, int reg2) -{ +bool MipsMir2Lir::SameRegType(int reg1, int reg2) { return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2)); } /* * Decode the register id. */ -uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) -{ +uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) { uint64_t seed; int shift; int reg_id; @@ -123,14 +115,12 @@ uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) return (seed << shift); } -uint64_t MipsMir2Lir::GetPCUseDefEncoding() -{ +uint64_t MipsMir2Lir::GetPCUseDefEncoding() { return ENCODE_MIPS_REG_PC; } -void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir) -{ +void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir) { DCHECK_EQ(cu_->instruction_set, kMips); // Mips-specific resource map setup here. @@ -162,8 +152,7 @@ static const char *mips_reg_name[MIPS_REG_COUNT] = { * Interpret a format string and build a string no longer than size * See format key in Assemble.c. 
*/ -std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) -{ +std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { std::string buf; int i; const char *fmt_end = &fmt[strlen(fmt)]; @@ -255,8 +244,7 @@ std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned cha } // FIXME: need to redo resource maps for MIPS - fix this at that time -void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix) -{ +void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix) { char buf[256]; buf[0] = 0; @@ -306,8 +294,7 @@ void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *pre * machinery is in place, always spill lr. */ -void MipsMir2Lir::AdjustSpillMask() -{ +void MipsMir2Lir::AdjustSpillMask() { core_spill_mask_ |= (1 << r_RA); num_core_spills_++; } @@ -318,13 +305,11 @@ void MipsMir2Lir::AdjustSpillMask() * include any holes in the mask. Associate holes with * Dalvik register INVALID_VREG (0xFFFFU). 
*/ -void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg) -{ +void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg) { LOG(FATAL) << "No support yet for promoted FP regs"; } -void MipsMir2Lir::FlushRegWide(int reg1, int reg2) -{ +void MipsMir2Lir::FlushRegWide(int reg1, int reg2) { RegisterInfo* info1 = GetRegInfo(reg1); RegisterInfo* info2 = GetRegInfo(reg2); DCHECK(info1 && info2 && info1->pair && info2->pair && @@ -345,8 +330,7 @@ void MipsMir2Lir::FlushRegWide(int reg1, int reg2) } } -void MipsMir2Lir::FlushReg(int reg) -{ +void MipsMir2Lir::FlushReg(int reg) { RegisterInfo* info = GetRegInfo(reg); if (info->live && info->dirty) { info->dirty = false; @@ -361,8 +345,7 @@ bool MipsMir2Lir::IsFpReg(int reg) { } /* Clobber all regs that might be used by an external C call */ -void MipsMir2Lir::ClobberCalleeSave() -{ +void MipsMir2Lir::ClobberCalleeSave() { Clobber(r_ZERO); Clobber(r_AT); Clobber(r_V0); @@ -404,29 +387,25 @@ void MipsMir2Lir::ClobberCalleeSave() Clobber(r_F15); } -RegLocation MipsMir2Lir::GetReturnWideAlt() -{ +RegLocation MipsMir2Lir::GetReturnWideAlt() { UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS"; RegLocation res = LocCReturnWide(); return res; } -RegLocation MipsMir2Lir::GetReturnAlt() -{ +RegLocation MipsMir2Lir::GetReturnAlt() { UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS"; RegLocation res = LocCReturn(); return res; } -MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) -{ +MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) { return MIPS_FPREG(reg) ? 
®_pool_->FPRegs[reg & MIPS_FP_REG_MASK] : ®_pool_->core_regs[reg]; } /* To be used when explicitly managing register use */ -void MipsMir2Lir::LockCallTemps() -{ +void MipsMir2Lir::LockCallTemps() { LockTemp(rMIPS_ARG0); LockTemp(rMIPS_ARG1); LockTemp(rMIPS_ARG2); @@ -434,16 +413,14 @@ void MipsMir2Lir::LockCallTemps() } /* To be used when explicitly managing register use */ -void MipsMir2Lir::FreeCallTemps() -{ +void MipsMir2Lir::FreeCallTemps() { FreeTemp(rMIPS_ARG0); FreeTemp(rMIPS_ARG1); FreeTemp(rMIPS_ARG2); FreeTemp(rMIPS_ARG3); } -void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) -{ +void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { #if ANDROID_SMP != 0 NewLIR1(kMipsSync, 0 /* Only stype currently supported */); #endif @@ -454,8 +431,7 @@ void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) * high reg in next byte. */ int MipsMir2Lir::AllocTypedTempPair(bool fp_hint, - int reg_class) -{ + int reg_class) { int high_reg; int low_reg; int res = 0; @@ -473,17 +449,14 @@ int MipsMir2Lir::AllocTypedTempPair(bool fp_hint, return res; } -int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) -{ - if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) -{ +int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) { + if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) { return AllocTempFloat(); } return AllocTemp(); } -void MipsMir2Lir::CompilerInitializeRegAlloc() -{ +void MipsMir2Lir::CompilerInitializeRegAlloc() { int num_regs = sizeof(core_regs)/sizeof(*core_regs); int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs); int num_temps = sizeof(core_temps)/sizeof(*core_temps); @@ -518,8 +491,7 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() } } -void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) -{ +void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) { if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && 
(rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { // No overlap, free both @@ -533,14 +505,12 @@ void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) * ensure that all branch instructions can be restarted if * there is a trap in the shadow. Allocate a temp register. */ -int MipsMir2Lir::LoadHelper(int offset) -{ +int MipsMir2Lir::LoadHelper(int offset) { LoadWordDisp(rMIPS_SELF, offset, r_T9); return r_T9; } -void MipsMir2Lir::SpillCoreRegs() -{ +void MipsMir2Lir::SpillCoreRegs() { if (num_core_spills_ == 0) { return; } @@ -555,8 +525,7 @@ void MipsMir2Lir::SpillCoreRegs() } } -void MipsMir2Lir::UnSpillCoreRegs() -{ +void MipsMir2Lir::UnSpillCoreRegs() { if (num_core_spills_ == 0) { return; } @@ -571,8 +540,7 @@ void MipsMir2Lir::UnSpillCoreRegs() OpRegImm(kOpAdd, rMIPS_SP, frame_size_); } -bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) -{ +bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) { return (lir->opcode == kMipsB); } @@ -592,18 +560,15 @@ Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, return new MipsMir2Lir(cu, mir_graph, arena); } -uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) -{ +uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) { return MipsMir2Lir::EncodingMap[opcode].flags; } -const char* MipsMir2Lir::GetTargetInstName(int opcode) -{ +const char* MipsMir2Lir::GetTargetInstName(int opcode) { return MipsMir2Lir::EncodingMap[opcode].name; } -const char* MipsMir2Lir::GetTargetInstFmt(int opcode) -{ +const char* MipsMir2Lir::GetTargetInstFmt(int opcode) { return MipsMir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 8daafc8d96..089764ff45 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -21,8 +21,7 @@ namespace art { /* This file contains codegen for the MIPS32 ISA. 
*/ -LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; /* must be both DOUBLE or both not DOUBLE */ DCHECK_EQ(MIPS_DOUBLEREG(r_dest),MIPS_DOUBLEREG(r_src)); @@ -51,23 +50,19 @@ LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) { return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768))); } -bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) { return false; // TUNING } @@ -80,8 +75,7 @@ bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) { LIR *res; int r_dest_save = r_dest; @@ -112,15 +106,13 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ ); res->target = target; return res; } -LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) { MipsOpCode opcode = kMipsNop; switch (op) { case kOpBlx: @@ -136,8 +128,7 @@ LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) } LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1, - int value) -{ + int value) { LIR *res; bool neg = (value < 0); int 
abs_value = (neg) ? -value : value; @@ -167,8 +158,7 @@ LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1, return res; } -LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) -{ +LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) { MipsOpCode opcode = kMipsNop; switch (op) { case kOpAdd: @@ -209,8 +199,7 @@ LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) return NewLIR3(opcode, r_dest, r_src1, r_src2); } -LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) -{ +LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { LIR *res; MipsOpCode opcode = kMipsNop; bool short_form = true; @@ -298,8 +287,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) return res; } -LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { MipsOpCode opcode = kMipsNop; LIR *res; switch (op) { @@ -342,8 +330,7 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) return NewLIR2(opcode, r_dest_src1, r_src2); } -LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { LIR *res; res = LoadConstantNoClobber(r_dest_lo, Low32Bits(value)); LoadConstantNoClobber(r_dest_hi, High32Bits(value)); @@ -352,8 +339,7 @@ LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) /* Load value from base + scaled index. */ LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, - int scale, OpSize size) -{ + int scale, OpSize size) { LIR *first = NULL; LIR *res; MipsOpCode opcode = kMipsNop; @@ -405,8 +391,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, /* store value base base + scaled index. 
*/ LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { LIR *first = NULL; MipsOpCode opcode = kMipsNop; int r_new_index = r_index; @@ -452,7 +437,7 @@ LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, } LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, - int r_dest_hi, OpSize size, int s_reg) + int r_dest_hi, OpSize size, int s_reg) { /* * Load value from base + displacement. Optionally perform null check * on base (which must have an associated s_reg and MIR). If not @@ -461,7 +446,6 @@ LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, * and base and dest are the same, spill some other register to * rlp and then restore. */ -{ LIR *res; LIR *load = NULL; LIR *load2 = NULL; @@ -551,21 +535,18 @@ LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, } LIR* MipsMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest, - OpSize size, int s_reg) -{ + OpSize size, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg); } LIR* MipsMir2Lir::LoadBaseDispWide(int rBase, int displacement, - int r_dest_lo, int r_dest_hi, int s_reg) -{ + int r_dest_lo, int r_dest_hi, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg); } LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement, - int r_src, int r_src_hi, OpSize size) -{ + int r_src, int r_src_hi, OpSize size) { LIR *res; LIR *store = NULL; LIR *store2 = NULL; @@ -647,52 +628,44 @@ LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement, } LIR* MipsMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src, - OpSize size) -{ + OpSize size) { return StoreBaseDispBody(rBase, displacement, r_src, -1, size); } LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseDispBody(rBase, 
displacement, r_src_lo, r_src_hi, kLong); } -LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS"; return NULL; } -LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) { LOG(FATAL) << "Unexpected use of OpMem for MIPS"; return NULL; } LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement, - int r_src, int r_src_hi, OpSize size, int s_reg) -{ + int r_src, int r_src_hi, OpSize size, int s_reg) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS"; return NULL; } LIR* MipsMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, - int offset) -{ + int offset) { LOG(FATAL) << "Unexpected use of OpRegMem for MIPS"; return NULL; } LIR* MipsMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, - int r_dest, int r_dest_hi, OpSize size, int s_reg) -{ + int r_dest, int r_dest_hi, OpSize size, int s_reg) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS"; return NULL; } -LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS"; return NULL; } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 4562482a06..b758fb538e 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -26,8 +26,7 @@ namespace art { * load/store utilities here, or target-dependent genXX() handlers * when necessary. 
*/ -void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) -{ +void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) { RegLocation rl_src[3]; RegLocation rl_dest = mir_graph_->GetBadLoc(); RegLocation rl_result = mir_graph_->GetBadLoc(); @@ -659,8 +658,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list } // Process extended MIR instructions -void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) -{ +void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) { switch (static_cast(mir->dalvikInsn.opcode)) { case kMirOpCopy: { RegLocation rl_src = mir_graph_->GetSrc(mir, 0); @@ -692,8 +690,7 @@ void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) } // Handle the content in each basic block. -bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) -{ +bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { if (bb->block_type == kDead) return false; current_dalvik_offset_ = bb->start_offset; MIR* mir; @@ -787,8 +784,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) return false; } -void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) -{ +void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) { // Find the first DalvikByteCode block. int num_reachable_blocks = mir_graph_->GetNumReachableBlocks(); BasicBlock*bb = NULL; @@ -817,8 +813,7 @@ void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) GenSpecialCase(bb, mir, special_case); } -void Mir2Lir::MethodMIR2LIR() -{ +void Mir2Lir::MethodMIR2LIR() { // Hold the labels of each block. 
block_label_list_ = static_cast(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true, diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index bec86c181e..abb687cb84 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -226,7 +226,7 @@ class Mir2Lir : public Backend { bool first_in_pair; }; - virtual ~Mir2Lir(){}; + virtual ~Mir2Lir() {}; int32_t s4FromSwitchData(const void* switch_data) { return *reinterpret_cast(switch_data); diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index 8f43542098..4c91223687 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -27,8 +27,7 @@ namespace art { * not affect the "liveness" of a temp register, which will stay * live until it is either explicitly killed or reallocated. */ -void Mir2Lir::ResetRegPool() -{ +void Mir2Lir::ResetRegPool() { int i; for (i=0; i < reg_pool_->num_core_regs; i++) { if (reg_pool_->core_regs[i].is_temp) @@ -48,8 +47,7 @@ void Mir2Lir::ResetRegPool() * Set up temp & preserved register pools specialized by target. * Note: num_regs may be zero. 
*/ -void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) -{ +void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) { int i; for (i=0; i < num; i++) { regs[i].reg = reg_nums[i]; @@ -62,8 +60,7 @@ void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) } } -void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) -{ +void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) { LOG(INFO) << "================================================"; for (int i = 0; i < num_regs; i++) { LOG(INFO) << StringPrintf( @@ -75,18 +72,15 @@ void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) LOG(INFO) << "================================================"; } -void Mir2Lir::DumpCoreRegPool() -{ +void Mir2Lir::DumpCoreRegPool() { DumpRegPool(reg_pool_->core_regs, reg_pool_->num_core_regs); } -void Mir2Lir::DumpFpRegPool() -{ +void Mir2Lir::DumpFpRegPool() { DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs); } -void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) -{ +void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) { int i; for (i=0; i< num_regs; i++) { if (p[i].s_reg == s_reg) { @@ -110,8 +104,7 @@ void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) * changes (for example: INT_TO_FLOAT v1, v1). Revisit when improved register allocation is * addressed. */ -void Mir2Lir::ClobberSReg(int s_reg) -{ +void Mir2Lir::ClobberSReg(int s_reg) { /* Reset live temp tracking sanity checker */ if (kIsDebugBuild) { if (s_reg == live_sreg_) { @@ -131,8 +124,7 @@ void Mir2Lir::ClobberSReg(int s_reg) * ssa name (above the last original Dalvik register). This function * maps SSA names to positions in the promotion_map array. 
*/ -int Mir2Lir::SRegToPMap(int s_reg) -{ +int Mir2Lir::SRegToPMap(int s_reg) { DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs()); DCHECK_GE(s_reg, 0); int v_reg = mir_graph_->SRegToVReg(s_reg); @@ -146,8 +138,7 @@ int Mir2Lir::SRegToPMap(int s_reg) } } -void Mir2Lir::RecordCorePromotion(int reg, int s_reg) -{ +void Mir2Lir::RecordCorePromotion(int reg, int s_reg) { int p_map_idx = SRegToPMap(s_reg); int v_reg = mir_graph_->SRegToVReg(s_reg); GetRegInfo(reg)->in_use = true; @@ -160,8 +151,7 @@ void Mir2Lir::RecordCorePromotion(int reg, int s_reg) } /* Reserve a callee-save register. Return -1 if none available */ -int Mir2Lir::AllocPreservedCoreReg(int s_reg) -{ +int Mir2Lir::AllocPreservedCoreReg(int s_reg) { int res = -1; RegisterInfo* core_regs = reg_pool_->core_regs; for (int i = 0; i < reg_pool_->num_core_regs; i++) { @@ -174,8 +164,7 @@ int Mir2Lir::AllocPreservedCoreReg(int s_reg) return res; } -void Mir2Lir::RecordFpPromotion(int reg, int s_reg) -{ +void Mir2Lir::RecordFpPromotion(int reg, int s_reg) { int p_map_idx = SRegToPMap(s_reg); int v_reg = mir_graph_->SRegToVReg(s_reg); GetRegInfo(reg)->in_use = true; @@ -189,8 +178,7 @@ void Mir2Lir::RecordFpPromotion(int reg, int s_reg) * even/odd allocation, but go ahead and allocate anything if not * available. If nothing's available, return -1. */ -int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) -{ +int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) { int res = -1; RegisterInfo* FPRegs = reg_pool_->FPRegs; for (int i = 0; i < reg_pool_->num_fp_regs; i++) { @@ -212,8 +200,7 @@ int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) * allocate if we can't meet the requirements for the pair of * s_reg<=sX[even] & (s_reg+1)<= sX+1. 
*/ -int Mir2Lir::AllocPreservedDouble(int s_reg) -{ +int Mir2Lir::AllocPreservedDouble(int s_reg) { int res = -1; // Assume failure int v_reg = mir_graph_->SRegToVReg(s_reg); int p_map_idx = SRegToPMap(s_reg); @@ -269,8 +256,7 @@ int Mir2Lir::AllocPreservedDouble(int s_reg) * single regs (but if can't still attempt to allocate a single, preferring * first to allocate an odd register. */ -int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) -{ +int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) { int res = -1; if (double_start) { res = AllocPreservedDouble(s_reg); @@ -284,8 +270,7 @@ int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) } int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, - bool required) -{ + bool required) { int i; int next = *next_temp; for (i=0; i< num_regs; i++) { @@ -323,8 +308,7 @@ int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, } //REDO: too many assumptions. -int Mir2Lir::AllocTempDouble() -{ +int Mir2Lir::AllocTempDouble() { RegisterInfo* p = reg_pool_->FPRegs; int num_regs = reg_pool_->num_fp_regs; /* Start looking at an even reg */ @@ -377,29 +361,25 @@ int Mir2Lir::AllocTempDouble() } /* Return a temp if one is available, -1 otherwise */ -int Mir2Lir::AllocFreeTemp() -{ +int Mir2Lir::AllocFreeTemp() { return AllocTempBody(reg_pool_->core_regs, reg_pool_->num_core_regs, ®_pool_->next_core_reg, true); } -int Mir2Lir::AllocTemp() -{ +int Mir2Lir::AllocTemp() { return AllocTempBody(reg_pool_->core_regs, reg_pool_->num_core_regs, ®_pool_->next_core_reg, true); } -int Mir2Lir::AllocTempFloat() -{ +int Mir2Lir::AllocTempFloat() { return AllocTempBody(reg_pool_->FPRegs, reg_pool_->num_fp_regs, ®_pool_->next_fp_reg, true); } -Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg) { int i; if (s_reg == -1) return NULL; @@ -413,8 +393,7 @@ 
Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int return NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) -{ +Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) { RegisterInfo* res = NULL; switch (reg_class) { case kAnyReg: @@ -437,8 +416,7 @@ Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) return res; } -void Mir2Lir::FreeTemp(int reg) -{ +void Mir2Lir::FreeTemp(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -465,8 +443,7 @@ void Mir2Lir::FreeTemp(int reg) LOG(FATAL) << "Tried to free a non-existant temp: r" << reg; } -Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -485,20 +462,17 @@ Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) return NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) { RegisterInfo* p = GetRegInfo(reg); return (p->is_temp) ? p : NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) { RegisterInfo* p = GetRegInfo(reg); return (p->is_temp) ? NULL : p; } -bool Mir2Lir::IsDirty(int reg) -{ +bool Mir2Lir::IsDirty(int reg) { RegisterInfo* p = GetRegInfo(reg); return p->dirty; } @@ -508,8 +482,7 @@ bool Mir2Lir::IsDirty(int reg) * register. No check is made to see if the register was previously * allocated. Use with caution. 
*/ -void Mir2Lir::LockTemp(int reg) -{ +void Mir2Lir::LockTemp(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -534,13 +507,11 @@ void Mir2Lir::LockTemp(int reg) LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg; } -void Mir2Lir::ResetDef(int reg) -{ +void Mir2Lir::ResetDef(int reg) { ResetDefBody(GetRegInfo(reg)); } -void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) -{ +void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) { if (start && finish) { LIR *p; DCHECK_EQ(s_reg1, s_reg2); @@ -557,8 +528,7 @@ void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) * on entry start points to the LIR prior to the beginning of the * sequence. */ -void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) -{ +void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) { DCHECK(!rl.wide); DCHECK(start && start->next); DCHECK(finish); @@ -572,8 +542,7 @@ void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) * on entry start points to the LIR prior to the beginning of the * sequence. 
*/ -void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) -{ +void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) { DCHECK(rl.wide); DCHECK(start && start->next); DCHECK(finish); @@ -583,8 +552,7 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) p->def_end = finish; } -RegLocation Mir2Lir::WideToNarrow(RegLocation rl) -{ +RegLocation Mir2Lir::WideToNarrow(RegLocation rl) { DCHECK(rl.wide); if (rl.location == kLocPhysReg) { RegisterInfo* info_lo = GetRegInfo(rl.low_reg); @@ -604,8 +572,7 @@ RegLocation Mir2Lir::WideToNarrow(RegLocation rl) return rl; } -void Mir2Lir::ResetDefLoc(RegLocation rl) -{ +void Mir2Lir::ResetDefLoc(RegLocation rl) { DCHECK(!rl.wide); RegisterInfo* p = IsTemp(rl.low_reg); if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) { @@ -615,8 +582,7 @@ void Mir2Lir::ResetDefLoc(RegLocation rl) ResetDef(rl.low_reg); } -void Mir2Lir::ResetDefLocWide(RegLocation rl) -{ +void Mir2Lir::ResetDefLocWide(RegLocation rl) { DCHECK(rl.wide); RegisterInfo* p_low = IsTemp(rl.low_reg); RegisterInfo* p_high = IsTemp(rl.high_reg); @@ -631,8 +597,7 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) ResetDef(rl.high_reg); } -void Mir2Lir::ResetDefTracking() -{ +void Mir2Lir::ResetDefTracking() { int i; for (i=0; i< reg_pool_->num_core_regs; i++) { ResetDefBody(®_pool_->core_regs[i]); @@ -642,8 +607,7 @@ void Mir2Lir::ResetDefTracking() } } -void Mir2Lir::ClobberAllRegs() -{ +void Mir2Lir::ClobberAllRegs() { int i; for (i=0; i< reg_pool_->num_core_regs; i++) { ClobberBody(®_pool_->core_regs[i]); @@ -654,8 +618,7 @@ void Mir2Lir::ClobberAllRegs() } // Make sure nothing is live and dirty -void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) -{ +void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) { int i; for (i=0; i < num_regs; i++) { if (info[i].live && info[i].dirty) { @@ -668,8 +631,7 @@ void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) } } -void Mir2Lir::FlushAllRegs() -{ +void 
Mir2Lir::FlushAllRegs() { FlushAllRegsBody(reg_pool_->core_regs, reg_pool_->num_core_regs); FlushAllRegsBody(reg_pool_->FPRegs, @@ -679,8 +641,7 @@ void Mir2Lir::FlushAllRegs() //TUNING: rewrite all of this reg stuff. Probably use an attribute table -bool Mir2Lir::RegClassMatches(int reg_class, int reg) -{ +bool Mir2Lir::RegClassMatches(int reg_class, int reg) { if (reg_class == kAnyReg) { return true; } else if (reg_class == kCoreReg) { @@ -690,8 +651,7 @@ bool Mir2Lir::RegClassMatches(int reg_class, int reg) } } -void Mir2Lir::MarkLive(int reg, int s_reg) -{ +void Mir2Lir::MarkLive(int reg, int s_reg) { RegisterInfo* info = GetRegInfo(reg); if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) { return; /* already live */ @@ -708,20 +668,17 @@ void Mir2Lir::MarkLive(int reg, int s_reg) info->s_reg = s_reg; } -void Mir2Lir::MarkTemp(int reg) -{ +void Mir2Lir::MarkTemp(int reg) { RegisterInfo* info = GetRegInfo(reg); info->is_temp = true; } -void Mir2Lir::UnmarkTemp(int reg) -{ +void Mir2Lir::UnmarkTemp(int reg) { RegisterInfo* info = GetRegInfo(reg); info->is_temp = false; } -void Mir2Lir::MarkPair(int low_reg, int high_reg) -{ +void Mir2Lir::MarkPair(int low_reg, int high_reg) { RegisterInfo* info_lo = GetRegInfo(low_reg); RegisterInfo* info_hi = GetRegInfo(high_reg); info_lo->pair = info_hi->pair = true; @@ -729,8 +686,7 @@ void Mir2Lir::MarkPair(int low_reg, int high_reg) info_hi->partner = low_reg; } -void Mir2Lir::MarkClean(RegLocation loc) -{ +void Mir2Lir::MarkClean(RegLocation loc) { RegisterInfo* info = GetRegInfo(loc.low_reg); info->dirty = false; if (loc.wide) { @@ -739,8 +695,7 @@ void Mir2Lir::MarkClean(RegLocation loc) } } -void Mir2Lir::MarkDirty(RegLocation loc) -{ +void Mir2Lir::MarkDirty(RegLocation loc) { if (loc.home) { // If already home, can't be dirty return; @@ -753,14 +708,12 @@ void Mir2Lir::MarkDirty(RegLocation loc) } } -void Mir2Lir::MarkInUse(int reg) -{ +void Mir2Lir::MarkInUse(int reg) { RegisterInfo* info = 
GetRegInfo(reg); info->in_use = true; } -void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) -{ +void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) { RegisterInfo* new_info = GetRegInfo(new_reg); RegisterInfo* old_info = GetRegInfo(old_reg); // Target temp status must not change @@ -771,8 +724,7 @@ void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) new_info->reg = new_reg; } -bool Mir2Lir::CheckCorePoolSanity() -{ +bool Mir2Lir::CheckCorePoolSanity() { for (static int i = 0; i < reg_pool_->num_core_regs; i++) { if (reg_pool_->core_regs[i].pair) { static int my_reg = reg_pool_->core_regs[i].reg; @@ -808,8 +760,7 @@ bool Mir2Lir::CheckCorePoolSanity() * if it's worthwhile trying to be more clever here. */ -RegLocation Mir2Lir::UpdateLoc(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateLoc(RegLocation loc) { DCHECK(!loc.wide); DCHECK(CheckCorePoolSanity()); if (loc.location != kLocPhysReg) { @@ -832,8 +783,7 @@ RegLocation Mir2Lir::UpdateLoc(RegLocation loc) } /* see comments for update_loc */ -RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) { DCHECK(loc.wide); DCHECK(CheckCorePoolSanity()); if (loc.location != kLocPhysReg) { @@ -886,16 +836,14 @@ RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) /* For use in cases we don't know (or care) width */ -RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) { if (loc.wide) return UpdateLocWide(loc); else return UpdateLoc(loc); } -RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) -{ +RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) { DCHECK(loc.wide); int new_regs; int low_reg; @@ -942,8 +890,7 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) return loc; } -RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) -{ +RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) { int new_reg; if 
(loc.wide) @@ -992,15 +939,13 @@ void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts) { } /* qsort callback function, sort descending */ -static int SortCounts(const void *val1, const void *val2) -{ +static int SortCounts(const void *val1, const void *val2) { const Mir2Lir::RefCounts* op1 = reinterpret_cast(val1); const Mir2Lir::RefCounts* op2 = reinterpret_cast(val2); return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1); } -void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) -{ +void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) { LOG(INFO) << msg; for (int i = 0; i < size; i++) { LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count; @@ -1011,8 +956,7 @@ void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) * Note: some portions of this code required even if the kPromoteRegs * optimization is disabled. */ -void Mir2Lir::DoPromotion() -{ +void Mir2Lir::DoPromotion() { int reg_bias = cu_->num_compiler_temps + 1; int dalvik_regs = cu_->num_dalvik_registers; int num_regs = dalvik_regs + reg_bias; @@ -1158,21 +1102,18 @@ void Mir2Lir::DoPromotion() } /* Returns sp-relative offset in bytes for a VReg */ -int Mir2Lir::VRegOffset(int v_reg) -{ +int Mir2Lir::VRegOffset(int v_reg) { return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_, fp_spill_mask_, frame_size_, v_reg); } /* Returns sp-relative offset in bytes for a SReg */ -int Mir2Lir::SRegOffset(int s_reg) -{ +int Mir2Lir::SRegOffset(int s_reg) { return VRegOffset(mir_graph_->SRegToVReg(s_reg)); } /* Mark register usage state and return long retloc */ -RegLocation Mir2Lir::GetReturnWide(bool is_double) -{ +RegLocation Mir2Lir::GetReturnWide(bool is_double) { RegLocation gpr_res = LocCReturnWide(); RegLocation fpr_res = LocCReturnDouble(); RegLocation res = is_double ? 
fpr_res : gpr_res; @@ -1184,8 +1125,7 @@ RegLocation Mir2Lir::GetReturnWide(bool is_double) return res; } -RegLocation Mir2Lir::GetReturn(bool is_float) -{ +RegLocation Mir2Lir::GetReturn(bool is_float) { RegLocation gpr_res = LocCReturn(); RegLocation fpr_res = LocCReturnFloat(); RegLocation res = is_float ? fpr_res : gpr_res; @@ -1198,8 +1138,7 @@ RegLocation Mir2Lir::GetReturn(bool is_float) return res; } -void Mir2Lir::SimpleRegAlloc() -{ +void Mir2Lir::SimpleRegAlloc() { DoPromotion(); if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) { diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index d60be72c31..1aeb39ae4b 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -23,8 +23,7 @@ namespace art { void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - SpecialCaseHandler special_case) -{ + SpecialCaseHandler special_case) { // TODO } @@ -33,8 +32,7 @@ void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, * pairs. */ void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpSparseSwitchTable(table); @@ -69,8 +67,7 @@ void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, * done: */ void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpPackedSwitchTable(table); @@ -130,8 +127,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, * * Total size is 4+(width * size + 1)/2 16-bit code units. 
*/ -void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) -{ +void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; // Add the table to the list - we'll process it later FillArrayData *tab_rec = @@ -156,8 +152,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) rX86_ARG1, true); } -void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) -{ +void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rCX); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -174,8 +169,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) branch->target = NewLIR0(kPseudoTargetLabel); } -void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) -{ +void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rAX); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -195,8 +189,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) branch2->target = NewLIR0(kPseudoTargetLabel); } -void X86Mir2Lir::GenMoveException(RegLocation rl_dest) -{ +void X86Mir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset); @@ -207,8 +200,7 @@ void X86Mir2Lir::GenMoveException(RegLocation rl_dest) /* * Mark garbage collection card. Skip if the value we're storing is null. 
*/ -void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) -{ +void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) { int reg_card_base = AllocTemp(); int reg_card_no = AllocTemp(); LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL); @@ -222,8 +214,7 @@ void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) FreeTemp(reg_card_no); } -void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) -{ +void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { /* * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register * allocation mechanism know so it doesn't try to use any of them when diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index 906b4cc759..f2ecf6c959 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -349,8 +349,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, OpCondBranch(ccode, taken); } -void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -358,8 +357,7 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) StoreValue(rl_dest, rl_result); } -void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 97d9d2deed..3be24df565 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -27,8 +27,7 @@ namespace art { * Perform register memory operation. 
*/ LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, - int reg1, int base, int offset, ThrowKind kind) -{ + int reg1, int base, int offset, ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, base, offset); OpRegMem(kOpCmp, reg1, base, offset); @@ -45,8 +44,7 @@ LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, * x > y return 1 */ void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { FlushAllRegs(); LockCallTemps(); // Prepare for explicit register usage LoadValueDirectWideFixed(rl_src1, r0, r1); @@ -88,8 +86,7 @@ X86ConditionCode X86ConditionEncoding(ConditionCode cond) { } LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, - LIR* target) -{ + LIR* target) { NewLIR2(kX86Cmp32RR, src1, src2); X86ConditionCode cc = X86ConditionEncoding(cond); LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , @@ -99,8 +96,7 @@ LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, } LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, - int check_value, LIR* target) -{ + int check_value, LIR* target) { if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) { // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode NewLIR2(kX86Test32RR, reg, reg); @@ -113,8 +109,7 @@ LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, return branch; } -LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) { if (X86_FPREG(r_dest) || X86_FPREG(r_src)) return OpFpRegCopy(r_dest, r_src); LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR, @@ -125,16 +120,14 @@ LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) return res; } -LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) { LIR *res = OpRegCopyNoInsert(r_dest, r_src); AppendLIR(res); return res; } void 
X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, - int src_lo, int src_hi) -{ + int src_lo, int src_hi) { bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi); bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi); assert(X86_FPREG(src_lo) == X86_FPREG(src_hi)); @@ -168,8 +161,7 @@ void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, } } -void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) -{ +void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect"; } @@ -213,21 +205,18 @@ void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { } RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo, - int lit, bool is_div) -{ + int lit, bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRemLit for x86"; return rl_dest; } RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo, - int reg_hi, bool is_div) -{ + int reg_hi, bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRem for x86"; return rl_dest; } -bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) -{ +bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { DCHECK_EQ(cu_->instruction_set, kX86); RegLocation rl_src1 = info->args[0]; RegLocation rl_src2 = info->args[1]; @@ -247,13 +236,11 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) return true; } -void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) -{ +void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset); } -void X86Mir2Lir::OpTlsCmp(int offset, int val) -{ +void X86Mir2Lir::OpTlsCmp(int offset, int val) { NewLIR2(kX86Cmp16TI8, offset, val); } @@ -267,22 +254,19 @@ LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) { return NULL; } -LIR* X86Mir2Lir::OpVldm(int rBase, int count) -{ +LIR* X86Mir2Lir::OpVldm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVldm for x86"; return NULL; } -LIR* X86Mir2Lir::OpVstm(int 
rBase, int count) -{ +LIR* X86Mir2Lir::OpVstm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVstm for x86"; return NULL; } void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) -{ + int first_bit, int second_bit) { int t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg); @@ -292,8 +276,7 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, } } -void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) -{ +void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { int t_reg = AllocTemp(); OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi); GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero); @@ -301,40 +284,34 @@ void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) } // Test suspend flag, return target of taken suspend branch -LIR* X86Mir2Lir::OpTestSuspend(LIR* target) -{ +LIR* X86Mir2Lir::OpTestSuspend(LIR* target) { OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0); return OpCondBranch((target == NULL) ? 
kCondNe : kCondEq, target); } // Decrement register and branch on condition -LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) -{ +LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) { OpRegImm(kOpSub, reg, 1); return OpCmpImmBranch(c_code, reg, 0, target); } bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { LOG(FATAL) << "Unexpected use of smallLiteralDive in x86"; return false; } -LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) -{ +LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) { LOG(FATAL) << "Unexpected use of OpIT in x86"; return NULL; } void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenX86Long for x86"; } void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -350,8 +327,7 @@ void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -367,8 +343,7 @@ void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. 
FlushAllRegs(); @@ -384,8 +359,7 @@ void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenOrLong(RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -401,8 +375,7 @@ void X86Mir2Lir::GenOrLong(RegLocation rl_dest, } void X86Mir2Lir::GenXorLong(RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -417,8 +390,7 @@ void X86Mir2Lir::GenXorLong(RegLocation rl_dest, StoreValueWide(rl_dest, rl_result); } -void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { FlushAllRegs(); LockCallTemps(); // Prepare for explicit register usage LoadValueDirectWideFixed(rl_src, r0, r1); @@ -447,8 +419,7 @@ void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) { * Generate array load */ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_dest, int scale) -{ + RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -495,8 +466,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, * */ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -539,8 +509,7 @@ void 
X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, * */ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); @@ -590,15 +559,13 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, } void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { // Default implementation is just to ignore the constant case. GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift); } void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { // Default - bail to non-const handler. 
GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2); } diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index c421ef3f11..5b64a6b5c3 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -45,26 +45,22 @@ namespace art { #endif }; -RegLocation X86Mir2Lir::LocCReturn() -{ +RegLocation X86Mir2Lir::LocCReturn() { RegLocation res = X86_LOC_C_RETURN; return res; } -RegLocation X86Mir2Lir::LocCReturnWide() -{ +RegLocation X86Mir2Lir::LocCReturnWide() { RegLocation res = X86_LOC_C_RETURN_WIDE; return res; } -RegLocation X86Mir2Lir::LocCReturnFloat() -{ +RegLocation X86Mir2Lir::LocCReturnFloat() { RegLocation res = X86_LOC_C_RETURN_FLOAT; return res; } -RegLocation X86Mir2Lir::LocCReturnDouble() -{ +RegLocation X86Mir2Lir::LocCReturnDouble() { RegLocation res = X86_LOC_C_RETURN_DOUBLE; return res; } @@ -95,28 +91,24 @@ int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { } // Create a double from a pair of singles. -int X86Mir2Lir::S2d(int low_reg, int high_reg) -{ +int X86Mir2Lir::S2d(int low_reg, int high_reg) { return X86_S2D(low_reg, high_reg); } // Return mask to strip off fp reg flags and bias. -uint32_t X86Mir2Lir::FpRegMask() -{ +uint32_t X86Mir2Lir::FpRegMask() { return X86_FP_REG_MASK; } // True if both regs single, both core or both double. -bool X86Mir2Lir::SameRegType(int reg1, int reg2) -{ +bool X86Mir2Lir::SameRegType(int reg1, int reg2) { return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2)); } /* * Decode the register id. */ -uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) -{ +uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) { uint64_t seed; int shift; int reg_id; @@ -131,8 +123,7 @@ uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) return (seed << shift); } -uint64_t X86Mir2Lir::GetPCUseDefEncoding() -{ +uint64_t X86Mir2Lir::GetPCUseDefEncoding() { /* * FIXME: might make sense to use a virtual resource encoding bit for pc. 
Might be * able to clean up some of the x86/Arm_Mips differences @@ -141,8 +132,7 @@ uint64_t X86Mir2Lir::GetPCUseDefEncoding() return 0ULL; } -void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir) -{ +void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir) { DCHECK_EQ(cu_->instruction_set, kX86); // X86-specific resource map setup here. @@ -263,8 +253,7 @@ std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char return buf; } -void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) -{ +void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) { char buf[256]; buf[0] = 0; @@ -317,16 +306,14 @@ void X86Mir2Lir::AdjustSpillMask() { * include any holes in the mask. Associate holes with * Dalvik register INVALID_VREG (0xFFFFU). */ -void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) -{ +void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) { UNIMPLEMENTED(WARNING) << "MarkPreservedSingle"; #if 0 LOG(FATAL) << "No support yet for promoted FP regs"; #endif } -void X86Mir2Lir::FlushRegWide(int reg1, int reg2) -{ +void X86Mir2Lir::FlushRegWide(int reg1, int reg2) { RegisterInfo* info1 = GetRegInfo(reg1); RegisterInfo* info2 = GetRegInfo(reg2); DCHECK(info1 && info2 && info1->pair && info2->pair && @@ -347,8 +334,7 @@ void X86Mir2Lir::FlushRegWide(int reg1, int reg2) } } -void X86Mir2Lir::FlushReg(int reg) -{ +void X86Mir2Lir::FlushReg(int reg) { RegisterInfo* info = GetRegInfo(reg); if (info->live && info->dirty) { info->dirty = false; @@ -363,8 +349,7 @@ bool X86Mir2Lir::IsFpReg(int reg) { } /* Clobber all regs that might be used by an external C call */ -void X86Mir2Lir::ClobberCalleeSave() -{ +void X86Mir2Lir::ClobberCalleeSave() { Clobber(rAX); Clobber(rCX); Clobber(rDX); @@ -382,8 +367,7 @@ RegLocation X86Mir2Lir::GetReturnWideAlt() { return res; } -RegLocation X86Mir2Lir::GetReturnAlt() -{ +RegLocation X86Mir2Lir::GetReturnAlt() { RegLocation res = LocCReturn(); res.low_reg = rDX; 
Clobber(rDX); @@ -391,15 +375,13 @@ RegLocation X86Mir2Lir::GetReturnAlt() return res; } -X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) -{ +X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) { return X86_FPREG(reg) ? ®_pool_->FPRegs[reg & X86_FP_REG_MASK] : ®_pool_->core_regs[reg]; } /* To be used when explicitly managing register use */ -void X86Mir2Lir::LockCallTemps() -{ +void X86Mir2Lir::LockCallTemps() { LockTemp(rX86_ARG0); LockTemp(rX86_ARG1); LockTemp(rX86_ARG2); @@ -407,16 +389,14 @@ void X86Mir2Lir::LockCallTemps() } /* To be used when explicitly managing register use */ -void X86Mir2Lir::FreeCallTemps() -{ +void X86Mir2Lir::FreeCallTemps() { FreeTemp(rX86_ARG0); FreeTemp(rX86_ARG1); FreeTemp(rX86_ARG2); FreeTemp(rX86_ARG3); } -void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) -{ +void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { #if ANDROID_SMP != 0 // TODO: optimize fences NewLIR0(kX86Mfence); @@ -427,8 +407,7 @@ void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) * high reg in next byte. 
*/ int X86Mir2Lir::AllocTypedTempPair(bool fp_hint, - int reg_class) -{ + int reg_class) { int high_reg; int low_reg; int res = 0; @@ -485,8 +464,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() { } void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, - RegLocation rl_free) -{ + RegLocation rl_free) { if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { // No overlap, free both @@ -525,8 +503,7 @@ void X86Mir2Lir::UnSpillCoreRegs() { } } -bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) -{ +bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); } @@ -547,24 +524,20 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, } // Not used in x86 -int X86Mir2Lir::LoadHelper(int offset) -{ +int X86Mir2Lir::LoadHelper(int offset) { LOG(FATAL) << "Unexpected use of LoadHelper in x86"; return INVALID_REG; } -uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) -{ +uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { return X86Mir2Lir::EncodingMap[opcode].flags; } -const char* X86Mir2Lir::GetTargetInstName(int opcode) -{ +const char* X86Mir2Lir::GetTargetInstName(int opcode) { return X86Mir2Lir::EncodingMap[opcode].name; } -const char* X86Mir2Lir::GetTargetInstFmt(int opcode) -{ +const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { return X86Mir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index fb07ff1e22..6376e3b87a 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -22,8 +22,7 @@ namespace art { /* This file contains codegen for the X86 ISA */ -LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; /* must be both DOUBLE or both not DOUBLE */ DCHECK_EQ(X86_DOUBLEREG(r_dest), 
X86_DOUBLEREG(r_src)); @@ -49,23 +48,19 @@ LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) { return true; } -bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) { return false; } -bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) { return true; } -bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) { return false; // TUNING } @@ -78,8 +73,7 @@ bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) { int r_dest_save = r_dest; if (X86_FPREG(r_dest)) { if (value == 0) { @@ -105,23 +99,20 @@ LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ ); res->target = target; return res; } -LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */, X86ConditionEncoding(cc)); branch->target = target; return branch; } -LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpNeg: opcode = kX86Neg32R; break; @@ -133,8 +124,7 @@ LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) return NewLIR1(opcode, r_dest_src); } -LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) -{ +LIR* 
X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) { X86OpCode opcode = kX86Bkpt; bool byte_imm = IS_SIMM8(value); DCHECK(!X86_FPREG(r_dest_src1)); @@ -160,8 +150,7 @@ LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) return NewLIR2(opcode, r_dest_src1, value); } -LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { X86OpCode opcode = kX86Nop; bool src2_must_be_cx = false; switch (op) { @@ -207,8 +196,7 @@ LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) } LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, - int offset) -{ + int offset) { X86OpCode opcode = kX86Nop; switch (op) { // X86 binary opcodes @@ -231,8 +219,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, } LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, - int r_src2) -{ + int r_src2) { if (r_dest != r_src1 && r_dest != r_src2) { if (op == kOpAdd) { // lea special case, except can't encode rbp as base if (r_src1 == r_src2) { @@ -280,8 +267,7 @@ LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, } LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, - int value) -{ + int value) { if (op == kOpMul) { X86OpCode opcode = IS_SIMM8(value) ? 
kX86Imul32RRI8 : kX86Imul32RRI; return NewLIR3(opcode, r_dest, r_src, value); @@ -306,8 +292,7 @@ LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, return OpRegImm(op, r_dest, value); } -LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpBlx: opcode = kX86CallT; break; @@ -318,8 +303,7 @@ LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) return NewLIR1(opcode, thread_offset); } -LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpBlx: opcode = kX86CallM; break; @@ -330,8 +314,7 @@ LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) return NewLIR2(opcode, rBase, disp); } -LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { int32_t val_lo = Low32Bits(value); int32_t val_hi = High32Bits(value); LIR *res; @@ -558,23 +541,20 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, /* store value base base + scaled index. 
*/ LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { return StoreBaseIndexedDisp(rBase, r_index, scale, 0, r_src, INVALID_REG, size, INVALID_SREG); } LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement, - int r_src, OpSize size) -{ + int r_src, OpSize size) { return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement, r_src, INVALID_REG, size, INVALID_SREG); } LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement, r_src_lo, r_src_hi, kLong, INVALID_SREG); } diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index 41820720d8..ccd2454a49 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -37,8 +37,7 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) { return bb; } -BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) -{ +BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) { BasicBlock* res = NeedsVisit(bb->fall_through); if (res == NULL) { res = NeedsVisit(bb->taken); @@ -57,15 +56,13 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) return res; } -void MIRGraph::MarkPreOrder(BasicBlock* block) -{ +void MIRGraph::MarkPreOrder(BasicBlock* block) { block->visited = true; /* Enqueue the pre_order block id */ dfs_order_->Insert(block->id); } -void MIRGraph::RecordDFSOrders(BasicBlock* block) -{ +void MIRGraph::RecordDFSOrders(BasicBlock* block) { std::vector succ; MarkPreOrder(block); succ.push_back(block); @@ -84,8 +81,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) } /* Sort the blocks by the Depth-First-Search */ -void MIRGraph::ComputeDFSOrders() -{ +void MIRGraph::ComputeDFSOrders() { /* Initialize or reset the DFS pre_order list */ if (dfs_order_ == NULL) { dfs_order_ = new (arena_) GrowableArray(arena_, GetNumBlocks(), 
kGrowableArrayDfsOrder); @@ -115,8 +111,7 @@ void MIRGraph::ComputeDFSOrders() * Mark block bit on the per-Dalvik register vector to denote that Dalvik * register idx is defined in BasicBlock bb. */ -bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) -{ +bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) { if (bb->data_flow_info == NULL) return false; ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v); @@ -129,8 +124,7 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) return true; } -void MIRGraph::ComputeDefBlockMatrix() -{ +void MIRGraph::ComputeDefBlockMatrix() { int num_registers = cu_->num_dalvik_registers; /* Allocate num_dalvik_registers bit vector pointers */ def_block_matrix_ = static_cast @@ -203,8 +197,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) { } void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb, - const BasicBlock* succ_bb) -{ + const BasicBlock* succ_bb) { /* * TODO - evaluate whether phi will ever need to be inserted into exit * blocks. @@ -217,8 +210,7 @@ void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb, } /* Worker function to compute the dominance frontier */ -bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) -{ +bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { /* Calculate DF_local */ if (bb->taken) { CheckForDominanceFrontier(bb, bb->taken); @@ -257,8 +249,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) } /* Worker function for initializing domination-related data structures */ -void MIRGraph::InitializeDominationInfo(BasicBlock* bb) -{ +void MIRGraph::InitializeDominationInfo(BasicBlock* bb) { int num_total_blocks = GetBasicBlockListCount(); if (bb->dominators == NULL ) { @@ -284,8 +275,7 @@ void MIRGraph::InitializeDominationInfo(BasicBlock* bb) * Given the ordering of i_dom_list, this common parent represents the * last element of the intersection of block1 and block2 dominators. 
*/ -int MIRGraph::FindCommonParent(int block1, int block2) -{ +int MIRGraph::FindCommonParent(int block1, int block2) { while (block1 != block2) { while (block1 < block2) { block1 = i_dom_list_[block1]; @@ -300,8 +290,7 @@ int MIRGraph::FindCommonParent(int block1, int block2) } /* Worker function to compute each block's immediate dominator */ -bool MIRGraph::ComputeblockIDom(BasicBlock* bb) -{ +bool MIRGraph::ComputeblockIDom(BasicBlock* bb) { /* Special-case entry block */ if (bb == GetEntryBlock()) { return false; @@ -343,8 +332,7 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) } /* Worker function to compute each block's domintors */ -bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) -{ +bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) { if (bb == GetEntryBlock()) { bb->dominators->ClearAllBits(); } else { @@ -354,8 +342,7 @@ bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) return false; } -bool MIRGraph::SetDominators(BasicBlock* bb) -{ +bool MIRGraph::SetDominators(BasicBlock* bb) { if (bb != GetEntryBlock()) { int idom_dfs_idx = i_dom_list_[bb->dfs_id]; DCHECK_NE(idom_dfs_idx, NOTVISITED); @@ -369,8 +356,7 @@ bool MIRGraph::SetDominators(BasicBlock* bb) } /* Compute dominators, immediate dominator, and dominance fronter */ -void MIRGraph::ComputeDominators() -{ +void MIRGraph::ComputeDominators() { int num_reachable_blocks = num_reachable_blocks_; int num_total_blocks = GetBasicBlockListCount(); @@ -435,8 +421,7 @@ void MIRGraph::ComputeDominators() * This is probably not general enough to be placed in BitVector.[ch]. 
*/ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, - const ArenaBitVector* src2) -{ + const ArenaBitVector* src2) { if (dest->GetStorageSize() != src1->GetStorageSize() || dest->GetStorageSize() != src2->GetStorageSize() || dest->IsExpandable() != src1->IsExpandable() || @@ -455,8 +440,7 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src * The calculated result is used for phi-node pruning - where we only need to * insert a phi node if the variable is live-in to the block. */ -bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) -{ +bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_; if (bb->data_flow_info == NULL) return false; @@ -487,8 +471,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) } /* Insert phi nodes to for each variable to the dominance frontiers */ -void MIRGraph::InsertPhiNodes() -{ +void MIRGraph::InsertPhiNodes() { int dalvik_reg; ArenaBitVector* phi_blocks = new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi); @@ -569,8 +552,7 @@ void MIRGraph::InsertPhiNodes() * Worker function to insert phi-operands with latest SSA names from * predecessor blocks */ -bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) -{ +bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { MIR *mir; std::vector uses; std::vector incoming_arc; @@ -622,8 +604,7 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) return true; } -void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) -{ +void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { if (block->visited || block->hidden) return; block->visited = true; @@ -663,8 +644,7 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) } /* Perform SSA transformation for the whole method */ -void MIRGraph::SSATransformation() -{ +void MIRGraph::SSATransformation() { /* Compute the DFS order */ ComputeDFSOrders(); diff --git a/compiler/dex/vreg_analysis.cc 
b/compiler/dex/vreg_analysis.cc index adbda5c18f..8df6dd93e7 100644 --- a/compiler/dex/vreg_analysis.cc +++ b/compiler/dex/vreg_analysis.cc @@ -72,8 +72,7 @@ bool MIRGraph::SetHigh(int index, bool is_high) { * as it doesn't propagate. We're guaranteed at least one pass through * the cfg. */ -bool MIRGraph::InferTypeAndSize(BasicBlock* bb) -{ +bool MIRGraph::InferTypeAndSize(BasicBlock* bb) { MIR *mir; bool changed = false; // Did anything change? @@ -333,8 +332,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "}; -void MIRGraph::DumpRegLocTable(RegLocation* table, int count) -{ +void MIRGraph::DumpRegLocTable(RegLocation* table, int count) { //FIXME: Quick-specific. Move to Quick (and make a generic version for MIRGraph? Mir2Lir* cg = static_cast(cu_->cg.get()); if (cg != NULL) { @@ -374,8 +372,7 @@ static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, * allocation is done on the fly. We also do some initialization and * type inference here. 
*/ -void MIRGraph::BuildRegLocations() -{ +void MIRGraph::BuildRegLocations() { int i; RegLocation* loc; diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 9e71dff464..d1d21b1d03 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -356,14 +356,14 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet jni_compiler_(NULL), compiler_enable_auto_elf_loading_(NULL), compiler_get_method_code_addr_(NULL), - support_boot_image_fixup_(true) -{ + support_boot_image_fixup_(true) { + CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key"); // TODO: more work needed to combine initializations and allow per-method backend selection typedef void (*InitCompilerContextFn)(CompilerDriver&); InitCompilerContextFn init_compiler_context; - if (compiler_backend_ == kPortable){ + if (compiler_backend_ == kPortable) { // Initialize compiler_context_ init_compiler_context = reinterpret_cast(ArtInitCompilerContext); compiler_ = reinterpret_cast(ArtCompileMethod); @@ -1411,10 +1411,7 @@ class ParallelCompilationManager { begin_(begin), end_(end), callback_(callback), - stripe_(stripe) - { - - } + stripe_(stripe) {} virtual void Run(Thread* self) { for (size_t i = begin_; i < end_; i += stripe_) { @@ -2095,7 +2092,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } if (!is_black_listed) { LOG(INFO) << "Initializing: " << descriptor; - if (StringPiece(descriptor) == "Ljava/lang/Void;"){ + if (StringPiece(descriptor) == "Ljava/lang/Void;") { // Hand initialize j.l.Void to avoid Dex file operations in un-started runtime. 
mirror::ObjectArray* fields = klass->GetSFields(); CHECK_EQ(fields->GetLength(), 1); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 8d32a915dd..7fd1a7cb10 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -108,7 +108,7 @@ bool ImageWriter::Write(const std::string& image_filename, return false; } #ifndef NDEBUG - { + { // NOLINT(whitespace/braces) ScopedObjectAccess soa(Thread::Current()); CheckNonImageClassesRemoved(); } diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 560a146052..4b6967faa0 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -68,7 +68,7 @@ class JniCompilerTest : public CommonTest { void SetUpForTest(bool direct, const char* method_name, const char* method_sig, void* native_fnptr) { // Initialize class loader and compile method when runtime not started. - if (!runtime_->IsStarted()){ + if (!runtime_->IsStarted()) { { ScopedObjectAccess soa(Thread::Current()); class_loader_ = LoadDex("MyClassNatives"); diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc index 28405f67d4..976aa8f50d 100644 --- a/compiler/llvm/runtime_support_builder.cc +++ b/compiler/llvm/runtime_support_builder.cc @@ -38,8 +38,7 @@ using namespace runtime_support; RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb) - : context_(context), module_(module), irb_(irb) -{ + : context_(context), module_(module), irb_(irb) { memset(target_runtime_support_func_, 0, sizeof(target_runtime_support_func_)); #define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \ do { \ diff --git a/jdwpspy/Common.h b/jdwpspy/Common.h index 33f1a670ea..30a49fba76 100644 --- a/jdwpspy/Common.h +++ b/jdwpspy/Common.h @@ -26,16 +26,14 @@ typedef uint64_t u8; /* * Get 1 byte. (Included to make the code more legible.) 
*/ -INLINE u1 get1(unsigned const char* pSrc) -{ +INLINE u1 get1(unsigned const char* pSrc) { return *pSrc; } /* * Get 2 big-endian bytes. */ -INLINE u2 get2BE(unsigned char const* pSrc) -{ +INLINE u2 get2BE(unsigned char const* pSrc) { u2 result; result = *pSrc++ << 8; @@ -47,8 +45,7 @@ INLINE u2 get2BE(unsigned char const* pSrc) /* * Get 4 big-endian bytes. */ -INLINE u4 get4BE(unsigned char const* pSrc) -{ +INLINE u4 get4BE(unsigned char const* pSrc) { u4 result; result = *pSrc++ << 24; @@ -62,8 +59,7 @@ INLINE u4 get4BE(unsigned char const* pSrc) /* * Get 8 big-endian bytes. */ -INLINE u8 get8BE(unsigned char const* pSrc) -{ +INLINE u8 get8BE(unsigned char const* pSrc) { u8 result; result = (u8) *pSrc++ << 56; diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index fbec826af2..bb4b5c5dab 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -465,7 +465,7 @@ ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) : #if ART_USE_FUTEXES , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0) #endif -{ +{ // NOLINT(whitespace/braces) #if !ART_USE_FUTEXES CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL)); #endif diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 04979016e3..2a55e3138b 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -169,7 +169,7 @@ void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, msg.str().c_str()); } -void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...){ +void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) 
{ va_list args; va_start(args, fmt); ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, fmt, &args); @@ -222,7 +222,7 @@ void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, msg.str().c_str()); } -void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...){ +void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...) { va_list args; va_start(args, fmt); ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args); diff --git a/runtime/compiled_method.cc b/runtime/compiled_method.cc index 757a324155..49706ae8eb 100644 --- a/runtime/compiled_method.cc +++ b/runtime/compiled_method.cc @@ -19,8 +19,7 @@ namespace art { CompiledCode::CompiledCode(InstructionSet instruction_set, const std::vector& code) - : instruction_set_(instruction_set), code_(code) -{ + : instruction_set_(instruction_set), code_(code) { CHECK_NE(code.size(), 0U); } @@ -118,8 +117,7 @@ CompiledMethod::CompiledMethod(InstructionSet instruction_set, const std::vector& native_gc_map) : CompiledCode(instruction_set, code), frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask), - gc_map_(native_gc_map) -{ + gc_map_(native_gc_map) { DCHECK_EQ(vmap_table.size(), static_cast(__builtin_popcount(core_spill_mask) + __builtin_popcount(fp_spill_mask))); diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 1e37dcde07..cd34c3c89e 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -252,7 +252,7 @@ DexFile::~DexFile() { class ScopedJniMonitorLock { public: - ScopedJniMonitorLock(JNIEnv* env, jobject locked) : env_(env), locked_(locked){ + ScopedJniMonitorLock(JNIEnv* env, jobject locked) : env_(env), locked_(locked) { env->MonitorEnter(locked_); } ~ScopedJniMonitorLock() { diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h index d2ad989395..c3424dcdff 100644 --- a/runtime/dex_instruction.h +++ 
b/runtime/dex_instruction.h @@ -82,7 +82,7 @@ class Instruction { // TODO: the code layout below is deliberate to avoid this enum being picked up by // generate-operator-out.py. enum Code - { + { // NOLINT(whitespace/braces) #define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode, #include "dex_instruction_list.h" DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM) diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index 3cee1b7355..f7d776fbfb 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -49,8 +49,7 @@ void LargeObjectSpace::CopyLiveToMarked() { LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) : LargeObjectSpace(name), - lock_("large object map space lock", kAllocSpaceLock) -{ + lock_("large object map space lock", kAllocSpaceLock) { } @@ -274,7 +273,7 @@ mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) { return reinterpret_cast(addr); } -void FreeListSpace::Dump(std::ostream& os) const{ +void FreeListSpace::Dump(std::ostream& os) const { os << GetName() << " -" << " begin: " << reinterpret_cast(Begin()) << " end: " << reinterpret_cast(End()); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 16e04a5a3f..2fb272cef4 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -2574,7 +2574,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); break; } - case Instruction::SHL_INT_2ADDR:{ + case Instruction::SHL_INT_2ADDR: { PREAMBLE(); uint32_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc index 88a9dc1aa6..58ef5f7bc8 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/abstract_method.cc @@ -262,7 +262,7 @@ void AbstractMethod::Invoke(Thread* self, uint32_t* args, uint32_t 
args_size, JV Runtime* runtime = Runtime::Current(); // Call the invoke stub, passing everything as arguments. - if (UNLIKELY(!runtime->IsStarted())){ + if (UNLIKELY(!runtime->IsStarted())) { LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started"; if (result != NULL) { result->SetJ(0); diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index d323c3333b..52906a2650 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -75,7 +75,7 @@ inline AbstractMethod* Class::GetDirectMethod(int32_t i) const } inline void Class::SetDirectMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* direct_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); @@ -308,13 +308,13 @@ inline size_t Class::NumInstanceFields() const { } inline Field* Class::GetInstanceField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_NE(NumInstanceFields(), 0U); return GetIFields()->Get(i); } inline void Class::SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* ifields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); ifields->Set(i, f); diff --git a/runtime/native/dalvik_system_Zygote.cc b/runtime/native/dalvik_system_Zygote.cc index 9b995f421d..e6b4513972 100644 --- a/runtime/native/dalvik_system_Zygote.cc +++ b/runtime/native/dalvik_system_Zygote.cc @@ -492,7 +492,7 @@ static pid_t ForkAndSpecializeCommon(JNIEnv* env, uid_t uid, gid_t gid, jintArra SetSchedulerPolicy(); #if defined(HAVE_ANDROID_OS) - { + { // NOLINT(whitespace/braces) const char* se_info_c_str = NULL; UniquePtr se_info; if (java_se_info != NULL) { diff --git a/runtime/oat/runtime/support_jni.cc 
b/runtime/oat/runtime/support_jni.cc index 8f0f7ca93d..25f6930dd0 100644 --- a/runtime/oat/runtime/support_jni.cc +++ b/runtime/oat/runtime/support_jni.cc @@ -104,7 +104,7 @@ static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { } extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(Thread::Current() == self); // TODO: this code is specific to ARM // On entry the stack pointed by sp is: diff --git a/runtime/oat/runtime/x86/context_x86.cc b/runtime/oat/runtime/x86/context_x86.cc index ceb10bd6ca..c728ae97ec 100644 --- a/runtime/oat/runtime/x86/context_x86.cc +++ b/runtime/oat/runtime/x86/context_x86.cc @@ -61,7 +61,7 @@ void X86Context::SmashCallerSaves() { gprs_[EBX] = NULL; } -void X86Context::SetGPR(uint32_t reg, uintptr_t value){ +void X86Context::SetGPR(uint32_t reg, uintptr_t value) { CHECK_LT(reg, static_cast(kNumberOfCpuRegisters)); CHECK_NE(gprs_[reg], &gZero); CHECK(gprs_[reg] != NULL); diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 0f29915a9b..bb8341ee9f 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -358,8 +358,7 @@ OatFile::OatMethod::OatMethod(const byte* base, fp_spill_mask_(fp_spill_mask), mapping_table_offset_(mapping_table_offset), vmap_table_offset_(vmap_table_offset), - native_gc_map_offset_(gc_map_offset) -{ + native_gc_map_offset_(gc_map_offset) { #ifndef NDEBUG if (mapping_table_offset_ != 0) { // implies non-native, non-stub code if (vmap_table_offset_ == 0) { diff --git a/runtime/runtime.cc b/runtime/runtime.cc index e5fb46fa5b..14d4c8a592 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -566,7 +566,7 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b Trace::SetDefaultClockSource(kProfilerClockSourceDual); } else if (option == "-small") { parsed->small_mode_ = true; - }else if (option == "-sea_ir") { + } else if (option == "-sea_ir") { 
parsed->sea_ir_mode_ = true; } else if (StartsWith(option, "-small-mode-methods-max:")) { parsed->small_mode_method_threshold_ = ParseIntegerOrDie(option); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index f0f6f1844d..784a7caadf 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -60,7 +60,7 @@ void* ThreadPoolWorker::Callback(void* arg) { return NULL; } -void ThreadPool::AddTask(Thread* self, Task* task){ +void ThreadPool::AddTask(Thread* self, Task* task) { MutexLock mu(self, task_queue_lock_); tasks_.push_back(task); // If we have any waiters, signal one. @@ -173,7 +173,7 @@ void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) { } } -size_t ThreadPool::GetTaskCount(Thread* self){ +size_t ThreadPool::GetTaskCount(Thread* self) { MutexLock mu(self, task_queue_lock_); return tasks_.size(); } -- cgit v1.2.3-59-g8ed1b From 0cd7ec2dcd8d7ba30bf3ca420b40dac52849876c Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 17 Jul 2013 23:40:20 -0700 Subject: Fix cpplint whitespace/blank_line issues Change-Id: Ice937e95e23dd622c17054551d4ae4cebd0ef8a2 --- Android.mk | 6 +- compiler/dex/arena_allocator.h | 3 - compiler/dex/arena_bit_vector.h | 1 - compiler/dex/backend.h | 2 - compiler/dex/dataflow_iterator.h | 3 - compiler/dex/growable_array.h | 1 - compiler/dex/local_value_numbering.cc | 1 - compiler/dex/local_value_numbering.h | 1 - compiler/dex/mir_graph.cc | 1 - compiler/dex/mir_graph.h | 1 - compiler/dex/portable/mir_to_gbc.cc | 6 +- compiler/dex/portable/mir_to_gbc.h | 1 - compiler/dex/quick/arm/assemble_arm.cc | 1 - compiler/dex/quick/codegen_util.cc | 3 - compiler/dex/quick/gen_invoke.cc | 1 - compiler/dex/quick/local_optimizations.cc | 45 +++++++---- compiler/dex/quick/mips/codegen_mips.h | 2 - compiler/dex/quick/mir_to_lir.h | 1 - compiler/dex/quick/x86/codegen_x86.h | 1 - compiler/dex/ssa_transformation.cc | 97 ++++++++++++++++-------- compiler/driver/compiler_driver.cc | 2 +- compiler/elf_writer_mclinker.cc | 
1 - compiler/elf_writer_mclinker.h | 1 - compiler/elf_writer_test.cc | 1 - compiler/jni/portable/jni_compiler.cc | 9 +-- compiler/jni/quick/x86/calling_convention_x86.cc | 1 - compiler/llvm/gbc_expander.cc | 5 -- compiler/llvm/ir_builder.h | 2 - compiler/llvm/llvm_compilation_unit.cc | 1 - compiler/oat_writer.cc | 1 - dex2oat/dex2oat.cc | 1 - runtime/atomic_integer.h | 3 +- runtime/barrier.cc | 2 +- runtime/barrier_test.cc | 9 +-- runtime/base/histogram-inl.h | 1 - runtime/base/histogram.h | 1 - runtime/base/timing_logger.h | 3 - runtime/debugger.cc | 2 - runtime/dex_method_iterator.h | 1 - runtime/gc/accounting/heap_bitmap-inl.h | 1 - runtime/gc/accounting/heap_bitmap.h | 1 - runtime/gc/accounting/space_bitmap.cc | 4 +- runtime/gc/accounting/space_bitmap.h | 1 + runtime/gc/collector/garbage_collector.h | 1 - runtime/gc/space/image_space.h | 1 - runtime/gc/space/large_object_space.cc | 8 +- runtime/gc/space/large_object_space.h | 2 +- runtime/image_test.cc | 1 - runtime/interpreter/interpreter.cc | 1 - runtime/jdwp/jdwp_handler.cc | 1 - runtime/mirror/abstract_method.h | 8 +- runtime/mirror/class.cc | 4 +- runtime/oat/runtime/argument_visitor.h | 3 +- runtime/oat_file.cc | 1 - runtime/runtime_support_llvm.cc | 2 - runtime/runtime_support_llvm.h | 3 - runtime/stack.h | 3 +- runtime/thread.cc | 4 +- runtime/thread_pool.cc | 13 +--- runtime/thread_pool.h | 4 +- runtime/thread_pool_test.cc | 4 +- runtime/trace.h | 1 + runtime/verifier/method_verifier.cc | 1 - runtime/verifier/reg_type.h | 4 + runtime/verifier/reg_type_test.cc | 4 +- runtime/verifier/register_line.cc | 1 - test/ReferenceMap/stack_walk_refmap_jni.cc | 2 +- test/StackWalk/stack_walk_jni.cc | 2 +- 68 files changed, 134 insertions(+), 177 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/Android.mk b/Android.mk index 27bd894f13..971eb2f202 100644 --- a/Android.mk +++ b/Android.mk @@ -334,15 +334,15 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - 
--filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline \ - $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens \ + $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art-aspirational" to see warnings we would like to fix .PHONY: cpplint-art-aspirational cpplint-art-aspirational: ./art/tools/cpplint.py \ --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references \ - $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) + $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) ######################################################################## # targets to switch back and forth from libdvm to libart diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 0ad859ea9a..cd2141a3c3 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -28,7 +28,6 @@ namespace art { class ArenaAllocator { public: - // Type of allocation for memory tuning. enum ArenaAllocKind { kAllocMisc, @@ -57,7 +56,6 @@ class ArenaAllocator { void DumpMemStats(std::ostream& os) const; private: - // Variable-length allocation block. struct ArenaMemBlock { size_t block_size; @@ -77,7 +75,6 @@ class ArenaAllocator { uint32_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds. 
uint32_t lost_bytes_; // Lost memory at end of too-small region uint32_t num_allocations_; - }; // ArenaAllocator diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h index 7e5c436f4c..de30859bfd 100644 --- a/compiler/dex/arena_bit_vector.h +++ b/compiler/dex/arena_bit_vector.h @@ -30,7 +30,6 @@ namespace art { */ class ArenaBitVector { public: - class Iterator { public: explicit Iterator(ArenaBitVector* bit_vector) diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h index 7fa8e9992a..acfec42352 100644 --- a/compiler/dex/backend.h +++ b/compiler/dex/backend.h @@ -23,7 +23,6 @@ namespace art { class Backend { - public: virtual ~Backend() {}; virtual void Materialize() = 0; @@ -32,7 +31,6 @@ class Backend { protected: explicit Backend(ArenaAllocator* arena) : arena_(arena) {}; ArenaAllocator* const arena_; - }; // Class Backend } // namespace art diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 19468698f9..e427862956 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -41,7 +41,6 @@ namespace art { */ class DataflowIterator { public: - virtual ~DataflowIterator() {} // Return the next BasicBlock* to visit. 
@@ -81,7 +80,6 @@ namespace art { GrowableArray* block_id_list_; int idx_; bool changed_; - }; // DataflowIterator class ReachableNodesIterator : public DataflowIterator { @@ -106,7 +104,6 @@ namespace art { class PostOrderDfsIterator : public DataflowIterator { public: - PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative) : DataflowIterator(mir_graph, is_iterative, 0, mir_graph->GetNumReachableBlocks(), false) { diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h index 6d26bc216d..3bfbcd4edf 100644 --- a/compiler/dex/growable_array.h +++ b/compiler/dex/growable_array.h @@ -46,7 +46,6 @@ enum OatListKind { template class GrowableArray { public: - class Iterator { public: explicit Iterator(GrowableArray* g_list) diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc index b783f3ed52..35d29235f2 100644 --- a/compiler/dex/local_value_numbering.cc +++ b/compiler/dex/local_value_numbering.cc @@ -509,7 +509,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) { AdvanceMemoryVersion(NO_VALUE, field_ref); } break; - } return res; } diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h index 09ed7aec8d..d29600a479 100644 --- a/compiler/dex/local_value_numbering.h +++ b/compiler/dex/local_value_numbering.h @@ -135,7 +135,6 @@ class LocalValueNumbering { ValueMap value_map_; MemoryVersionMap memory_version_map_; std::set null_checked_; - }; } // namespace art diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index a9af477d2a..0b3fa46faa 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -804,7 +804,6 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) { if (bb->successor_block_list.block_list_type == kPackedSwitch || bb->successor_block_list.block_list_type == kSparseSwitch) { - GrowableArray::Iterator iter(bb->successor_block_list.blocks); succ_id = 0; diff --git a/compiler/dex/mir_graph.h 
b/compiler/dex/mir_graph.h index f86e13016d..f6011e06e6 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -553,7 +553,6 @@ class MIRGraph { static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst]; private: - int FindCommonParent(int block1, int block2); void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, const ArenaBitVector* src2); diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index 4317d1e354..cfd3dafbee 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -74,7 +74,6 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) { ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder); DCHECK(inst != NULL); inst->eraseFromParent(); - } void MirConverter::DefineValue(::llvm::Value* val, int s_reg) { @@ -1580,8 +1579,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) { /* Extended MIR instructions like PHI */ void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir, - ::llvm::BasicBlock* llvm_bb) { - + ::llvm::BasicBlock* llvm_bb) { switch (static_cast(mir->dalvikInsn.opcode)) { case kMirOpPhi: { // The llvm Phi node already emitted - just DefineValue() here. 
@@ -1706,7 +1704,6 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { HandlePhiNodes(bb, llvm_bb); for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { - SetDexOffset(mir->offset); int opcode = mir->dalvikInsn.opcode; @@ -1795,7 +1792,6 @@ char RemapShorty(char shorty_type) { } ::llvm::FunctionType* MirConverter::GetFunctionType() { - // Get return type ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0])); diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h index 278631466f..2b681f6097 100644 --- a/compiler/dex/portable/mir_to_gbc.h +++ b/compiler/dex/portable/mir_to_gbc.h @@ -41,7 +41,6 @@ Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_gr llvm::LlvmCompilationUnit* const llvm_compilation_unit); class MirConverter : public Backend { - public: // TODO: flesh out and integrate into new world order. MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index f4aa1f3212..0649c9f319 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1007,7 +1007,6 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) { AssemblerStatus res = kSuccess; // Assume success for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { - if (lir->opcode < 0) { /* 1 means padding is needed */ if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index e169dc8f54..8698b1f9ed 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -969,7 +969,6 @@ void Mir2Lir::Materialize() { /* Method is not empty */ if (first_lir_insn_) { - // mark the targets of switch statement case labels ProcessSwitchTables(); @@ -979,9 +978,7 @@ void Mir2Lir::Materialize() { if (cu_->verbose) { 
CodegenDump(); } - } - } CompiledMethod* Mir2Lir::GetCompiledMethod() { diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 14e395cdac..fd8f86b5fc 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -736,7 +736,6 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this) { - // If we can treat it as non-range (Jumbo ops will use range form) if (info->num_arg_words <= 5) return GenDalvikArgsNoRange(info, call_state, pcrLabel, diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index eb27bf8b5d..2e9c845d05 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -73,11 +73,14 @@ void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) { void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { LIR* this_lir; - if (head_lir == tail_lir) return; + if (head_lir == tail_lir) { + return; + } for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) { - - if (is_pseudo_opcode(this_lir->opcode)) continue; + if (is_pseudo_opcode(this_lir->opcode)) { + continue; + } int sink_distance = 0; @@ -110,7 +113,9 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { * Currently only eliminate redundant ld/st for constant and Dalvik * register accesses. 
*/ - if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue; + if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) { + continue; + } uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM; uint64_t stop_use_reg_mask; @@ -127,12 +132,13 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { } for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) { - /* * Skip already dead instructions (whose dataflow information is * outdated and misleading). */ - if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue; + if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) { + continue; + } uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM; uint64_t alias_condition = this_mem_mask & check_mem_mask; @@ -274,12 +280,15 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { LIR* prev_inst_list[MAX_HOIST_DISTANCE]; /* Empty block */ - if (head_lir == tail_lir) return; + if (head_lir == tail_lir) { + return; + } /* Start from the second instruction */ for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) { - - if (is_pseudo_opcode(this_lir->opcode)) continue; + if (is_pseudo_opcode(this_lir->opcode)) { + continue; + } uint64_t target_flags = GetTargetInstFlags(this_lir->opcode); /* Skip non-interesting instructions */ @@ -312,12 +321,13 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { /* Try to hoist the load to a good spot */ for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) { - /* * Skip already dead instructions (whose dataflow information is * outdated and misleading). 
*/ - if (check_lir->flags.is_nop) continue; + if (check_lir->flags.is_nop) { + continue; + } uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM; uint64_t alias_condition = stop_use_all_mask & check_mem_mask; @@ -355,7 +365,9 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { */ if (stop_here || !is_pseudo_opcode(check_lir->opcode)) { prev_inst_list[next_slot++] = check_lir; - if (next_slot == MAX_HOIST_DISTANCE) break; + if (next_slot == MAX_HOIST_DISTANCE) { + break; + } } /* Found a new place to put the load - move it here */ @@ -400,12 +412,16 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { * If the first instruction is a load, don't hoist anything * above it since it is unlikely to be beneficial. */ - if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue; + if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) { + continue; + } /* * If the remaining number of slots is less than LD_LATENCY, * insert the hoisted load here. */ - if (slot < LD_LATENCY) break; + if (slot < LD_LATENCY) { + break; + } } // Don't look across a barrier label @@ -461,7 +477,6 @@ void Mir2Lir::RemoveRedundantBranches() { LIR* this_lir; for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) { - /* Branch to the next instruction */ if (IsUnconditionalBranch(this_lir)) { LIR* next_lir = this_lir; diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index 376ad7f10e..802ff625c9 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -24,7 +24,6 @@ namespace art { class MipsMir2Lir : public Mir2Lir { public: - MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); // Required for target - codegen utilities. 
@@ -175,7 +174,6 @@ class MipsMir2Lir : public Mir2Lir { private: void ConvertShortToLongBranch(LIR* lir); - }; } // namespace art diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index abb687cb84..41e5a2d988 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -166,7 +166,6 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, #define is_pseudo_opcode(opcode) (static_cast(opcode) < 0) class Mir2Lir : public Backend { - public: struct SwitchTable { int offset; diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 4fa9dfb4d9..edb5ae57c2 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -24,7 +24,6 @@ namespace art { class X86Mir2Lir : public Mir2Lir { public: - X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); // Required for target - codegen helpers. diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index ccd2454a49..3a0cbcc67c 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -46,9 +46,13 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) { GrowableArray::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *sbi = iterator.Next(); - if (sbi == NULL) break; + if (sbi == NULL) { + break; + } res = NeedsVisit(sbi->block); - if (res != NULL) break; + if (res != NULL) { + break; + } } } } @@ -112,12 +116,16 @@ void MIRGraph::ComputeDFSOrders() { * register idx is defined in BasicBlock bb. 
*/ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) { - if (bb->data_flow_info == NULL) return false; + if (bb->data_flow_info == NULL) { + return false; + } ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v); while (true) { int idx = iterator.Next(); - if (idx == -1) break; + if (idx == -1) { + break; + } /* Block bb defines register idx */ def_block_matrix_[idx]->SetBit(bb->id); } @@ -222,7 +230,9 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { GrowableArray::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; CheckForDominanceFrontier(bb, succ_bb); } @@ -233,13 +243,17 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { while (true) { //TUNING: hot call to BitVectorIteratorNext int dominated_idx = bv_iterator.Next(); - if (dominated_idx == -1) break; + if (dominated_idx == -1) { + break; + } BasicBlock* dominated_bb = GetBasicBlock(dominated_idx); ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier); while (true) { //TUNING: hot call to BitVectorIteratorNext int df_up_idx = df_iterator.Next(); - if (df_up_idx == -1) break; + if (df_up_idx == -1) { + break; + } BasicBlock* df_up_block = GetBasicBlock(df_up_idx); CheckForDominanceFrontier(bb, df_up_block); } @@ -313,7 +327,9 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) { /* Scan the rest of the predecessors */ while (true) { BasicBlock* pred_bb = iter.Next(); - if (!pred_bb) break; + if (!pred_bb) { + break; + } if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) { continue; } else { @@ -443,7 +459,9 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_; - if (bb->data_flow_info == NULL) return false; + if 
(bb->data_flow_info == NULL) { + return false; + } temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v); if (bb->taken && bb->taken->data_flow_info) ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v, @@ -455,7 +473,9 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { GrowableArray::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; if (succ_bb->data_flow_info) { ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v, @@ -504,25 +524,27 @@ void MIRGraph::InsertPhiNodes() { while (true) { int idx = iterator.Next(); - if (idx == -1) break; - BasicBlock* def_bb = GetBasicBlock(idx); + if (idx == -1) { + break; + } + BasicBlock* def_bb = GetBasicBlock(idx); - /* Merge the dominance frontier to tmp_blocks */ - //TUNING: hot call to Union(). - if (def_bb->dom_frontier != NULL) { - tmp_blocks->Union(def_bb->dom_frontier); - } + /* Merge the dominance frontier to tmp_blocks */ + //TUNING: hot call to Union(). + if (def_bb->dom_frontier != NULL) { + tmp_blocks->Union(def_bb->dom_frontier); } - if (!phi_blocks->Equal(tmp_blocks)) { - change = true; - phi_blocks->Copy(tmp_blocks); - - /* - * Iterate through the original blocks plus the new ones in - * the dominance frontier. - */ - input_blocks->Copy(phi_blocks); - input_blocks->Union(def_block_matrix_[dalvik_reg]); + } + if (!phi_blocks->Equal(tmp_blocks)) { + change = true; + phi_blocks->Copy(tmp_blocks); + + /* + * Iterate through the original blocks plus the new ones in + * the dominance frontier. 
+ */ + input_blocks->Copy(phi_blocks); + input_blocks->Union(def_block_matrix_[dalvik_reg]); } } while (change); @@ -533,10 +555,14 @@ void MIRGraph::InsertPhiNodes() { ArenaBitVector::Iterator iterator(phi_blocks); while (true) { int idx = iterator.Next(); - if (idx == -1) break; + if (idx == -1) { + break; + } BasicBlock* phi_bb = GetBasicBlock(idx); /* Variable will be clobbered before being used - no need for phi */ - if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue; + if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) { + continue; + } MIR *phi = static_cast(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo)); phi->dalvikInsn.opcode = static_cast(kMirOpPhi); @@ -572,7 +598,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { GrowableArray::Iterator iter(bb->predecessors); while (true) { BasicBlock* pred_bb = iter.Next(); - if (!pred_bb) break; + if (!pred_bb) { + break; + } int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg]; uses.push_back(ssa_reg); incoming_arc.push_back(pred_bb->id); @@ -605,8 +633,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { } void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { - - if (block->visited || block->hidden) return; + if (block->visited || block->hidden) { + return; + } block->visited = true; /* Process this block */ @@ -632,7 +661,9 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { GrowableArray::Iterator iterator(block->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; DoDFSPreOrderSSARename(succ_bb); /* Restore SSA map snapshot */ diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index d1d21b1d03..f1082db9bc 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ 
-1402,7 +1402,6 @@ class ParallelCompilationManager { } private: - class ForAllClosure : public Task { public: ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback, @@ -1423,6 +1422,7 @@ class ParallelCompilationManager { virtual void Finalize() { delete this; } + private: const ParallelCompilationManager* const manager_; const size_t begin_; diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc index 472a606cc6..05f3b025e7 100644 --- a/compiler/elf_writer_mclinker.cc +++ b/compiler/elf_writer_mclinker.cc @@ -307,7 +307,6 @@ void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool i // TODO: ownership of libm_lib_input? mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib); CHECK(libm_lib_input_input != NULL); - } #endif diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h index 468fa9a84f..3b33bc4986 100644 --- a/compiler/elf_writer_mclinker.h +++ b/compiler/elf_writer_mclinker.h @@ -38,7 +38,6 @@ class CompiledCode; class ElfWriterMclinker : public ElfWriter { public: - // Write an ELF file. Returns true on success, false on failure. 
static bool Create(File* file, std::vector& oat_contents, diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc index 4a02b61242..e48806ecc4 100644 --- a/compiler/elf_writer_test.cc +++ b/compiler/elf_writer_test.cc @@ -22,7 +22,6 @@ namespace art { class ElfWriterTest : public CommonTest { - protected: virtual void SetUp() { ReserveImageSpace(); diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc index 44d0c2d215..57b8a315a1 100644 --- a/compiler/jni/portable/jni_compiler.cc +++ b/compiler/jni/portable/jni_compiler.cc @@ -46,11 +46,10 @@ using namespace runtime_support; JniCompiler::JniCompiler(LlvmCompilationUnit* cunit, const CompilerDriver& driver, const DexCompilationUnit* dex_compilation_unit) -: cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()), - context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()), - dex_compilation_unit_(dex_compilation_unit), - func_(NULL), elf_func_idx_(0) { - + : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()), + context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()), + dex_compilation_unit_(dex_compilation_unit), + func_(NULL), elf_func_idx_(0) { // Check: Ensure that JNI compiler will only get "native" method CHECK(dex_compilation_unit->IsNative()); } diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc index b671bd190c..45dd42960c 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.cc +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -159,7 +159,6 @@ size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() { // count JNIEnv* and return pc (pushed after Method*) size_t total_args = static_args + param_args + 2; return total_args; - } } // namespace x86 diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc index b139e322f1..94cc9731aa 100644 --- a/compiler/llvm/gbc_expander.cc +++ b/compiler/llvm/gbc_expander.cc @@ -361,7 +361,6 @@ 
class GBCExpanderPass : public llvm::FunctionPass { llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id, llvm::CallInst& call_inst); - }; char GBCExpanderPass::ID = 0; @@ -710,7 +709,6 @@ llvm::Value* GBCExpanderPass::EmitLoadArrayLength(llvm::Value* array) { art::mirror::Array::LengthOffset().Int32Value(), irb_.getJIntTy(), kTBAAConstJObject); - } llvm::Value* @@ -751,7 +749,6 @@ EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx, llvm::Value* this_addr) { llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr, llvm::Value* index_value, JType elem_jty) { - int data_offset; if (elem_jty == kLong || elem_jty == kDouble || (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) { @@ -1426,7 +1423,6 @@ llvm::Value* GBCExpanderPass::Expand_LongCompare(llvm::Value* src1_value, llvm:: llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq, llvm::Value* cmp_lt) { - llvm::Constant* zero = irb_.getJInt(0); llvm::Constant* pos1 = irb_.getJInt(1); llvm::Constant* neg1 = irb_.getJInt(-1); @@ -2437,7 +2433,6 @@ EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx, llvm::Value* this_addr, uint32_t dex_pc, bool is_fast_path) { - llvm::Function* runtime_func = NULL; switch (invoke_type) { diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h index 65da005e9b..c81ba278a8 100644 --- a/compiler/llvm/ir_builder.h +++ b/compiler/llvm/ir_builder.h @@ -219,7 +219,6 @@ class IRBuilder : public LLVMIRBuilder { ::llvm::Value* CreatePtrDisp(::llvm::Value* base, ::llvm::Value* offset, ::llvm::PointerType* ret_ty) { - ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy()); ::llvm::Value* result_int = CreateAdd(base_int, offset); ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty); @@ -232,7 +231,6 @@ class IRBuilder : public LLVMIRBuilder { ::llvm::Value* count, ::llvm::Value* offset, ::llvm::PointerType* ret_ty) { - ::llvm::Value* block_offset = CreateMul(bs, count); 
::llvm::Value* total_offset = CreateAdd(block_offset, offset); diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc index dfb572477e..1f2b977921 100644 --- a/compiler/llvm/llvm_compilation_unit.cc +++ b/compiler/llvm/llvm_compilation_unit.cc @@ -166,7 +166,6 @@ void LlvmCompilationUnit::DumpBitcodeToString(std::string& str_buffer) { } bool LlvmCompilationUnit::Materialize() { - const bool kDumpBitcode = false; if (kDumpBitcode) { // Dump the bitcode for debugging diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 0bfa4ec328..4c32506d43 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -83,7 +83,6 @@ OatWriter::OatWriter(const std::vector& dex_files, size_oat_dex_file_methods_offsets_(0), size_oat_class_status_(0), size_oat_class_method_offsets_(0) { - size_t offset = InitOatHeader(); offset = InitOatDexFiles(offset); offset = InitDexFiles(offset); diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 541c916936..9e23d3e7d3 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -440,7 +440,6 @@ static size_t OpenDexFiles(const std::vector& dex_filenames, // during development when fatal aborts lead to a cascade of failures // that result in a deadlock. 
class WatchDog { - // WatchDog defines its own CHECK_PTHREAD_CALL to avoid using Log which uses locks #undef CHECK_PTHREAD_CALL #define CHECK_WATCH_DOG_PTHREAD_CALL(call, args, what) \ diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h index 117e837bdb..6711722672 100644 --- a/runtime/atomic_integer.h +++ b/runtime/atomic_integer.h @@ -70,10 +70,11 @@ class AtomicInteger { bool success = android_atomic_cas(expected_value, new_value, &value_) == 0; return success; } + private: volatile int32_t value_; }; -} +} // namespace art #endif // ART_RUNTIME_ATOMIC_INTEGER_H_ diff --git a/runtime/barrier.cc b/runtime/barrier.cc index 250d468adb..a64499848e 100644 --- a/runtime/barrier.cc +++ b/runtime/barrier.cc @@ -60,4 +60,4 @@ Barrier::~Barrier() { CHECK(!count_) << "Attempted to destroy barrier with non zero count"; } -} +} // namespace art diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc index d26ae9e20f..298ae569fb 100644 --- a/runtime/barrier_test.cc +++ b/runtime/barrier_test.cc @@ -32,9 +32,7 @@ class CheckWaitTask : public Task { : barrier_(barrier), count1_(count1), count2_(count2), - count3_(count3) { - - } + count3_(count3) {} void Run(Thread* self) { LOG(INFO) << "Before barrier 1 " << *self; @@ -50,6 +48,7 @@ class CheckWaitTask : public Task { virtual void Finalize() { delete this; } + private: Barrier* const barrier_; AtomicInteger* const count1_; @@ -100,9 +99,7 @@ class CheckPassTask : public Task { CheckPassTask(Barrier* barrier, AtomicInteger* count, size_t subtasks) : barrier_(barrier), count_(count), - subtasks_(subtasks) { - - } + subtasks_(subtasks) {} void Run(Thread* self) { for (size_t i = 0; i < subtasks_; ++i) { diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index bbca60308a..d572cf9cba 100644 --- a/runtime/base/histogram-inl.h +++ b/runtime/base/histogram-inl.h @@ -212,7 +212,6 @@ inline double Histogram::Percentile(double per) const { DCHECK_GT(cumulative_perc_.size(), 0ull); size_t idx, 
upper_idx = 0, lower_idx = 0; for (idx = 0; idx < cumulative_perc_.size(); idx++) { - if (per <= cumulative_perc_[idx]) { upper_idx = idx; break; diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h index dfb556bd79..33a1e6518b 100644 --- a/runtime/base/histogram.h +++ b/runtime/base/histogram.h @@ -30,7 +30,6 @@ namespace art { // Designed to be simple and used with timing logger in art. template class Histogram { - const double kAdjust; const Value kBucketWidth; const size_t kInitialBucketCount; diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index 816cbeadec..0f00a046e5 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -50,9 +50,7 @@ namespace base { } // namespace base class CumulativeLogger { - public: - explicit CumulativeLogger(const std::string& name); void prepare_stats(); ~CumulativeLogger(); @@ -68,7 +66,6 @@ class CumulativeLogger { void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_); private: - void AddPair(const std::string &label, uint64_t delta_time) EXCLUSIVE_LOCKS_REQUIRED(lock_); void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_); diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 5a31c87935..b502c9ab58 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -155,7 +155,6 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object); } - } gDebugInstrumentationListener; // JDWP is allowed unless the Zygote forbids it. 
@@ -761,7 +760,6 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectI JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector& class_ids, std::vector& counts) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::vector classes; counts.clear(); for (size_t i = 0; i < class_ids.size(); ++i) { diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h index e915d77e6d..1975e48330 100644 --- a/runtime/dex_method_iterator.h +++ b/runtime/dex_method_iterator.h @@ -120,7 +120,6 @@ class DexMethodIterator { } private: - ClassDataItemIterator& GetIterator() const { CHECK(it_.get() != NULL); return *it_.get(); diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h index 76226041d1..5edea95dc3 100644 --- a/runtime/gc/accounting/heap_bitmap-inl.h +++ b/runtime/gc/accounting/heap_bitmap-inl.h @@ -40,7 +40,6 @@ inline void HeapBitmap::Visit(const Visitor& visitor) { SpaceSetMap* set = *it; set->Visit(visitor); } - } } // namespace accounting diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h index f4b725c4e9..1710579619 100644 --- a/runtime/gc/accounting/heap_bitmap.h +++ b/runtime/gc/accounting/heap_bitmap.h @@ -106,7 +106,6 @@ class HeapBitmap { explicit HeapBitmap(Heap* heap) : heap_(heap) {} private: - const Heap* const heap_; void AddContinuousSpaceBitmap(SpaceBitmap* bitmap); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 19f1128963..6edc067cc7 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -64,9 +64,7 @@ SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size } // Clean up any resources associated with the bitmap. 
-SpaceBitmap::~SpaceBitmap() { - -} +SpaceBitmap::~SpaceBitmap() {} void SpaceBitmap::SetHeapLimit(uintptr_t new_end) { DCHECK(IsAligned(new_end)); diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index 5a1bfe3250..bf4c1ed9af 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -174,6 +174,7 @@ class SpaceBitmap { const size_t index = OffsetToIndex(offset); return &bitmap_begin_[index]; } + private: // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1, // however, we document that this is expected on heap_end_ diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index a22faac43b..1684664eff 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -79,7 +79,6 @@ class GarbageCollector { void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); protected: - // The initial phase. Done without mutators paused. virtual void InitializePhase() = 0; diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index fde2b419ac..bdda9fa4b1 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -78,7 +78,6 @@ class ImageSpace : public MemMapSpace { void Dump(std::ostream& os) const; private: - // Tries to initialize an ImageSpace from the given image path, // returning NULL on error. 
// diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index f7d776fbfb..6aedd9cf2b 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -49,9 +49,7 @@ void LargeObjectSpace::CopyLiveToMarked() { LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) : LargeObjectSpace(name), - lock_("large object map space lock", kAllocSpaceLock) { - -} + lock_("large object map space lock", kAllocSpaceLock) {} LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) { return new LargeObjectMapSpace(name); @@ -147,9 +145,7 @@ FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* beg AddFreeChunk(begin_, end_ - begin_, NULL); } -FreeListSpace::~FreeListSpace() { - -} +FreeListSpace::~FreeListSpace() {} void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) { Chunk* chunk = ChunkFromAddr(address); diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index db845db4e6..20a48673b6 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -60,7 +60,6 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs); protected: - explicit LargeObjectSpace(const std::string& name); // Approximate number of bytes which have been allocated into the space. 
@@ -165,6 +164,7 @@ class FreeListSpace : public LargeObjectSpace { DCHECK(m_previous == NULL || (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this)); } + private: size_t m_size; Chunk* m_previous; diff --git a/runtime/image_test.cc b/runtime/image_test.cc index 9ab1d7475b..ee50118b06 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -31,7 +31,6 @@ namespace art { class ImageTest : public CommonTest { - protected: virtual void SetUp() { ReserveImageSpace(); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 2fb272cef4..45314c231b 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -366,7 +366,6 @@ static void InterpreterJni(Thread* self, AbstractMethod* method, StringPiece sho { ScopedThreadStateChange tsc(self, kNative); jresult = fn(soa.Env(), rcvr.get(), arg0.get()); - } result->SetL(soa.Decode(jresult)); ScopedThreadStateChange tsc(self, kNative); diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index 8ef146c096..e141496c3b 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -361,7 +361,6 @@ static JdwpError VM_Capabilities(JdwpState*, Request&, ExpandBuf* reply) static JdwpError VM_CapabilitiesNew(JdwpState*, Request& request, ExpandBuf* reply) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // The first few capabilities are the same as those reported by the older call. 
VM_Capabilities(NULL, request, reply); diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index d909058e0d..bbebecebb4 100644 --- a/runtime/mirror/abstract_method.h +++ b/runtime/mirror/abstract_method.h @@ -497,13 +497,9 @@ class MANAGED AbstractMethod : public Object { DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod); }; -class MANAGED Method : public AbstractMethod { +class MANAGED Method : public AbstractMethod {}; -}; - -class MANAGED Constructor : public AbstractMethod { - -}; +class MANAGED Constructor : public AbstractMethod {}; class MANAGED AbstractMethodClass : public Class { private: diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 2d2130c39e..e490d97f80 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -320,13 +320,11 @@ bool Class::IsFieldClass() const { Class* java_lang_Class = GetClass(); Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass(); return this == java_lang_reflect_Field; - } bool Class::IsMethodClass() const { return (this == AbstractMethod::GetMethodClass()) || - (this == AbstractMethod::GetConstructorClass()); - + (this == AbstractMethod::GetConstructorClass()); } void Class::SetClassLoader(ClassLoader* new_class_loader) { diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h index d92ff19d13..aaf93f7db7 100644 --- a/runtime/oat/runtime/argument_visitor.h +++ b/runtime/oat/runtime/argument_visitor.h @@ -199,7 +199,6 @@ class QuickArgumentVisitor { uint64_t low_half = *reinterpret_cast(GetParamAddress()); uint64_t high_half = *reinterpret_cast(stack_args_); return (low_half & 0xffffffffULL) | (high_half << 32); - } void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -244,6 +243,6 @@ class QuickArgumentVisitor { bool is_split_long_or_double_; }; -} +} // namespace art #endif // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_ diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 
bb8341ee9f..6562633bc3 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -122,7 +122,6 @@ OatFile::~OatFile() { } bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) { - char* absolute_path = realpath(elf_filename.c_str(), NULL); if (absolute_path == NULL) { return false; diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc index cbdefe8a03..d703db29d5 100644 --- a/runtime/runtime_support_llvm.cc +++ b/runtime/runtime_support_llvm.cc @@ -50,7 +50,6 @@ using namespace art; extern "C" { - class ShadowFrameCopyVisitor : public StackVisitor { public: explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), @@ -844,5 +843,4 @@ void art_portable_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_m void art_portable_constructor_barrier() { LOG(FATAL) << "Implemented by IRBuilder."; } - } // extern "C" diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h index 566f7bcb16..43ea953a96 100644 --- a/runtime/runtime_support_llvm.h +++ b/runtime/runtime_support_llvm.h @@ -18,13 +18,10 @@ #define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ extern "C" { - //---------------------------------------------------------------------------- // Runtime Support Function Lookup Callback //---------------------------------------------------------------------------- - void* art_portable_find_runtime_support_func(void* context, const char* name); - } // extern "C" #endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ diff --git a/runtime/stack.h b/runtime/stack.h index 0e2c4c5b86..99ba898362 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -554,7 +554,6 @@ class StackVisitor { static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const; void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -567,6 +566,7 @@ class 
StackVisitor { size_t num_frames_; // Depth of the frame we're currently at. size_t cur_depth_; + protected: Context* const context_; }; @@ -638,6 +638,7 @@ class VmapTable { spill_shifts--; // wind back one as we want the last match return spill_shifts; } + private: const uint16_t* table_; }; diff --git a/runtime/thread.cc b/runtime/thread.cc index dd55195c15..a1fb862a17 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -2104,9 +2104,7 @@ class ReferenceMapVisitor : public StackVisitor { class RootCallbackVisitor { public: - RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) { - - } + RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {} void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const { visitor_(obj, arg_); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index 784a7caadf..067ef2d5d8 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -180,10 +180,7 @@ size_t ThreadPool::GetTaskCount(Thread* self) { WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size) - : ThreadPoolWorker(thread_pool, name, stack_size), - task_(NULL) { - -} + : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {} void WorkStealingWorker::Run() { Thread* self = Thread::Current(); @@ -254,9 +251,7 @@ void WorkStealingWorker::Run() { } } -WorkStealingWorker::~WorkStealingWorker() { - -} +WorkStealingWorker::~WorkStealingWorker() {} WorkStealingThreadPool::WorkStealingThreadPool(size_t num_threads) : ThreadPool(0), @@ -288,8 +283,6 @@ WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) { return NULL; } -WorkStealingThreadPool::~WorkStealingThreadPool() { - -} +WorkStealingThreadPool::~WorkStealingThreadPool() {} } // namespace art diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index b9f185d5f9..7b626fbbe1 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ 
-124,9 +124,7 @@ class ThreadPool { class WorkStealingTask : public Task { public: - WorkStealingTask() : ref_count_(0) { - - } + WorkStealingTask() : ref_count_(0) {} size_t GetRefCount() const { return ref_count_; diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc index 9b66318d8d..98178bc40c 100644 --- a/runtime/thread_pool_test.cc +++ b/runtime/thread_pool_test.cc @@ -105,9 +105,7 @@ class TreeTask : public Task { TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth) : thread_pool_(thread_pool), count_(count), - depth_(depth) { - - } + depth_(depth) {} void Run(Thread* self) { if (depth_ > 1) { diff --git a/runtime/trace.h b/runtime/trace.h index 5bd6a8d5ca..bd9c140d26 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -78,6 +78,7 @@ class Trace : public instrumentation::InstrumentationListener { mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, mirror::Throwable* exception_object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: explicit Trace(File* trace_file, int buffer_size, int flags); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 5a70f2a696..ff7f594501 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -3749,7 +3749,6 @@ MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() { } MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() { - // It is risky to rely on reg_types for sharpening in cases of soft // verification, we might end up sharpening to a wrong implementation. Just abort. if (!failure_messages_.empty()) { diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index c66e7cb514..5b806c47e5 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -309,6 +309,7 @@ class ConflictType : public RegType { // Destroy the singleton instance. 
static void Destroy(); + private: ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -338,6 +339,7 @@ class UndefinedType : public RegType { // Destroy the singleton instance. static void Destroy(); + private: UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -875,6 +877,7 @@ class UnresolvedSuperClass : public UnresolvedType { } std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -909,6 +912,7 @@ class UnresolvedMergedType : public UnresolvedType { } std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index f37edff6ac..d2c9dd6ba7 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -414,7 +414,6 @@ TEST_F(RegTypeReferenceTest, Dump) { EXPECT_EQ(expected, unresolved_merged.Dump()); } - TEST_F(RegTypeReferenceTest, JavalangString) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. Then check for the same effect when using @@ -433,8 +432,8 @@ TEST_F(RegTypeReferenceTest, JavalangString) { const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull); EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference()); EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference()); - } + TEST_F(RegTypeReferenceTest, JavalangObject) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. 
Then I am checking for the same effect when using @@ -474,7 +473,6 @@ TEST_F(RegTypeReferenceTest, Merging) { TEST_F(RegTypeTest, ConstPrecision) { - // Tests creating primitive types types. ScopedObjectAccess soa(Thread::Current()); RegTypeCache cache_new(true); diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc index 3a2145b9bb..d2abaac6f7 100644 --- a/runtime/verifier/register_line.cc +++ b/runtime/verifier/register_line.cc @@ -254,7 +254,6 @@ void RegisterLine::CopyResultRegister2(uint32_t vdst) { SetRegisterTypeWide(vdst, type_l, type_h); // also sets the high result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId(); result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId(); - } } diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 9ef4a59dc1..492916ed90 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -280,4 +280,4 @@ extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jin return count + 1; } -} +} // namespace art diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index 4b472daa5e..fc156b15d1 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -127,4 +127,4 @@ extern "C" JNIEXPORT jint JNICALL Java_StackWalk2_refmap2(JNIEnv*, jobject, jint return count + 1; } -} +} // namespace art -- cgit v1.2.3-59-g8ed1b From 0177fe200efc1bf4d433955ee7920c683fdf5901 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Sun, 21 Jul 2013 12:21:36 -0700 Subject: Remove CompilerDriver::IsDebuggingSupported Change-Id: Ib67e3ef67462fe5dae81148f7fe8cc76b3887f11 --- compiler/driver/compiler_driver.cc | 4 +--- compiler/driver/compiler_driver.h | 8 +------- dex2oat/dex2oat.cc | 16 ++++------------ runtime/common_test.h | 2 +- runtime/oat_test.cc | 3 +-- 5 files changed, 8 insertions(+), 25 deletions(-) (limited to 
'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index f1082db9bc..6558f8acfe 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -335,8 +335,7 @@ extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver, CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image, DescriptorSet* image_classes, - size_t thread_count, bool support_debugging, - bool dump_stats, bool dump_timings) + size_t thread_count, bool dump_stats, bool dump_timings) : compiler_backend_(compiler_backend), instruction_set_(instruction_set), freezing_constructor_lock_("freezing constructor lock"), @@ -345,7 +344,6 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet image_(image), image_classes_(image_classes), thread_count_(thread_count), - support_debugging_(support_debugging), start_ns_(0), stats_(new AOTCompilationStats), dump_stats_(dump_stats), diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 80cc89b95f..902fda7f0c 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -72,8 +72,7 @@ class CompilerDriver { // classes. 
explicit CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image, DescriptorSet* image_classes, - size_t thread_count, bool support_debugging, - bool dump_stats, bool dump_timings); + size_t thread_count, bool dump_stats, bool dump_timings); ~CompilerDriver(); @@ -84,10 +83,6 @@ class CompilerDriver { void CompileOne(const mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsDebuggingSupported() { - return support_debugging_; - } - InstructionSet GetInstructionSet() const { return instruction_set_; } @@ -362,7 +357,6 @@ class CompilerDriver { UniquePtr image_classes_; size_t thread_count_; - bool support_debugging_; uint64_t start_ns_; UniquePtr stats_; diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 9e23d3e7d3..75e1afe09a 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -149,14 +149,13 @@ static void Usage(const char* fmt, ...) { class Dex2Oat { public: static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, CompilerBackend compiler_backend, - InstructionSet instruction_set, size_t thread_count, bool support_debugging) + InstructionSet instruction_set, size_t thread_count) SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!CreateRuntime(options, instruction_set)) { *p_dex2oat = NULL; return false; } - *p_dex2oat = new Dex2Oat(Runtime::Current(), compiler_backend, instruction_set, thread_count, - support_debugging); + *p_dex2oat = new Dex2Oat(Runtime::Current(), compiler_backend, instruction_set, thread_count); return true; } @@ -249,7 +248,6 @@ class Dex2Oat { image, image_classes.release(), thread_count_, - support_debugging_, dump_stats, dump_timings)); @@ -336,12 +334,11 @@ class Dex2Oat { private: explicit Dex2Oat(Runtime* runtime, CompilerBackend compiler_backend, InstructionSet instruction_set, - size_t thread_count, bool support_debugging) + size_t thread_count) : compiler_backend_(compiler_backend), instruction_set_(instruction_set), 
runtime_(runtime), thread_count_(thread_count), - support_debugging_(support_debugging), start_ns_(NanoTime()) { } @@ -402,7 +399,6 @@ class Dex2Oat { Runtime* runtime_; size_t thread_count_; - bool support_debugging_; uint64_t start_ns_; DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat); @@ -596,7 +592,6 @@ static int dex2oat(int argc, char** argv) { std::string android_root; std::vector runtime_args; int thread_count = sysconf(_SC_NPROCESSORS_CONF); - bool support_debugging = false; #if defined(ART_USE_PORTABLE_COMPILER) CompilerBackend compiler_backend = kPortable; #else @@ -643,8 +638,6 @@ static int dex2oat(int argc, char** argv) { if (!ParseInt(oat_fd_str, &oat_fd)) { Usage("could not parse --oat-fd argument '%s' as an integer", oat_fd_str); } - } else if (option == "-g") { - support_debugging = true; } else if (option == "--watch-dog") { watch_dog_enabled = true; } else if (option == "--no-watch-dog") { @@ -866,8 +859,7 @@ static int dex2oat(int argc, char** argv) { Dex2Oat* p_dex2oat; - if (!Dex2Oat::Create(&p_dex2oat, options, compiler_backend, instruction_set, thread_count, - support_debugging)) { + if (!Dex2Oat::Create(&p_dex2oat, options, compiler_backend, instruction_set, thread_count)) { LOG(ERROR) << "Failed to create dex2oat"; return EXIT_FAILURE; } diff --git a/runtime/common_test.h b/runtime/common_test.h index 03a45aa20b..13626a5eac 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -349,7 +349,7 @@ class CommonTest : public testing::Test { class_linker_->FixupDexCaches(runtime_->GetResolutionMethod()); compiler_driver_.reset(new CompilerDriver(compiler_backend, instruction_set, true, new CompilerDriver::DescriptorSet, - 2, false, true, true)); + 2, true, true)); } // We typically don't generate an image in unit tests, disable this optimization by default. 
compiler_driver_->SetSupportBootImageFixup(false); diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index 70c2e9e88e..7f24564eb2 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc @@ -74,8 +74,7 @@ TEST_F(OatTest, WriteRead) { #else CompilerBackend compiler_backend = kQuick; #endif - compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, false, - true, true)); + compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true, true)); jobject class_loader = NULL; if (compile) { compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath()); -- cgit v1.2.3-59-g8ed1b From 4560248d4c85cade7f4fc7b30c3fb41b95a04a7f Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Sun, 21 Jul 2013 22:07:55 -0700 Subject: Move TimingLogger creation to dex2oat Change-Id: I4fdb6afd4ce2ac0d91c6c968893606d593b6ea18 --- compiler/driver/compiler_driver.cc | 18 ++------- compiler/driver/compiler_driver.h | 8 ++-- compiler/driver/compiler_driver_test.cc | 5 ++- dex2oat/dex2oat.cc | 65 +++++++++++++++++++++++---------- runtime/common_test.h | 5 ++- runtime/image_test.cc | 3 +- runtime/oat_test.cc | 8 ++-- 7 files changed, 68 insertions(+), 44 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 6558f8acfe..b1b205e067 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -335,7 +335,7 @@ extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver, CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image, DescriptorSet* image_classes, - size_t thread_count, bool dump_stats, bool dump_timings) + size_t thread_count, bool dump_stats) : compiler_backend_(compiler_backend), instruction_set_(instruction_set), freezing_constructor_lock_("freezing constructor lock"), @@ -347,7 +347,6 @@ 
CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet start_ns_(0), stats_(new AOTCompilationStats), dump_stats_(dump_stats), - dump_timings_(dump_timings), compiler_library_(NULL), compiler_(NULL), compiler_context_(NULL), @@ -495,20 +494,12 @@ const std::vector* CompilerDriver::CreateInterpreterToQuickEntry() cons } void CompilerDriver::CompileAll(jobject class_loader, - const std::vector& dex_files) { + const std::vector& dex_files, + TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); - UniquePtr thread_pool(new ThreadPool(thread_count_)); - TimingLogger timings("compiler", false); - PreCompile(class_loader, dex_files, *thread_pool.get(), timings); - Compile(class_loader, dex_files, *thread_pool.get(), timings); - - if (dump_timings_ && timings.GetTotalNs() > MsToNs(1000)) { - LOG(INFO) << Dumpable(timings); - } - if (dump_stats_) { stats_->Dump(); } @@ -537,7 +528,7 @@ static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader, return klass->IsVerified(); } -void CompilerDriver::CompileOne(const mirror::AbstractMethod* method) { +void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); Thread* self = Thread::Current(); jobject jclass_loader; @@ -560,7 +551,6 @@ void CompilerDriver::CompileOne(const mirror::AbstractMethod* method) { dex_files.push_back(dex_file); UniquePtr thread_pool(new ThreadPool(1U)); - TimingLogger timings("CompileOne", false); PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings); uint32_t method_idx = method->GetDexMethodIndex(); diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 902fda7f0c..1799057ea6 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -72,15 +72,16 @@ class CompilerDriver { // classes. 
explicit CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image, DescriptorSet* image_classes, - size_t thread_count, bool dump_stats, bool dump_timings); + size_t thread_count, bool dump_stats); ~CompilerDriver(); - void CompileAll(jobject class_loader, const std::vector& dex_files) + void CompileAll(jobject class_loader, const std::vector& dex_files, + TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method - void CompileOne(const mirror::AbstractMethod* method) + void CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InstructionSet GetInstructionSet() const { @@ -362,7 +363,6 @@ class CompilerDriver { UniquePtr stats_; bool dump_stats_; - bool dump_timings_; typedef void (*CompilerCallbackFn)(CompilerDriver& driver); typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver); diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 6a160f75c6..78cacaf08e 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -36,7 +36,10 @@ namespace art { class CompilerDriverTest : public CommonTest { protected: void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { - compiler_driver_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader)); + TimingLogger timings("CompilerDriverTest::CompileAll", false); + compiler_driver_->CompileAll(class_loader, + Runtime::Current()->GetCompileTimeClassPath(class_loader), + timings); MakeAllExecutable(class_loader); } diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 75e1afe09a..995f6d4052 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -83,8 +83,8 @@ static void Usage(const char* fmt, ...) 
{ UsageError(" containing a classes.dex file to compile."); UsageError(" Example: --zip-fd=5"); UsageError(""); - UsageError(" --zip-location=: specifies a symbolic name for the file corresponding"); - UsageError(" to the file descriptor specified by --zip-fd."); + UsageError(" --zip-location=: specifies a symbolic name for the file"); + UsageError(" corresponding to the file descriptor specified by --zip-fd."); UsageError(" Example: --zip-location=/system/app/Calculator.apk"); UsageError(""); UsageError(" --oat-file=: specifies the oat output destination via a filename."); @@ -148,8 +148,11 @@ static void Usage(const char* fmt, ...) { class Dex2Oat { public: - static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, CompilerBackend compiler_backend, - InstructionSet instruction_set, size_t thread_count) + static bool Create(Dex2Oat** p_dex2oat, + Runtime::Options& options, + CompilerBackend compiler_backend, + InstructionSet instruction_set, + size_t thread_count) SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!CreateRuntime(options, instruction_set)) { *p_dex2oat = NULL; @@ -161,13 +164,15 @@ class Dex2Oat { ~Dex2Oat() { delete runtime_; - LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_) << " (threads: " << thread_count_ << ")"; + LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_) + << " (threads: " << thread_count_ << ")"; } - // Reads the class names (java.lang.Object) and returns as set of class descriptors (Ljava/lang/Object;) + // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;) CompilerDriver::DescriptorSet* ReadImageClassesFromFile(const char* image_classes_filename) { - UniquePtr image_classes_file(new std::ifstream(image_classes_filename, std::ifstream::in)); + UniquePtr image_classes_file(new std::ifstream(image_classes_filename, + std::ifstream::in)); if (image_classes_file.get() == NULL) { LOG(ERROR) << "Failed to open image classes file " << 
image_classes_filename; return NULL; @@ -191,8 +196,9 @@ class Dex2Oat { return image_classes.release(); } - // Reads the class names (java.lang.Object) and returns as set of class descriptors (Ljava/lang/Object;) - CompilerDriver::DescriptorSet* ReadImageClassesFromZip(const std::string& zip_filename, const char* image_classes_filename) { + // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;) + CompilerDriver::DescriptorSet* ReadImageClassesFromZip(const std::string& zip_filename, + const char* image_classes_filename) { UniquePtr zip_archive(ZipArchive::Open(zip_filename)); if (zip_archive.get() == NULL) { LOG(ERROR) << "Failed to open zip file " << zip_filename; @@ -224,7 +230,7 @@ class Dex2Oat { bool image, UniquePtr& image_classes, bool dump_stats, - bool dump_timings) + TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create jobject class_loader = NULL; @@ -248,8 +254,7 @@ class Dex2Oat { image, image_classes.release(), thread_count_, - dump_stats, - dump_timings)); + dump_stats)); if (compiler_backend_ == kPortable) { driver->SetBitcodeFileName(bitcode_filename); @@ -258,7 +263,8 @@ class Dex2Oat { Thread::Current()->TransitionFromRunnableToSuspended(kNative); - driver->CompileAll(class_loader, dex_files); + timings.AddSplit("dex2oat Setup"); + driver->CompileAll(class_loader, dex_files, timings); Thread::Current()->TransitionFromSuspendedToRunnable(); @@ -294,11 +300,13 @@ class Dex2Oat { LOG(ERROR) << "Failed to create oat file " << oat_file->GetPath(); return NULL; } + timings.AddSplit("dex2oat OatWriter"); if (!driver->WriteElf(android_root, is_host, dex_files, oat_contents, oat_file)) { LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath(); return NULL; } + timings.AddSplit("dex2oat ElfWriter"); return driver.release(); } @@ -333,7 +341,9 @@ class Dex2Oat { } private: - explicit Dex2Oat(Runtime* runtime, 
CompilerBackend compiler_backend, InstructionSet instruction_set, + explicit Dex2Oat(Runtime* runtime, + CompilerBackend compiler_backend, + InstructionSet instruction_set, size_t thread_count) : compiler_backend_(compiler_backend), instruction_set_(instruction_set), @@ -365,7 +375,8 @@ class Dex2Oat { // Appends to dex_files any elements of class_path that it doesn't already // contain. This will open those dex files as necessary. - static void OpenClassPathFiles(const std::string& class_path, std::vector& dex_files) { + static void OpenClassPathFiles(const std::string& class_path, + std::vector& dex_files) { std::vector parsed; Split(class_path, ':', parsed); // Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained. @@ -384,7 +395,8 @@ class Dex2Oat { } // Returns true if dex_files has a dex with the named location. - static bool DexFilesContains(const std::vector& dex_files, const std::string& location) { + static bool DexFilesContains(const std::vector& dex_files, + const std::string& location) { for (size_t i = 0; i < dex_files.size(); ++i) { if (dex_files[i]->GetLocation() == location) { return true; @@ -564,6 +576,8 @@ const unsigned int WatchDog::kWatchDogWarningSeconds; const unsigned int WatchDog::kWatchDogTimeoutSeconds; static int dex2oat(int argc, char** argv) { + TimingLogger timings("compiler", false); + InitLogging(argv); // Skip over argv[0]. @@ -937,7 +951,7 @@ static int dex2oat(int argc, char** argv) { image, image_classes, dump_stats, - dump_timings)); + timings)); if (compiler.get() == NULL) { LOG(ERROR) << "Failed to create oat file: " << oat_location; @@ -959,7 +973,7 @@ static int dex2oat(int argc, char** argv) { // | alloc spaces | // +--------------+ // - // There are several constraints on the loading of the imag and boot.oat. + // There are several constraints on the loading of the image and boot.oat. // // 1. 
The image is expected to be loaded at an absolute address and // contains Objects with absolute pointers within the image. @@ -977,7 +991,7 @@ static int dex2oat(int argc, char** argv) { // // 1. We have already created that oat file above with // CreateOatFile. Originally this was just our own proprietary file - // but now it is contained within an ELF dynamic object (aka .so + // but now it is contained within an ELF dynamic object (aka an .so // file). The Compiler returned by CreateOatFile provides // PatchInformation for references to oat code and Methods that need // to be update once we know where the oat file will be located @@ -1003,6 +1017,7 @@ static int dex2oat(int argc, char** argv) { oat_unstripped, oat_location, *compiler.get()); + timings.AddSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromSuspendedToRunnable(); LOG(INFO) << "Image written successfully: " << image_filename; if (!image_creation_success) { @@ -1011,9 +1026,13 @@ static int dex2oat(int argc, char** argv) { } if (is_host) { + if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { + LOG(INFO) << Dumpable(timings); + } return EXIT_SUCCESS; } +#if ART_USE_PORTABLE_COMPILER // We currently only generate symbols on Portable // If we don't want to strip in place, copy from unstripped location to stripped location. // We need to strip after image creation because FixupElf needs to use .strtab. if (oat_unstripped != oat_stripped) { @@ -1031,6 +1050,7 @@ static int dex2oat(int argc, char** argv) { CHECK(write_ok); } oat_file.reset(out.release()); + timings.AddSplit("dex2oat OatFile copy"); LOG(INFO) << "Oat file copied successfully (stripped): " << oat_stripped; } @@ -1038,12 +1058,19 @@ static int dex2oat(int argc, char** argv) { off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET); CHECK_EQ(0, seek_actual); ElfStripper::Strip(oat_file.get()); + timings.AddSplit("dex2oat ElfStripper"); // We wrote the oat file successfully, and want to keep it. 
LOG(INFO) << "Oat file written successfully (stripped): " << oat_location; +#endif // ART_USE_PORTABLE_COMPILER + + if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { + LOG(INFO) << Dumpable(timings); + } return EXIT_SUCCESS; } + } // namespace art int main(int argc, char** argv) { diff --git a/runtime/common_test.h b/runtime/common_test.h index 13626a5eac..09ad7fd7b7 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -349,7 +349,7 @@ class CommonTest : public testing::Test { class_linker_->FixupDexCaches(runtime_->GetResolutionMethod()); compiler_driver_.reset(new CompilerDriver(compiler_backend, instruction_set, true, new CompilerDriver::DescriptorSet, - 2, true, true)); + 2, true)); } // We typically don't generate an image in unit tests, disable this optimization by default. compiler_driver_->SetSupportBootImageFixup(false); @@ -473,7 +473,8 @@ class CommonTest : public testing::Test { void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); - compiler_driver_->CompileOne(method); + TimingLogger timings("CommonTest::CompileMethod", false); + compiler_driver_->CompileOne(method, timings); MakeExecutable(method); } diff --git a/runtime/image_test.cc b/runtime/image_test.cc index ee50118b06..11218ad513 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -45,7 +45,8 @@ TEST_F(ImageTest, WriteRead) { { jobject class_loader = NULL; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath()); + TimingLogger timings("ImageTest::WriteRead", false); + compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); ScopedObjectAccess soa(Thread::Current()); VectorOutputStream output_stream(tmp_elf.GetFilename(), oat_contents); diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index 7f24564eb2..9a6bc19b13 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc 
@@ -74,10 +74,11 @@ TEST_F(OatTest, WriteRead) { #else CompilerBackend compiler_backend = kQuick; #endif - compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true, true)); + compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true)); jobject class_loader = NULL; if (compile) { - compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath()); + TimingLogger timings("OatTest::WriteRead", false); + compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } ScopedObjectAccess soa(Thread::Current()); @@ -99,7 +100,8 @@ TEST_F(OatTest, WriteRead) { ASSERT_TRUE(success_elf); if (compile) { // OatWriter strips the code, regenerate to compare - compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath()); + TimingLogger timings("CommonTest::WriteRead", false); + compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } UniquePtr oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false)); ASSERT_TRUE(oat_file.get() != NULL); -- cgit v1.2.3-59-g8ed1b From c50d8e11a098cc5c6239aa86b47d4fcf8cbb4899 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Tue, 23 Jul 2013 22:35:16 -0700 Subject: Remove OatWriter buffering to memory for ElfWriterQuick This allows the oat contents to be directly written to the file. 
Change-Id: Ibc7ddf57477b152f07784b52f7334be73fd22833 --- compiler/driver/compiler_driver.cc | 6 +- compiler/driver/compiler_driver.h | 3 +- compiler/elf_writer.h | 3 +- compiler/elf_writer_mclinker.cc | 15 ++- compiler/elf_writer_mclinker.h | 4 +- compiler/elf_writer_quick.cc | 23 ++-- compiler/elf_writer_quick.h | 4 +- compiler/image_writer.cc | 4 + compiler/oat_writer.cc | 215 ++++++++++++++++++++----------------- compiler/oat_writer.h | 51 +++++---- dex2oat/dex2oat.cc | 27 ++--- runtime/image_test.cc | 20 ++-- runtime/oat_test.cc | 26 ++--- 13 files changed, 211 insertions(+), 190 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 1f667308bd..ea2291cb44 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -2343,13 +2343,13 @@ bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex bool CompilerDriver::WriteElf(const std::string& android_root, bool is_host, const std::vector& dex_files, - std::vector& oat_contents, + OatWriter& oat_writer, art::File* file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(ART_USE_PORTABLE_COMPILER) - return art::ElfWriterMclinker::Create(file, oat_contents, dex_files, android_root, is_host, *this); + return art::ElfWriterMclinker::Create(file, oat_writer, dex_files, android_root, is_host, *this); #else - return art::ElfWriterQuick::Create(file, oat_contents, dex_files, android_root, is_host, *this); + return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, *this); #endif } void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set, diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 1799057ea6..f3f72dd3c7 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -39,6 +39,7 @@ namespace art { class AOTCompilationStats; class 
ParallelCompilationManager; class DexCompilationUnit; +class OatWriter; class TimingLogger; enum CompilerBackend { @@ -192,7 +193,7 @@ class CompilerDriver { bool WriteElf(const std::string& android_root, bool is_host, const std::vector& dex_files, - std::vector& oat_contents, + OatWriter& oat_writer, File* file); // TODO: move to a common home for llvm helpers once quick/portable are merged diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h index 0dfce6e40f..0ef4185431 100644 --- a/compiler/elf_writer.h +++ b/compiler/elf_writer.h @@ -33,6 +33,7 @@ namespace art { class CompilerDriver; class DexFile; class ElfFile; +class OatWriter; class ElfWriter { public: @@ -49,7 +50,7 @@ class ElfWriter { ElfWriter(const CompilerDriver& driver, File* elf_file); virtual ~ElfWriter(); - virtual bool Write(std::vector& oat_contents, + virtual bool Write(OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host) diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc index 05f3b025e7..2a9bc35559 100644 --- a/compiler/elf_writer_mclinker.cc +++ b/compiler/elf_writer_mclinker.cc @@ -36,7 +36,9 @@ #include "mirror/abstract_method.h" #include "mirror/abstract_method-inl.h" #include "mirror/object-inl.h" +#include "oat_writer.h" #include "scoped_thread_state_change.h" +#include "vector_output_stream.h" namespace art { @@ -46,19 +48,25 @@ ElfWriterMclinker::ElfWriterMclinker(const CompilerDriver& driver, File* elf_fil ElfWriterMclinker::~ElfWriterMclinker() {} bool ElfWriterMclinker::Create(File* elf_file, - std::vector& oat_contents, + OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host, const CompilerDriver& driver) { ElfWriterMclinker elf_writer(driver, elf_file); - return elf_writer.Write(oat_contents, dex_files, android_root, is_host); + return elf_writer.Write(oat_writer, dex_files, android_root, is_host); } -bool ElfWriterMclinker::Write(std::vector& 
oat_contents, +bool ElfWriterMclinker::Write(OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host) { + std::vector oat_contents; + oat_contents.reserve(oat_writer.GetSize()); + VectorOutputStream output_stream("oat contents", oat_contents); + CHECK(oat_writer.Write(output_stream)); + CHECK_EQ(oat_writer.GetSize(), oat_contents.size()); + Init(); AddOatInput(oat_contents); #if defined(ART_USE_PORTABLE_COMPILER) @@ -68,6 +76,7 @@ bool ElfWriterMclinker::Write(std::vector& oat_contents, if (!Link()) { return false; } + oat_contents.clear(); #if defined(ART_USE_PORTABLE_COMPILER) FixupOatMethodOffsets(dex_files); #endif diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h index 3b33bc4986..bdadf8f206 100644 --- a/compiler/elf_writer_mclinker.h +++ b/compiler/elf_writer_mclinker.h @@ -40,7 +40,7 @@ class ElfWriterMclinker : public ElfWriter { public: // Write an ELF file. Returns true on success, false on failure. static bool Create(File* file, - std::vector& oat_contents, + OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host, @@ -48,7 +48,7 @@ class ElfWriterMclinker : public ElfWriter { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); protected: - virtual bool Write(std::vector& oat_contents, + virtual bool Write(OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host) diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc index 9de96d271e..f2db5d834c 100644 --- a/compiler/elf_writer_quick.cc +++ b/compiler/elf_writer_quick.cc @@ -19,8 +19,10 @@ #include "base/logging.h" #include "base/unix_file/fd_file.h" #include "driver/compiler_driver.h" +#include "file_output_stream.h" #include "globals.h" #include "oat.h" +#include "oat_writer.h" #include "utils.h" namespace art { @@ -31,16 +33,16 @@ ElfWriterQuick::ElfWriterQuick(const CompilerDriver& driver, File* elf_file) 
ElfWriterQuick::~ElfWriterQuick() {} bool ElfWriterQuick::Create(File* elf_file, - std::vector& oat_contents, + OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host, const CompilerDriver& driver) { ElfWriterQuick elf_writer(driver, elf_file); - return elf_writer.Write(oat_contents, dex_files, android_root, is_host); + return elf_writer.Write(oat_writer, dex_files, android_root, is_host); } -bool ElfWriterQuick::Write(std::vector& oat_contents, +bool ElfWriterQuick::Write(OatWriter& oat_writer, const std::vector& dex_files_unused, const std::string& android_root_unused, bool is_host_unused) { @@ -193,9 +195,9 @@ bool ElfWriterQuick::Write(std::vector& oat_contents, // .rodata uint32_t oat_data_alignment = kPageSize; uint32_t oat_data_offset = expected_offset = RoundUp(expected_offset, oat_data_alignment); - const OatHeader* oat_header = reinterpret_cast(&oat_contents[0]); - CHECK(oat_header->IsValid()); - uint32_t oat_data_size = oat_header->GetExecutableOffset(); + const OatHeader& oat_header = oat_writer.GetOatHeader(); + CHECK(oat_header.IsValid()); + uint32_t oat_data_size = oat_header.GetExecutableOffset(); expected_offset += oat_data_size; if (debug) { LOG(INFO) << "oat_data_offset=" << oat_data_offset << std::hex << " " << oat_data_offset; @@ -206,9 +208,9 @@ bool ElfWriterQuick::Write(std::vector& oat_contents, uint32_t oat_exec_alignment = kPageSize; CHECK_ALIGNED(expected_offset, kPageSize); uint32_t oat_exec_offset = expected_offset = RoundUp(expected_offset, oat_exec_alignment); - uint32_t oat_exec_size = oat_contents.size() - oat_data_size; + uint32_t oat_exec_size = oat_writer.GetSize() - oat_data_size; expected_offset += oat_exec_size; - CHECK_EQ(oat_data_offset + oat_contents.size(), expected_offset); + CHECK_EQ(oat_data_offset + oat_writer.GetSize(), expected_offset); if (debug) { LOG(INFO) << "oat_exec_offset=" << oat_exec_offset << std::hex << " " << oat_exec_offset; LOG(INFO) << "oat_exec_size=" << 
oat_exec_size << std::hex << " " << oat_exec_size; @@ -617,13 +619,14 @@ bool ElfWriterQuick::Write(std::vector& oat_contents, << " for " << elf_file_->GetPath(); return false; } - if (!elf_file_->WriteFully(&oat_contents[0], oat_contents.size())) { + FileOutputStream output_stream(elf_file_); + if (!oat_writer.Write(output_stream)) { PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath(); return false; } // .dynamic - DCHECK_LE(oat_data_offset + oat_contents.size(), dynamic_offset); + DCHECK_LE(oat_data_offset + oat_writer.GetSize(), dynamic_offset); if (static_cast(dynamic_offset) != lseek(elf_file_->Fd(), dynamic_offset, SEEK_SET)) { PLOG(ERROR) << "Failed to seek to .dynamic offset " << dynamic_offset << " for " << elf_file_->GetPath(); diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h index a15c239de9..f36d06f79d 100644 --- a/compiler/elf_writer_quick.h +++ b/compiler/elf_writer_quick.h @@ -25,7 +25,7 @@ class ElfWriterQuick : public ElfWriter { public: // Write an ELF file. Returns true on success, false on failure. 
static bool Create(File* file, - std::vector& oat_contents, + OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host, @@ -33,7 +33,7 @@ class ElfWriterQuick : public ElfWriter { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); protected: - virtual bool Write(std::vector& oat_contents, + virtual bool Write(OatWriter& oat_writer, const std::vector& dex_files, const std::string& android_root, bool is_host) diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index f395428c7d..1612f7eb33 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -85,6 +85,10 @@ bool ImageWriter::Write(const std::string& image_filename, return false; } oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location); + if (oat_file_ == NULL) { + LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location; + return false; + } class_linker->RegisterOatFile(*oat_file_); interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset(); interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset(); diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index da05c49e0e..5eb837b25c 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -35,20 +35,6 @@ namespace art { -bool OatWriter::Create(OutputStream& output_stream, - const std::vector& dex_files, - uint32_t image_file_location_oat_checksum, - uint32_t image_file_location_oat_begin, - const std::string& image_file_location, - const CompilerDriver& driver) { - OatWriter oat_writer(dex_files, - image_file_location_oat_checksum, - image_file_location_oat_begin, - image_file_location, - &driver); - return oat_writer.Write(output_stream); -} - OatWriter::OatWriter(const std::vector& dex_files, uint32_t image_file_location_oat_checksum, uint32_t image_file_location_oat_begin, @@ -89,6 +75,7 @@ OatWriter::OatWriter(const std::vector& dex_files, offset = 
InitOatClasses(offset); offset = InitOatCode(offset); offset = InitOatCodeDexFiles(offset); + size_ = offset; CHECK_EQ(dex_files_->size(), oat_dex_files_.size()); CHECK(image_file_location.empty() == compiler->IsImage()); @@ -190,7 +177,8 @@ size_t OatWriter::InitOatCode(size_t offset) { if (compiler_driver_->IsImage()) { InstructionSet instruction_set = compiler_driver_->GetInstructionSet(); oat_header_->SetInterpreterToInterpreterEntryOffset(offset); - interpreter_to_interpreter_entry_.reset(compiler_driver_->CreateInterpreterToInterpreterEntry()); + interpreter_to_interpreter_entry_.reset( + compiler_driver_->CreateInterpreterToInterpreterEntry()); offset += interpreter_to_interpreter_entry_->size(); offset = CompiledCode::AlignCode(offset, instruction_set); @@ -336,7 +324,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, mapping_table_offset = (mapping_table_size == 0) ? 0 : offset; // Deduplicate mapping tables - SafeMap*, uint32_t>::iterator mapping_iter = mapping_table_offsets_.find(&mapping_table); + SafeMap*, uint32_t>::iterator mapping_iter = + mapping_table_offsets_.find(&mapping_table); if (mapping_iter != mapping_table_offsets_.end()) { mapping_table_offset = mapping_iter->second; } else { @@ -350,7 +339,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, vmap_table_offset = (vmap_table_size == 0) ? 
0 : offset; // Deduplicate vmap tables - SafeMap*, uint32_t>::iterator vmap_iter = vmap_table_offsets_.find(&vmap_table); + SafeMap*, uint32_t>::iterator vmap_iter = + vmap_table_offsets_.find(&vmap_table); if (vmap_iter != vmap_table_offsets_.end()) { vmap_table_offset = vmap_iter->second; } else { @@ -382,7 +372,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, #endif // Deduplicate GC maps - SafeMap*, uint32_t>::iterator gc_map_iter = gc_map_offsets_.find(&gc_map); + SafeMap*, uint32_t>::iterator gc_map_iter = + gc_map_offsets_.find(&gc_map); if (gc_map_iter != gc_map_offsets_.end()) { gc_map_offset = gc_map_iter->second; } else { @@ -392,14 +383,14 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, } } - oat_class->method_offsets_[class_def_method_index] - = OatMethodOffsets(code_offset, - frame_size_in_bytes, - core_spill_mask, - fp_spill_mask, - mapping_table_offset, - vmap_table_offset, - gc_map_offset); + oat_class->method_offsets_[class_def_method_index] = + OatMethodOffsets(code_offset, + frame_size_in_bytes, + core_spill_mask, + fp_spill_mask, + mapping_table_offset, + vmap_table_offset, + gc_map_offset); if (compiler_driver_->IsImage()) { ClassLinker* linker = Runtime::Current()->GetClassLinker(); @@ -428,12 +419,16 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, } #define DCHECK_OFFSET() \ - DCHECK_EQ(static_cast(offset), out.Seek(0, kSeekCurrent)) + DCHECK_EQ(static_cast(file_offset + relative_offset), out.Seek(0, kSeekCurrent)) \ + << "file_offset=" << file_offset << " relative_offset=" << relative_offset #define DCHECK_OFFSET_() \ - DCHECK_EQ(static_cast(offset_), out.Seek(0, kSeekCurrent)) + DCHECK_EQ(static_cast(file_offset + offset_), out.Seek(0, kSeekCurrent)) \ + << "file_offset=" << file_offset << " offset_=" << offset_ bool OatWriter::Write(OutputStream& out) { + const size_t file_offset = out.Seek(0, kSeekCurrent); + if (!out.WriteFully(oat_header_, 
sizeof(*oat_header_))) { PLOG(ERROR) << "Failed to write oat header to " << out.GetLocation(); return false; @@ -446,19 +441,19 @@ bool OatWriter::Write(OutputStream& out) { } size_oat_header_image_file_location_ += image_file_location_.size(); - if (!WriteTables(out)) { + if (!WriteTables(out, file_offset)) { LOG(ERROR) << "Failed to write oat tables to " << out.GetLocation(); return false; } - size_t code_offset = WriteCode(out); - if (code_offset == 0) { + size_t relative_offset = WriteCode(out, file_offset); + if (relative_offset == 0) { LOG(ERROR) << "Failed to write oat code to " << out.GetLocation(); return false; } - code_offset = WriteCodeDexFiles(out, code_offset); - if (code_offset == 0) { + relative_offset = WriteCodeDexFiles(out, file_offset, relative_offset); + if (relative_offset == 0) { LOG(ERROR) << "Failed to write oat code for dex files to " << out.GetLocation(); return false; } @@ -495,21 +490,25 @@ bool OatWriter::Write(OutputStream& out) { #undef DO_STAT LOG(INFO) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)"; \ - CHECK_EQ(size_total, static_cast(out.Seek(0, kSeekCurrent))); + CHECK_EQ(file_offset + size_total, static_cast(out.Seek(0, kSeekCurrent))); + CHECK_EQ(size_, size_total); } + CHECK_EQ(file_offset + size_, static_cast(out.Seek(0, kSeekCurrent))); + CHECK_EQ(size_, relative_offset); + return true; } -bool OatWriter::WriteTables(OutputStream& out) { +bool OatWriter::WriteTables(OutputStream& out, const size_t file_offset) { for (size_t i = 0; i != oat_dex_files_.size(); ++i) { - if (!oat_dex_files_[i]->Write(this, out)) { + if (!oat_dex_files_[i]->Write(this, out, file_offset)) { PLOG(ERROR) << "Failed to write oat dex information to " << out.GetLocation(); return false; } } for (size_t i = 0; i != oat_dex_files_.size(); ++i) { - uint32_t expected_offset = oat_dex_files_[i]->dex_file_offset_; + uint32_t expected_offset = file_offset + oat_dex_files_[i]->dex_file_offset_; off_t actual_offset = 
out.Seek(expected_offset, kSeekSet); if (static_cast(actual_offset) != expected_offset) { const DexFile* dex_file = (*dex_files_)[i]; @@ -519,13 +518,14 @@ bool OatWriter::WriteTables(OutputStream& out) { } const DexFile* dex_file = (*dex_files_)[i]; if (!out.WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) { - PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation() << " to " << out.GetLocation(); + PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation() + << " to " << out.GetLocation(); return false; } size_dex_file_ += dex_file->GetHeader().file_size_; } for (size_t i = 0; i != oat_classes_.size(); ++i) { - if (!oat_classes_[i]->Write(this, out)) { + if (!oat_classes_[i]->Write(this, out, file_offset)) { PLOG(ERROR) << "Failed to write oat methods information to " << out.GetLocation(); return false; } @@ -533,27 +533,29 @@ bool OatWriter::WriteTables(OutputStream& out) { return true; } -size_t OatWriter::WriteCode(OutputStream& out) { - uint32_t offset = oat_header_->GetExecutableOffset(); +size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) { + size_t relative_offset = oat_header_->GetExecutableOffset(); off_t new_offset = out.Seek(size_executable_offset_alignment_, kSeekCurrent); - if (static_cast(new_offset) != offset) { + size_t expected_file_offset = file_offset + relative_offset; + if (static_cast(new_offset) != expected_file_offset) { PLOG(ERROR) << "Failed to seek to oat code section. 
Actual: " << new_offset - << " Expected: " << offset << " File: " << out.GetLocation(); + << " Expected: " << expected_file_offset << " File: " << out.GetLocation(); return 0; } DCHECK_OFFSET(); if (compiler_driver_->IsImage()) { InstructionSet instruction_set = compiler_driver_->GetInstructionSet(); - if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0], interpreter_to_interpreter_entry_->size())) { + if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0], + interpreter_to_interpreter_entry_->size())) { PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation(); return false; } size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size(); - offset += interpreter_to_interpreter_entry_->size(); + relative_offset += interpreter_to_interpreter_entry_->size(); DCHECK_OFFSET(); - uint32_t aligned_offset = CompiledCode::AlignCode(offset, instruction_set); - uint32_t alignment_padding = aligned_offset - offset; + uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); + uint32_t alignment_padding = aligned_offset - relative_offset; out.Seek(alignment_padding, kSeekCurrent); size_stubs_alignment_ += alignment_padding; if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) { @@ -561,60 +563,67 @@ size_t OatWriter::WriteCode(OutputStream& out) { return false; } size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size(); - offset += alignment_padding + interpreter_to_quick_entry_->size(); + relative_offset += alignment_padding + interpreter_to_quick_entry_->size(); DCHECK_OFFSET(); - aligned_offset = CompiledCode::AlignCode(offset, instruction_set); - alignment_padding = aligned_offset - offset; + aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); + alignment_padding = aligned_offset - relative_offset; out.Seek(alignment_padding, kSeekCurrent); size_stubs_alignment_ += alignment_padding; - if 
(!out.WriteFully(&(*portable_resolution_trampoline_)[0], portable_resolution_trampoline_->size())) { + if (!out.WriteFully(&(*portable_resolution_trampoline_)[0], + portable_resolution_trampoline_->size())) { PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation(); return false; } size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size(); - offset += alignment_padding + portable_resolution_trampoline_->size(); + relative_offset += alignment_padding + portable_resolution_trampoline_->size(); DCHECK_OFFSET(); - aligned_offset = CompiledCode::AlignCode(offset, instruction_set); - alignment_padding = aligned_offset - offset; + aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); + alignment_padding = aligned_offset - relative_offset; out.Seek(alignment_padding, kSeekCurrent); size_stubs_alignment_ += alignment_padding; - if (!out.WriteFully(&(*quick_resolution_trampoline_)[0], quick_resolution_trampoline_->size())) { + if (!out.WriteFully(&(*quick_resolution_trampoline_)[0], + quick_resolution_trampoline_->size())) { PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation(); return false; } size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size(); - offset += alignment_padding + quick_resolution_trampoline_->size(); + relative_offset += alignment_padding + quick_resolution_trampoline_->size(); DCHECK_OFFSET(); } - return offset; + return relative_offset; } -size_t OatWriter::WriteCodeDexFiles(OutputStream& out, size_t code_offset) { +size_t OatWriter::WriteCodeDexFiles(OutputStream& out, + const size_t file_offset, + size_t relative_offset) { size_t oat_class_index = 0; for (size_t i = 0; i != oat_dex_files_.size(); ++i) { const DexFile* dex_file = (*dex_files_)[i]; CHECK(dex_file != NULL); - code_offset = WriteCodeDexFile(out, code_offset, oat_class_index, *dex_file); - if (code_offset == 0) { + relative_offset = WriteCodeDexFile(out, file_offset, 
relative_offset, oat_class_index, + *dex_file); + if (relative_offset == 0) { return 0; } } - return code_offset; + return relative_offset; } -size_t OatWriter::WriteCodeDexFile(OutputStream& out, size_t code_offset, size_t& oat_class_index, +size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset, + size_t relative_offset, size_t& oat_class_index, const DexFile& dex_file) { for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs(); class_def_index++, oat_class_index++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - code_offset = WriteCodeClassDef(out, code_offset, oat_class_index, dex_file, class_def); - if (code_offset == 0) { + relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, oat_class_index, + dex_file, class_def); + if (relative_offset == 0) { return 0; } } - return code_offset; + return relative_offset; } void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx, @@ -624,13 +633,15 @@ void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx, } size_t OatWriter::WriteCodeClassDef(OutputStream& out, - size_t code_offset, size_t oat_class_index, + const size_t file_offset, + size_t relative_offset, + size_t oat_class_index, const DexFile& dex_file, const DexFile::ClassDef& class_def) { const byte* class_data = dex_file.GetClassData(class_def); if (class_data == NULL) { // ie. 
an empty class such as a marker interface - return code_offset; + return relative_offset; } ClassDataItemIterator it(dex_file, class_data); // Skip fields @@ -644,27 +655,29 @@ size_t OatWriter::WriteCodeClassDef(OutputStream& out, size_t class_def_method_index = 0; while (it.HasNextDirectMethod()) { bool is_static = (it.GetMemberAccessFlags() & kAccStatic) != 0; - code_offset = WriteCodeMethod(out, code_offset, oat_class_index, class_def_method_index, - is_static, it.GetMemberIndex(), dex_file); - if (code_offset == 0) { + relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index, + class_def_method_index, is_static, it.GetMemberIndex(), + dex_file); + if (relative_offset == 0) { return 0; } class_def_method_index++; it.Next(); } while (it.HasNextVirtualMethod()) { - code_offset = WriteCodeMethod(out, code_offset, oat_class_index, class_def_method_index, - false, it.GetMemberIndex(), dex_file); - if (code_offset == 0) { + relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index, + class_def_method_index, false, it.GetMemberIndex(), dex_file); + if (relative_offset == 0) { return 0; } class_def_method_index++; it.Next(); } - return code_offset; + return relative_offset; } -size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_class_index, +size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset, + size_t relative_offset, size_t oat_class_index, size_t class_def_method_index, bool is_static, uint32_t method_idx, const DexFile& dex_file) { const CompiledMethod* compiled_method = @@ -676,26 +689,27 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c if (compiled_method != NULL) { // ie. 
not an abstract method #if !defined(ART_USE_PORTABLE_COMPILER) - uint32_t aligned_offset = compiled_method->AlignCode(offset); - uint32_t aligned_code_delta = aligned_offset - offset; + uint32_t aligned_offset = compiled_method->AlignCode(relative_offset); + uint32_t aligned_code_delta = aligned_offset - relative_offset; if (aligned_code_delta != 0) { off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent); size_code_alignment_ += aligned_code_delta; - if (static_cast(new_offset) != aligned_offset) { + uint32_t expected_offset = file_offset + aligned_offset; + if (static_cast(new_offset) != expected_offset) { PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset - << " Expected: " << aligned_offset << " File: " << out.GetLocation(); + << " Expected: " << expected_offset << " File: " << out.GetLocation(); return 0; } - offset += aligned_code_delta; + relative_offset += aligned_code_delta; DCHECK_OFFSET(); } - DCHECK_ALIGNED(offset, kArmAlignment); + DCHECK_ALIGNED(relative_offset, kArmAlignment); const std::vector& code = compiled_method->GetCode(); uint32_t code_size = code.size() * sizeof(code[0]); CHECK_NE(code_size, 0U); // Deduplicate code arrays - size_t code_offset = offset + sizeof(code_size) + compiled_method->CodeDelta(); + size_t code_offset = relative_offset + sizeof(code_size) + compiled_method->CodeDelta(); SafeMap*, uint32_t>::iterator code_iter = code_offsets_.find(&code); if (code_iter != code_offsets_.end() && code_offset != method_offsets.code_offset_) { DCHECK(code_iter->second == method_offsets.code_offset_) @@ -707,14 +721,14 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c return 0; } size_code_size_ += sizeof(code_size); - offset += sizeof(code_size); + relative_offset += sizeof(code_size); DCHECK_OFFSET(); if (!out.WriteFully(&code[0], code_size)) { ReportWriteFailure("method code", method_idx, dex_file, out); return 0; } size_code_ += code_size; - offset += code_size; + 
relative_offset += code_size; } DCHECK_OFFSET(); #endif @@ -726,20 +740,20 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c SafeMap*, uint32_t>::iterator mapping_iter = mapping_table_offsets_.find(&mapping_table); if (mapping_iter != mapping_table_offsets_.end() && - offset != method_offsets.mapping_table_offset_) { + relative_offset != method_offsets.mapping_table_offset_) { DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0) || mapping_iter->second == method_offsets.mapping_table_offset_) << PrettyMethod(method_idx, dex_file); } else { DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0) - || offset == method_offsets.mapping_table_offset_) + || relative_offset == method_offsets.mapping_table_offset_) << PrettyMethod(method_idx, dex_file); if (!out.WriteFully(&mapping_table[0], mapping_table_size)) { ReportWriteFailure("mapping table", method_idx, dex_file, out); return 0; } size_mapping_table_ += mapping_table_size; - offset += mapping_table_size; + relative_offset += mapping_table_size; } DCHECK_OFFSET(); @@ -750,20 +764,20 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c SafeMap*, uint32_t>::iterator vmap_iter = vmap_table_offsets_.find(&vmap_table); if (vmap_iter != vmap_table_offsets_.end() && - offset != method_offsets.vmap_table_offset_) { + relative_offset != method_offsets.vmap_table_offset_) { DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0) || vmap_iter->second == method_offsets.vmap_table_offset_) << PrettyMethod(method_idx, dex_file); } else { DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0) - || offset == method_offsets.vmap_table_offset_) + || relative_offset == method_offsets.vmap_table_offset_) << PrettyMethod(method_idx, dex_file); if (!out.WriteFully(&vmap_table[0], vmap_table_size)) { ReportWriteFailure("vmap table", method_idx, dex_file, out); return 0; } size_vmap_table_ += 
vmap_table_size; - offset += vmap_table_size; + relative_offset += vmap_table_size; } DCHECK_OFFSET(); @@ -774,25 +788,25 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c SafeMap*, uint32_t>::iterator gc_map_iter = gc_map_offsets_.find(&gc_map); if (gc_map_iter != gc_map_offsets_.end() && - offset != method_offsets.gc_map_offset_) { + relative_offset != method_offsets.gc_map_offset_) { DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0) || gc_map_iter->second == method_offsets.gc_map_offset_) << PrettyMethod(method_idx, dex_file); } else { DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0) - || offset == method_offsets.gc_map_offset_) + || relative_offset == method_offsets.gc_map_offset_) << PrettyMethod(method_idx, dex_file); if (!out.WriteFully(&gc_map[0], gc_map_size)) { ReportWriteFailure("GC map", method_idx, dex_file, out); return 0; } size_gc_map_ += gc_map_size; - offset += gc_map_size; + relative_offset += gc_map_size; } DCHECK_OFFSET(); } - return offset; + return relative_offset; } OatWriter::OatDexFile::OatDexFile(size_t offset, const DexFile& dex_file) { @@ -822,7 +836,9 @@ void OatWriter::OatDexFile::UpdateChecksum(OatHeader& oat_header) const { sizeof(methods_offsets_[0]) * methods_offsets_.size()); } -bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, OutputStream& out) const { +bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, + OutputStream& out, + const size_t file_offset) const { DCHECK_OFFSET_(); if (!out.WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) { PLOG(ERROR) << "Failed to write dex file location length to " << out.GetLocation(); @@ -881,14 +897,16 @@ void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const { sizeof(method_offsets_[0]) * method_offsets_.size()); } -bool OatWriter::OatClass::Write(OatWriter* oat_writer, OutputStream& out) const { +bool OatWriter::OatClass::Write(OatWriter* oat_writer, + OutputStream& out, + const 
size_t file_offset) const { DCHECK_OFFSET_(); if (!out.WriteFully(&status_, sizeof(status_))) { PLOG(ERROR) << "Failed to write class status to " << out.GetLocation(); return false; } oat_writer->size_oat_class_status_ += sizeof(status_); - DCHECK_EQ(static_cast(GetOatMethodOffsetsOffsetFromOatHeader(0)), + DCHECK_EQ(static_cast(file_offset + GetOatMethodOffsetsOffsetFromOatHeader(0)), out.Seek(0, kSeekCurrent)); if (!out.WriteFully(&method_offsets_[0], sizeof(method_offsets_[0]) * method_offsets_.size())) { @@ -896,7 +914,8 @@ bool OatWriter::OatClass::Write(OatWriter* oat_writer, OutputStream& out) const return false; } oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size(); - DCHECK_EQ(static_cast(GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())), + DCHECK_EQ(static_cast(file_offset + + GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())), out.Seek(0, kSeekCurrent)); return true; } diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index ea7156ea49..f2c5626b4d 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -62,23 +62,25 @@ class OutputStream; // class OatWriter { public: - // Write an oat file. Returns true on success, false on failure. 
- static bool Create(OutputStream& out, - const std::vector& dex_files, - uint32_t image_file_location_oat_checksum, - uint32_t image_file_location_oat_begin, - const std::string& image_file_location, - const CompilerDriver& compiler) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - private: OatWriter(const std::vector& dex_files, uint32_t image_file_location_oat_checksum, uint32_t image_file_location_oat_begin, const std::string& image_file_location, const CompilerDriver* compiler) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const OatHeader& GetOatHeader() const { + return *oat_header_; + } + + size_t GetSize() const { + return size_; + } + + bool Write(OutputStream& out); + ~OatWriter(); + private: size_t InitOatHeader(); size_t InitOatDexFiles(size_t offset); size_t InitDexFiles(size_t offset); @@ -101,17 +103,17 @@ class OatWriter { uint32_t method_idx, const DexFile*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool Write(OutputStream& out); - bool WriteTables(OutputStream& out); - size_t WriteCode(OutputStream& out); - size_t WriteCodeDexFiles(OutputStream& out, size_t offset); - size_t WriteCodeDexFile(OutputStream& out, size_t offset, size_t& oat_class_index, - const DexFile& dex_file); - size_t WriteCodeClassDef(OutputStream& out, size_t offset, size_t oat_class_index, - const DexFile& dex_file, const DexFile::ClassDef& class_def); - size_t WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_class_index, - size_t class_def_method_index, bool is_static, uint32_t method_idx, - const DexFile& dex_file); + bool WriteTables(OutputStream& out, const size_t file_offset); + size_t WriteCode(OutputStream& out, const size_t file_offset); + size_t WriteCodeDexFiles(OutputStream& out, const size_t file_offset, size_t relative_offset); + size_t WriteCodeDexFile(OutputStream& out, const size_t file_offset, size_t relative_offset, + size_t& oat_class_index, const DexFile& dex_file); + size_t WriteCodeClassDef(OutputStream& out, const size_t 
file_offset, size_t relative_offset, + size_t oat_class_index, const DexFile& dex_file, + const DexFile::ClassDef& class_def); + size_t WriteCodeMethod(OutputStream& out, const size_t file_offset, size_t relative_offset, + size_t oat_class_index, size_t class_def_method_index, bool is_static, + uint32_t method_idx, const DexFile& dex_file); void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file, OutputStream& out) const; @@ -121,7 +123,7 @@ class OatWriter { explicit OatDexFile(size_t offset, const DexFile& dex_file); size_t SizeOf() const; void UpdateChecksum(OatHeader& oat_header) const; - bool Write(OatWriter* oat_writer, OutputStream& out) const; + bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const; // Offset of start of OatDexFile from beginning of OatHeader. It is // used to validate file position when writing. @@ -145,7 +147,7 @@ class OatWriter { size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const; size_t SizeOf() const; void UpdateChecksum(OatHeader& oat_header) const; - bool Write(OatWriter* oat_writer, OutputStream& out) const; + bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const; // Offset of start of OatClass from beginning of OatHeader. It is // used to validate file position when writing. For Portable, it @@ -167,6 +169,9 @@ class OatWriter { // note OatFile does not take ownership of the DexFiles const std::vector* dex_files_; + // Size required for Oat data structures. + size_t size_; + // dependencies on the image. uint32_t image_file_location_oat_checksum_; uint32_t image_file_location_oat_begin_; diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 0e32f0b9f2..1a6a98a2af 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -282,27 +282,14 @@ class Dex2Oat { } } - std::vector oat_contents; - // TODO: change ElfWriterQuick to not require the creation of oat_contents. 
The old pre-mclinker - // OatWriter streamed directly to disk. The new could can be adapted to do it as follows: - // 1.) use first pass of OatWriter to calculate size of oat structure, - // 2.) call ElfWriterQuick with pointer to OatWriter instead of contents, - // 3.) have ElfWriterQuick call back to OatWriter to stream generate the output directly in - // place in the elf file. - oat_contents.reserve(5 * MB); - VectorOutputStream vector_output_stream(oat_file->GetPath(), oat_contents); - if (!OatWriter::Create(vector_output_stream, - dex_files, - image_file_location_oat_checksum, - image_file_location_oat_data_begin, - image_file_location, - *driver.get())) { - LOG(ERROR) << "Failed to create oat file " << oat_file->GetPath(); - return NULL; - } + OatWriter oat_writer(dex_files, + image_file_location_oat_checksum, + image_file_location_oat_data_begin, + image_file_location, + driver.get()); timings.AddSplit("dex2oat OatWriter"); - if (!driver->WriteElf(android_root, is_host, dex_files, oat_contents, oat_file)) { + if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) { LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath(); return NULL; } @@ -1019,10 +1006,10 @@ static int dex2oat(int argc, char** argv) { *compiler.get()); timings.AddSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromSuspendedToRunnable(); - LOG(INFO) << "Image written successfully: " << image_filename; if (!image_creation_success) { return EXIT_FAILURE; } + LOG(INFO) << "Image written successfully: " << image_filename; } if (is_host) { diff --git a/runtime/image_test.cc b/runtime/image_test.cc index 11218ad513..75eead4d8f 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -41,7 +41,6 @@ class ImageTest : public CommonTest { TEST_F(ImageTest, WriteRead) { ScratchFile tmp_elf; { - std::vector oat_contents; { jobject class_loader = NULL; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -49,17 +48,14 @@ 
TEST_F(ImageTest, WriteRead) { compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); ScopedObjectAccess soa(Thread::Current()); - VectorOutputStream output_stream(tmp_elf.GetFilename(), oat_contents); - bool success_oat = OatWriter::Create(output_stream, class_linker->GetBootClassPath(), - 0, 0, "", *compiler_driver_.get()); - ASSERT_TRUE(success_oat); - - bool success_elf = compiler_driver_->WriteElf(GetTestAndroidRoot(), - !kIsTargetBuild, - class_linker->GetBootClassPath(), - oat_contents, - tmp_elf.GetFile()); - ASSERT_TRUE(success_elf); + OatWriter oat_writer(class_linker->GetBootClassPath(), + 0, 0, "", compiler_driver_.get()); + bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(), + !kIsTargetBuild, + class_linker->GetBootClassPath(), + oat_writer, + tmp_elf.GetFile()); + ASSERT_TRUE(success); } } // Workound bug that mcld::Linker::emit closes tmp_elf by reopening as tmp_oat. diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index 9a6bc19b13..3f2e43e985 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc @@ -83,21 +83,17 @@ TEST_F(OatTest, WriteRead) { ScopedObjectAccess soa(Thread::Current()); ScratchFile tmp; - std::vector oat_contents; - VectorOutputStream output_stream(tmp.GetFilename(), oat_contents); - bool success_oat = OatWriter::Create(output_stream, - class_linker->GetBootClassPath(), - 42U, - 4096U, - "lue.art", - *compiler_driver_.get()); - ASSERT_TRUE(success_oat); - bool success_elf = compiler_driver_->WriteElf(GetTestAndroidRoot(), - !kIsTargetBuild, - class_linker->GetBootClassPath(), - oat_contents, - tmp.GetFile()); - ASSERT_TRUE(success_elf); + OatWriter oat_writer(class_linker->GetBootClassPath(), + 42U, + 4096U, + "lue.art", + compiler_driver_.get()); + bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(), + !kIsTargetBuild, + class_linker->GetBootClassPath(), + oat_writer, + tmp.GetFile()); + ASSERT_TRUE(success); if (compile) { // OatWriter strips the code, regenerate to 
compare TimingLogger timings("CommonTest::WriteRead", false); -- cgit v1.2.3-59-g8ed1b From 8f20ff4354fd3fdd7604a299b7bf4446e5f4584c Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 24 Jul 2013 13:15:42 -0700 Subject: Update blacklist with HttpsURLConnection$DefaultHolder Change-Id: Ib0a8b9511dc8e63d14f11ba48b80dfb78e32d25e --- compiler/driver/compiler_driver.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index ea2291cb44..486c536956 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -1981,7 +1981,7 @@ static const char* class_initializer_black_list[] = { "Ljavax/security/cert/X509Certificate$1;", // Calls VMClassLoader.getBootClassPathSize. "Ljavax/microedition/khronos/egl/EGL10;", // Requires EGLContext. "Ljavax/microedition/khronos/egl/EGLContext;", // Requires com.google.android.gles_jni.EGLImpl. - "Ljavax/net/ssl/HttpsURLConnection;", // Calls SSLSocketFactory.getDefault -> java.security.Security.getProperty. + "Ljavax/net/ssl/HttpsURLConnection$DefaultHolder;", // Calls VMClassLoader.getBootClassPathSize(). "Ljavax/xml/datatype/DatatypeConstants;", // Calls OsConstants.initConstants. "Ljavax/xml/datatype/FactoryFinder;", // Calls OsConstants.initConstants. "Ljavax/xml/namespace/QName;", // Calls OsConstants.initConstants. -- cgit v1.2.3-59-g8ed1b From d792cc1569c3505d68352c11a72447419ee6eaaf Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Thu, 25 Jul 2013 10:50:39 -0700 Subject: Add art support for Zygote NoPreloadHolder which black lists early initialization. 
Change-Id: I9c68f6d7a1f230aba6382b1331d413d4cb92be12 --- compiler/driver/compiler_driver.cc | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 486c536956..c2a1312354 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -1981,7 +1981,6 @@ static const char* class_initializer_black_list[] = { "Ljavax/security/cert/X509Certificate$1;", // Calls VMClassLoader.getBootClassPathSize. "Ljavax/microedition/khronos/egl/EGL10;", // Requires EGLContext. "Ljavax/microedition/khronos/egl/EGLContext;", // Requires com.google.android.gles_jni.EGLImpl. - "Ljavax/net/ssl/HttpsURLConnection$DefaultHolder;", // Calls VMClassLoader.getBootClassPathSize(). "Ljavax/xml/datatype/DatatypeConstants;", // Calls OsConstants.initConstants. "Ljavax/xml/datatype/FactoryFinder;", // Calls OsConstants.initConstants. "Ljavax/xml/namespace/QName;", // Calls OsConstants.initConstants. @@ -2071,11 +2070,14 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } if (!klass->IsInitialized()) { if (can_init_static_fields) { - bool is_black_listed = false; - for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) { - if (StringPiece(descriptor) == class_initializer_black_list[i]) { - is_black_listed = true; - break; + // NoPreloadHolder inner class implies this should not be initialized early. 
+ bool is_black_listed = StringPiece(descriptor).ends_with("$NoPreloadHolder;"); + if (!is_black_listed) { + for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) { + if (StringPiece(descriptor) == class_initializer_black_list[i]) { + is_black_listed = true; + break; + } } } if (!is_black_listed) { -- cgit v1.2.3-59-g8ed1b From 6f28d91aab952e3244fbb4e707fa38f85538f374 Mon Sep 17 00:00:00 2001 From: Anwar Ghuloum Date: Wed, 24 Jul 2013 15:02:53 -0700 Subject: Add systrace support to NewTimingLogger, migrate compiler timing logging to NewTimingLogger Rpleaced old TimingLogger by NewTimingLogger, renamed NewTimingLogger to TimingLogger, added systrace support to TimingLogger. Tests passing, phone booting, systrace working. Change-Id: I2aeffb8bcb7f0fd979d8a2a3a8bcfbaa02413679 --- compiler/driver/compiler_driver.cc | 41 ++++++++--------- compiler/driver/compiler_driver.h | 26 +++++------ compiler/driver/compiler_driver_test.cc | 2 +- dex2oat/dex2oat.cc | 22 ++++----- runtime/base/timing_logger.cc | 77 +++++++------------------------- runtime/base/timing_logger.h | 35 +++------------ runtime/common_test.h | 2 +- runtime/gc/collector/garbage_collector.h | 4 +- runtime/gc/collector/mark_sweep.cc | 2 +- runtime/gc/heap.cc | 6 +-- runtime/gc/heap.h | 4 +- runtime/image_test.cc | 2 +- runtime/oat_test.cc | 4 +- 13 files changed, 83 insertions(+), 144 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index c2a1312354..2aa2a98efb 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -495,7 +495,7 @@ const std::vector* CompilerDriver::CreateInterpreterToQuickEntry() cons void CompilerDriver::CompileAll(jobject class_loader, const std::vector& dex_files, - TimingLogger& timings) { + base::TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); UniquePtr thread_pool(new ThreadPool(thread_count_)); PreCompile(class_loader, 
dex_files, *thread_pool.get(), timings); @@ -528,7 +528,7 @@ static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader, return klass->IsVerified(); } -void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) { +void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); Thread* self = Thread::Current(); jobject jclass_loader; @@ -572,7 +572,7 @@ void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogg } void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -581,7 +581,7 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { LoadImageClasses(timings); Resolve(class_loader, dex_files, thread_pool, timings); @@ -666,12 +666,13 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) } // Make a list of descriptors for classes to include in the image -void CompilerDriver::LoadImageClasses(TimingLogger& timings) +void CompilerDriver::LoadImageClasses(base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_) { if (image_classes_.get() == NULL) { return; } + timings.NewSplit("LoadImageClasses"); // Make a first class to load all classes explicitly listed in the file Thread* self = Thread::Current(); ScopedObjectAccess soa(self); @@ -726,7 +727,6 @@ void CompilerDriver::LoadImageClasses(TimingLogger& timings) class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get()); CHECK_NE(image_classes_->size(), 0U); - timings.AddSplit("LoadImageClasses"); } static void 
MaybeAddToImageClasses(mirror::Class* klass, CompilerDriver::DescriptorSet* image_classes) @@ -758,11 +758,13 @@ void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get()); } -void CompilerDriver::UpdateImageClasses(TimingLogger& timings) { +void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) { if (image_classes_.get() == NULL) { return; } + timings.NewSplit("UpdateImageClasses"); + // Update image_classes_ with classes for objects created by methods. Thread* self = Thread::Current(); const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter"); @@ -772,7 +774,6 @@ void CompilerDriver::UpdateImageClasses(TimingLogger& timings) { heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this); self->EndAssertNoThreadSuspension(old_cause); - timings.AddSplit("UpdateImageClasses"); } void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compiled_class) { @@ -1551,22 +1552,22 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i } void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // TODO: we could resolve strings here, although the string table is largely filled with class // and method names. 
+ timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " Types").c_str())); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_); - timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types"); + timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " MethodsAndFields").c_str())); context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); - timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields"); } void CompilerDriver::Verify(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -1620,11 +1621,11 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_ } void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + timings.NewSplit(strdup(("Verify " + dex_file.GetLocation()).c_str())); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_); - timings.AddSplit("Verify " + dex_file.GetLocation()); } static const char* class_initializer_black_list[] = { @@ -2116,7 +2117,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + timings.NewSplit(strdup(("InitializeNoClinit " + dex_file.GetLocation()).c_str())); 
#ifndef NDEBUG for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) { const char* descriptor = class_initializer_black_list[i]; @@ -2126,12 +2128,11 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count_); - timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation()); } void CompilerDriver::InitializeClasses(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -2140,7 +2141,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader, } void CompilerDriver::Compile(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -2220,10 +2221,10 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz } void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + timings.NewSplit(strdup(("Compile " + dex_file.GetLocation()).c_str())); ParallelCompilationManager context(NULL, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_); - timings.AddSplit("Compile " + dex_file.GetLocation()); } void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, diff --git 
a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index f3f72dd3c7..a7a47ed876 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -78,11 +78,11 @@ class CompilerDriver { ~CompilerDriver(); void CompileAll(jobject class_loader, const std::vector& dex_files, - TimingLogger& timings) + base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method - void CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) + void CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InstructionSet GetInstructionSet() const { @@ -284,42 +284,42 @@ class CompilerDriver { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PreCompile(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); - void LoadImageClasses(TimingLogger& timings); + void LoadImageClasses(base::TimingLogger& timings); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. 
void Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings); + ThreadPool& thread_pool, base::TimingLogger& timings); void VerifyDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); - void UpdateImageClasses(TimingLogger& timings); + void UpdateImageClasses(base::TimingLogger& timings); static void FindClinitImageClassesCallback(mirror::Object* object, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Compile(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings); + ThreadPool& thread_pool, base::TimingLogger& timings); void CompileDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_def_idx, 
uint32_t method_idx, diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 78cacaf08e..b06a3180fb 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -36,7 +36,7 @@ namespace art { class CompilerDriverTest : public CommonTest { protected: void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { - TimingLogger timings("CompilerDriverTest::CompileAll", false); + base::TimingLogger timings("CompilerDriverTest::CompileAll", false, false); compiler_driver_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader), timings); diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 1a6a98a2af..fb6f775675 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -230,7 +230,7 @@ class Dex2Oat { bool image, UniquePtr& image_classes, bool dump_stats, - TimingLogger& timings) + base::TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create jobject class_loader = NULL; @@ -263,11 +263,11 @@ class Dex2Oat { Thread::Current()->TransitionFromRunnableToSuspended(kNative); - timings.AddSplit("dex2oat Setup"); driver->CompileAll(class_loader, dex_files, timings); Thread::Current()->TransitionFromSuspendedToRunnable(); + timings.NewSplit("dex2oat OatWriter"); std::string image_file_location; uint32_t image_file_location_oat_checksum = 0; uint32_t image_file_location_oat_data_begin = 0; @@ -287,13 +287,11 @@ class Dex2Oat { image_file_location_oat_data_begin, image_file_location, driver.get()); - timings.AddSplit("dex2oat OatWriter"); if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) { LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath(); return NULL; } - timings.AddSplit("dex2oat ElfWriter"); return driver.release(); } @@ -563,7 +561,7 @@ const unsigned int WatchDog::kWatchDogWarningSeconds; const unsigned int 
WatchDog::kWatchDogTimeoutSeconds; static int dex2oat(int argc, char** argv) { - TimingLogger timings("compiler", false); + base::TimingLogger timings("compiler", false, false); InitLogging(argv); @@ -928,6 +926,7 @@ static int dex2oat(int argc, char** argv) { } } + timings.StartSplit("dex2oat Setup"); UniquePtr compiler(dex2oat->CreateOatFile(boot_image_option, host_prefix.get(), android_root, @@ -998,13 +997,13 @@ static int dex2oat(int argc, char** argv) { // Elf32_Phdr.p_vaddr values by the desired base address. // if (image) { + timings.NewSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromRunnableToSuspended(kNative); bool image_creation_success = dex2oat->CreateImageFile(image_filename, image_base, oat_unstripped, oat_location, *compiler.get()); - timings.AddSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromSuspendedToRunnable(); if (!image_creation_success) { return EXIT_FAILURE; @@ -1014,7 +1013,7 @@ static int dex2oat(int argc, char** argv) { if (is_host) { if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { - LOG(INFO) << Dumpable(timings); + LOG(INFO) << Dumpable(timings); } return EXIT_SUCCESS; } @@ -1022,6 +1021,7 @@ static int dex2oat(int argc, char** argv) { // If we don't want to strip in place, copy from unstripped location to stripped location. // We need to strip after image creation because FixupElf needs to use .strtab. 
if (oat_unstripped != oat_stripped) { + timings.NewSplit("dex2oat OatFile copy"); oat_file.reset(); UniquePtr in(OS::OpenFile(oat_unstripped.c_str(), false)); UniquePtr out(OS::OpenFile(oat_stripped.c_str(), true)); @@ -1036,23 +1036,25 @@ static int dex2oat(int argc, char** argv) { CHECK(write_ok); } oat_file.reset(out.release()); - timings.AddSplit("dex2oat OatFile copy"); LOG(INFO) << "Oat file copied successfully (stripped): " << oat_stripped; } #if ART_USE_PORTABLE_COMPILER // We currently only generate symbols on Portable + timings.NewSplit("dex2oat ElfStripper"); // Strip unneeded sections for target off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET); CHECK_EQ(0, seek_actual); ElfStripper::Strip(oat_file.get()); - timings.AddSplit("dex2oat ElfStripper"); + // We wrote the oat file successfully, and want to keep it. LOG(INFO) << "Oat file written successfully (stripped): " << oat_location; #endif // ART_USE_PORTABLE_COMPILER + timings.EndSplit(); + if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { - LOG(INFO) << Dumpable(timings); + LOG(INFO) << Dumpable(timings); } return EXIT_SUCCESS; } diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc index bf6fd17a49..dfb0220d46 100644 --- a/runtime/base/timing_logger.cc +++ b/runtime/base/timing_logger.cc @@ -14,6 +14,11 @@ * limitations under the License. 
*/ + +#define ATRACE_TAG ATRACE_TAG_DALVIK +#include +#include + #include "timing_logger.h" #include "base/logging.h" @@ -26,49 +31,6 @@ namespace art { -void TimingLogger::Reset() { - times_.clear(); - labels_.clear(); - AddSplit(""); -} - -TimingLogger::TimingLogger(const std::string &name, bool precise) - : name_(name), - precise_(precise) { - AddSplit(""); -} - -void TimingLogger::AddSplit(const std::string &label) { - times_.push_back(NanoTime()); - labels_.push_back(label); -} - -uint64_t TimingLogger::GetTotalNs() const { - return times_.back() - times_.front(); -} - -void TimingLogger::Dump(std::ostream &os) const { - uint64_t largest_time = 0; - os << name_ << ": begin\n"; - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - largest_time = std::max(largest_time, delta_time); - } - // Compute which type of unit we will use for printing the timings. - TimeUnit tu = GetAppropriateTimeUnit(largest_time); - uint64_t divisor = GetNsToTimeUnitDivisor(tu); - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - if (!precise_ && divisor >= 1000) { - // Make the fraction 0. 
- delta_time -= delta_time % (divisor / 1000); - } - os << name_ << ": " << std::setw(8) << FormatDuration(delta_time, tu) << " " - << labels_[i] << "\n"; - } - os << name_ << ": end, " << NsToMs(GetTotalNs()) << " ms\n"; -} - CumulativeLogger::CumulativeLogger(const std::string& name) : name_(name), lock_name_("CumulativeLoggerLock" + name), @@ -112,17 +74,8 @@ uint64_t CumulativeLogger::GetTotalTime() const { return total; } -void CumulativeLogger::AddLogger(const TimingLogger &logger) { - MutexLock mu(Thread::Current(), lock_); - DCHECK_EQ(logger.times_.size(), logger.labels_.size()); - for (size_t i = 1; i < logger.times_.size(); ++i) { - const uint64_t delta_time = logger.times_[i] - logger.times_[i - 1]; - const std::string &label = logger.labels_[i]; - AddPair(label, delta_time); - } -} -void CumulativeLogger::AddNewLogger(const base::NewTimingLogger &logger) { +void CumulativeLogger::AddLogger(const base::TimingLogger &logger) { MutexLock mu(Thread::Current(), lock_); const std::vector >& splits = logger.GetSplits(); typedef std::vector >::const_iterator It; @@ -183,51 +136,55 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) { namespace base { -NewTimingLogger::NewTimingLogger(const char* name, bool precise, bool verbose) +TimingLogger::TimingLogger(const char* name, bool precise, bool verbose) : name_(name), precise_(precise), verbose_(verbose), current_split_(NULL), current_split_start_ns_(0) { } -void NewTimingLogger::Reset() { +void TimingLogger::Reset() { current_split_ = NULL; current_split_start_ns_ = 0; splits_.clear(); } -void NewTimingLogger::StartSplit(const char* new_split_label) { +void TimingLogger::StartSplit(const char* new_split_label) { DCHECK(current_split_ == NULL); if (verbose_) { LOG(INFO) << "Begin: " << new_split_label; } current_split_ = new_split_label; + ATRACE_BEGIN(current_split_); current_split_start_ns_ = NanoTime(); } // Ends the current split and starts the one given by the label. 
-void NewTimingLogger::NewSplit(const char* new_split_label) { +void TimingLogger::NewSplit(const char* new_split_label) { DCHECK(current_split_ != NULL); uint64_t current_time = NanoTime(); uint64_t split_time = current_time - current_split_start_ns_; + ATRACE_END(); splits_.push_back(std::pair(split_time, current_split_)); if (verbose_) { LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time) << "\n" << "Begin: " << new_split_label; } current_split_ = new_split_label; + ATRACE_BEGIN(current_split_); current_split_start_ns_ = current_time; } -void NewTimingLogger::EndSplit() { +void TimingLogger::EndSplit() { DCHECK(current_split_ != NULL); uint64_t current_time = NanoTime(); uint64_t split_time = current_time - current_split_start_ns_; + ATRACE_END(); if (verbose_) { LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time); } splits_.push_back(std::pair(split_time, current_split_)); } -uint64_t NewTimingLogger::GetTotalNs() const { +uint64_t TimingLogger::GetTotalNs() const { uint64_t total_ns = 0; typedef std::vector >::const_iterator It; for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) { @@ -237,7 +194,7 @@ uint64_t NewTimingLogger::GetTotalNs() const { return total_ns; } -void NewTimingLogger::Dump(std::ostream &os) const { +void TimingLogger::Dump(std::ostream &os) const { uint64_t longest_split = 0; uint64_t total_ns = 0; typedef std::vector >::const_iterator It; diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index 0f00a046e5..0998837517 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -26,27 +26,8 @@ namespace art { -class CumulativeLogger; - -class TimingLogger { - public: - explicit TimingLogger(const std::string& name, bool precise); - void AddSplit(const std::string& label); - void Dump(std::ostream& os) const; - void Reset(); - uint64_t GetTotalNs() const; - - protected: - const std::string name_; - const bool precise_; - std::vector 
times_; - std::vector labels_; - - friend class CumulativeLogger; -}; - namespace base { - class NewTimingLogger; + class TimingLogger; } // namespace base class CumulativeLogger { @@ -62,8 +43,7 @@ class CumulativeLogger { // Allow the name to be modified, particularly when the cumulative logger is a field within a // parent class that is unable to determine the "name" of a sub-class. void SetName(const std::string& name); - void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_); - void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_); + void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_); private: void AddPair(const std::string &label, uint64_t delta_time) @@ -84,16 +64,15 @@ class CumulativeLogger { namespace base { // A replacement to timing logger that know when a split starts for the purposes of logging. -// TODO: replace uses of TimingLogger with base::NewTimingLogger. -class NewTimingLogger { +class TimingLogger { public: - explicit NewTimingLogger(const char* name, bool precise, bool verbose); + explicit TimingLogger(const char* name, bool precise, bool verbose); // Clears current splits and labels. void Reset(); // Starts a split, a split shouldn't be in progress. - void StartSplit(const char* new_split_label); + void StartSplit(const char* new_split_label); // Ends the current split and starts the one given by the label. void NewSplit(const char* new_split_label); @@ -111,7 +90,7 @@ class NewTimingLogger { protected: // The name of the timing logger. - const std::string name_; + const char* name_; // Do we want to print the exactly recorded split (true) or round down to the time unit being // used (false). 
@@ -130,7 +109,7 @@ class NewTimingLogger { std::vector > splits_; private: - DISALLOW_COPY_AND_ASSIGN(NewTimingLogger); + DISALLOW_COPY_AND_ASSIGN(TimingLogger); }; } // namespace base diff --git a/runtime/common_test.h b/runtime/common_test.h index 09ad7fd7b7..4fe9be6d8f 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -473,7 +473,7 @@ class CommonTest : public testing::Test { void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); - TimingLogger timings("CommonTest::CompileMethod", false); + base::TimingLogger timings("CommonTest::CompileMethod", false, false); compiler_driver_->CompileOne(method, timings); MakeExecutable(method); } diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 1684664eff..0f566c954b 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -64,7 +64,7 @@ class GarbageCollector { void RegisterPause(uint64_t nano_length); - base::NewTimingLogger& GetTimings() { + base::TimingLogger& GetTimings() { return timings_; } @@ -101,7 +101,7 @@ class GarbageCollector { const bool verbose_; uint64_t duration_ns_; - base::NewTimingLogger timings_; + base::TimingLogger timings_; // Cumulative statistics. uint64_t total_time_ns_; diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index c8e60322d6..0cbd6fb267 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -1509,7 +1509,7 @@ void MarkSweep::FinishPhase() { // Update the cumulative loggers. cumulative_timings_.Start(); - cumulative_timings_.AddNewLogger(timings_); + cumulative_timings_.AddLogger(timings_); cumulative_timings_.End(); // Clear all of the spaces' mark bitmaps. 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 1c1818873a..b0ba8b6874 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1131,7 +1131,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus << PrettySize(total_memory) << ", " << "paused " << pause_string.str() << " total " << PrettyDuration((duration / 1000) * 1000); if (VLOG_IS_ON(heap)) { - LOG(INFO) << Dumpable(collector->GetTimings()); + LOG(INFO) << Dumpable(collector->GetTimings()); } } @@ -1149,7 +1149,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus return gc_type; } -void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings, +void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings, collector::GcType gc_type) { if (gc_type == collector::kGcTypeSticky) { // Don't need to do anything for mod union table in this case since we are only scanning dirty @@ -1441,7 +1441,7 @@ void Heap::SwapStacks() { } } -void Heap::ProcessCards(base::NewTimingLogger& timings) { +void Heap::ProcessCards(base::TimingLogger& timings) { // Clear cards and keep track of cards cleared in the mod-union table. typedef std::vector::iterator It; for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index b7b2e84942..b71c2bc421 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -379,7 +379,7 @@ class Heap { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Update and mark mod union table based on gc type. - void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings, + void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings, collector::GcType gc_type) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); @@ -472,7 +472,7 @@ class Heap { void SwapStacks(); // Clear cards and update the mod union table. 
- void ProcessCards(base::NewTimingLogger& timings); + void ProcessCards(base::TimingLogger& timings); // All-known continuous spaces, where objects lie within fixed bounds. std::vector continuous_spaces_; diff --git a/runtime/image_test.cc b/runtime/image_test.cc index 75eead4d8f..92ee1f83c6 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -44,7 +44,7 @@ TEST_F(ImageTest, WriteRead) { { jobject class_loader = NULL; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - TimingLogger timings("ImageTest::WriteRead", false); + base::TimingLogger timings("ImageTest::WriteRead", false, false); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); ScopedObjectAccess soa(Thread::Current()); diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index 3f2e43e985..217e2d8743 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc @@ -77,7 +77,7 @@ TEST_F(OatTest, WriteRead) { compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true)); jobject class_loader = NULL; if (compile) { - TimingLogger timings("OatTest::WriteRead", false); + base::TimingLogger timings("OatTest::WriteRead", false, false); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } @@ -96,7 +96,7 @@ TEST_F(OatTest, WriteRead) { ASSERT_TRUE(success); if (compile) { // OatWriter strips the code, regenerate to compare - TimingLogger timings("CommonTest::WriteRead", false); + base::TimingLogger timings("CommonTest::WriteRead", false, false); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } UniquePtr oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false)); -- cgit v1.2.3-59-g8ed1b From 4d4adb1dae07bb7421e863732ab789413a3b43f0 Mon Sep 17 00:00:00 2001 From: Sebastien Hertz Date: Wed, 24 Jul 2013 16:14:19 +0200 Subject: Prevent verifier from creating unused compilation data. 
The verifier used to create data which may be unused like GC map. This is the case for non-compiled method (which are interpreted). This CL aims to optimize this. Here are the changes: - Move compilation selection to MethodVerifier::IsCandidateForCompilation. - Compiler and verifier use this method to know if a method must be compiled. - Only create compilation data while compiling using Runtime::IsCompiler. - Do not create internal structures concerning GC map, ... in Runtime::Init and Runtime::Shutdown when we are not compiling. - Checks we are compiling when accessing these structures. - Add missing destruction of MethodVerifier::safecast_map_lock_ and MethodVerifier::safecast_map_ in Runtime::Shutdown. - Call Runtime::Shutdown just before Runtime instance is destroyed to avoid a crash. - Add missing "GUARDED_BY" macro for MethodVerifier::rejected_classes_ field. - Add "has_check_casts" to avoid the safecast pass if there is no check-cast instruction. - Add "has_virtual_or_interface_invokes" to avoid the devirtualization pass if there is no invoke-virtual/range nor invoke-interface/range instructions. Bug: 9987437 Change-Id: I418ee99f63e4203409cf5b7d2c2295b22fcf24c1 --- compiler/driver/compiler_driver.cc | 14 +-- runtime/runtime.cc | 2 +- runtime/verifier/method_verifier.cc | 192 +++++++++++++++++++++++------------- runtime/verifier/method_verifier.h | 12 ++- 4 files changed, 139 insertions(+), 81 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 2aa2a98efb..8ce0aa9c94 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -2240,18 +2240,8 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t CHECK(compiled_method != NULL); } else if ((access_flags & kAccAbstract) != 0) { } else { - // In small mode we only compile image classes. 
- bool dont_compile = (Runtime::Current()->IsSmallMode() && - ((image_classes_.get() == NULL) || (image_classes_->size() == 0))); - - // Don't compile class initializers, ever. - if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { - dont_compile = true; - } else if (code_item->insns_size_in_code_units_ < Runtime::Current()->GetSmallModeMethodDexSizeLimit()) { - // Do compile small methods. - dont_compile = false; - } - if (!dont_compile) { + bool compile = verifier::MethodVerifier::IsCandidateForCompilation(code_item, access_flags); + if (compile) { CompilerFn compiler = compiler_; #ifdef ART_SEA_IR_MODE bool use_sea = Runtime::Current()->IsSeaIRMode(); diff --git a/runtime/runtime.cc b/runtime/runtime.cc index cf6e537df0..48ee127d21 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -134,10 +134,10 @@ Runtime::~Runtime() { delete java_vm_; Thread::Shutdown(); QuasiAtomic::Shutdown(); + verifier::MethodVerifier::Shutdown(); // TODO: acquire a static mutex on Runtime to avoid racing. 
CHECK(instance_ == NULL || instance_ == this); instance_ = NULL; - verifier::MethodVerifier::Shutdown(); } struct AbortState { diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index e182af7b82..3549945a4c 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -282,7 +282,9 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca new_instance_count_(0), monitor_enter_count_(0), can_load_classes_(can_load_classes), - allow_soft_failures_(allow_soft_failures) { + allow_soft_failures_(allow_soft_failures), + has_check_casts_(false), + has_virtual_or_interface_invokes_(false) { } void MethodVerifier::FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc, @@ -470,6 +472,13 @@ bool MethodVerifier::ComputeWidthsAndCountOps() { new_instance_count++; } else if (opcode == Instruction::MONITOR_ENTER) { monitor_enter_count++; + } else if (opcode == Instruction::CHECK_CAST) { + has_check_casts_ = true; + } else if ((inst->Opcode() == Instruction::INVOKE_VIRTUAL) || + (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) || + (inst->Opcode() == Instruction::INVOKE_INTERFACE) || + (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE)) { + has_virtual_or_interface_invokes_ = true; } size_t inst_size = inst->SizeInCodeUnits(); insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size); @@ -1002,27 +1011,37 @@ bool MethodVerifier::VerifyCodeFlow() { return false; } - /* Generate a register map and add it to the method. */ - UniquePtr > map(GenerateGcMap()); - if (map.get() == NULL) { - DCHECK_NE(failures_.size(), 0U); - return false; // Not a real failure, but a failure to encode - } - if (kIsDebugBuild) { - VerifyGcMap(*map); - } - MethodReference ref(dex_file_, dex_method_idx_); - const std::vector* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get())); - verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map); + // Compute information for compiler. 
+ if (Runtime::Current()->IsCompiler()) { + MethodReference ref(dex_file_, dex_method_idx_); + bool compile = IsCandidateForCompilation(code_item_, method_access_flags_); + if (compile) { + /* Generate a register map and add it to the method. */ + UniquePtr > map(GenerateGcMap()); + if (map.get() == NULL) { + DCHECK_NE(failures_.size(), 0U); + return false; // Not a real failure, but a failure to encode + } + if (kIsDebugBuild) { + VerifyGcMap(*map); + } + const std::vector* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get())); + verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map); + } - MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(); - if (method_to_safe_casts != NULL) { - SetSafeCastMap(ref, method_to_safe_casts); - } + if (has_check_casts_) { + MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(); + if(method_to_safe_casts != NULL ) { + SetSafeCastMap(ref, method_to_safe_casts); + } + } - MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(); - if (pc_to_concrete_method != NULL) { - SetDevirtMap(ref, pc_to_concrete_method); + if (has_virtual_or_interface_invokes_) { + MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(); + if(pc_to_concrete_method != NULL ) { + SetDevirtMap(ref, pc_to_concrete_method); + } + } } return true; } @@ -3948,6 +3967,7 @@ void MethodVerifier::VerifyGcMap(const std::vector& data) { } void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector& gc_map) { + DCHECK(Runtime::Current()->IsCompiler()); { WriterMutexLock mu(Thread::Current(), *dex_gc_maps_lock_); DexGcMapTable::iterator it = dex_gc_maps_->find(ref); @@ -3962,6 +3982,7 @@ void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *safecast_map_lock_); 
SafeCastMap::iterator it = safecast_map_->find(ref); if (it != safecast_map_->end()) { @@ -3970,10 +3991,11 @@ void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSe } safecast_map_->Put(ref, cast_set); - CHECK(safecast_map_->find(ref) != safecast_map_->end()); + DCHECK(safecast_map_->find(ref) != safecast_map_->end()); } bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *safecast_map_lock_); SafeCastMap::const_iterator it = safecast_map_->find(ref); if (it == safecast_map_->end()) { @@ -3986,6 +4008,7 @@ bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) { } const std::vector* MethodVerifier::GetDexGcMap(MethodReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); ReaderMutexLock mu(Thread::Current(), *dex_gc_maps_lock_); DexGcMapTable::const_iterator it = dex_gc_maps_->find(ref); if (it == dex_gc_maps_->end()) { @@ -3998,6 +4021,7 @@ const std::vector* MethodVerifier::GetDexGcMap(MethodReference ref) { void MethodVerifier::SetDevirtMap(MethodReference ref, const PcToConcreteMethodMap* devirt_map) { + DCHECK(Runtime::Current()->IsCompiler()); WriterMutexLock mu(Thread::Current(), *devirt_maps_lock_); DevirtualizationMapTable::iterator it = devirt_maps_->find(ref); if (it != devirt_maps_->end()) { @@ -4006,11 +4030,12 @@ void MethodVerifier::SetDevirtMap(MethodReference ref, } devirt_maps_->Put(ref, devirt_map); - CHECK(devirt_maps_->find(ref) != devirt_maps_->end()); + DCHECK(devirt_maps_->find(ref) != devirt_maps_->end()); } const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref, uint32_t dex_pc) { + DCHECK(Runtime::Current()->IsCompiler()); ReaderMutexLock mu(Thread::Current(), *devirt_maps_lock_); DevirtualizationMapTable::const_iterator it = devirt_maps_->find(ref); if (it == devirt_maps_->end()) { @@ -4070,6 +4095,24 @@ std::vector MethodVerifier::DescribeVRegs(uint32_t dex_pc) { return result; } 
+bool MethodVerifier::IsCandidateForCompilation(const DexFile::CodeItem* code_item, + const uint32_t access_flags) { + // Don't compile class initializers, ever. + if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { + return false; + } + + const Runtime* runtime = Runtime::Current(); + if (runtime->IsSmallMode() && runtime->UseCompileTimeClassPath()) { + // In Small mode, we only compile small methods. + const uint32_t code_size = code_item->insns_size_in_code_units_; + return (code_size < runtime->GetSmallModeMethodDexSizeLimit()); + } else { + // In normal mode, we compile everything. + return true; + } +} + ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL; MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL; @@ -4083,65 +4126,79 @@ Mutex* MethodVerifier::rejected_classes_lock_ = NULL; MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL; void MethodVerifier::Init() { - dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock"); - Thread* self = Thread::Current(); - { - WriterMutexLock mu(self, *dex_gc_maps_lock_); - dex_gc_maps_ = new MethodVerifier::DexGcMapTable; - } + if (Runtime::Current()->IsCompiler()) { + dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock"); + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, *dex_gc_maps_lock_); + dex_gc_maps_ = new MethodVerifier::DexGcMapTable; + } - safecast_map_lock_ = new Mutex("verifier Cast Elision lock"); - { - MutexLock mu(self, *safecast_map_lock_); - safecast_map_ = new MethodVerifier::SafeCastMap(); - } + safecast_map_lock_ = new Mutex("verifier Cast Elision lock"); + { + MutexLock mu(self, *safecast_map_lock_); + safecast_map_ = new MethodVerifier::SafeCastMap(); + } - devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock"); + devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock"); - { - WriterMutexLock mu(self, *devirt_maps_lock_); - devirt_maps_ 
= new MethodVerifier::DevirtualizationMapTable(); - } + { + WriterMutexLock mu(self, *devirt_maps_lock_); + devirt_maps_ = new MethodVerifier::DevirtualizationMapTable(); + } - rejected_classes_lock_ = new Mutex("verifier rejected classes lock"); - { - MutexLock mu(self, *rejected_classes_lock_); - rejected_classes_ = new MethodVerifier::RejectedClassesTable; + rejected_classes_lock_ = new Mutex("verifier rejected classes lock"); + { + MutexLock mu(self, *rejected_classes_lock_); + rejected_classes_ = new MethodVerifier::RejectedClassesTable; + } } art::verifier::RegTypeCache::Init(); } void MethodVerifier::Shutdown() { - Thread* self = Thread::Current(); - { - WriterMutexLock mu(self, *dex_gc_maps_lock_); - STLDeleteValues(dex_gc_maps_); - delete dex_gc_maps_; - dex_gc_maps_ = NULL; - } - delete dex_gc_maps_lock_; - dex_gc_maps_lock_ = NULL; - - { - WriterMutexLock mu(self, *devirt_maps_lock_); - STLDeleteValues(devirt_maps_); - delete devirt_maps_; - devirt_maps_ = NULL; - } - delete devirt_maps_lock_; - devirt_maps_lock_ = NULL; - - { - MutexLock mu(self, *rejected_classes_lock_); - delete rejected_classes_; - rejected_classes_ = NULL; + if (Runtime::Current()->IsCompiler()) { + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, *dex_gc_maps_lock_); + STLDeleteValues(dex_gc_maps_); + delete dex_gc_maps_; + dex_gc_maps_ = NULL; + } + delete dex_gc_maps_lock_; + dex_gc_maps_lock_ = NULL; + + { + MutexLock mu(self, *safecast_map_lock_); + STLDeleteValues(safecast_map_); + delete safecast_map_; + safecast_map_ = NULL; + } + delete safecast_map_lock_; + safecast_map_lock_ = NULL; + + { + WriterMutexLock mu(self, *devirt_maps_lock_); + STLDeleteValues(devirt_maps_); + delete devirt_maps_; + devirt_maps_ = NULL; + } + delete devirt_maps_lock_; + devirt_maps_lock_ = NULL; + + { + MutexLock mu(self, *rejected_classes_lock_); + delete rejected_classes_; + rejected_classes_ = NULL; + } + delete rejected_classes_lock_; + rejected_classes_lock_ = NULL; } - 
delete rejected_classes_lock_; - rejected_classes_lock_ = NULL; verifier::RegTypeCache::ShutDown(); } void MethodVerifier::AddRejectedClass(ClassReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); { MutexLock mu(Thread::Current(), *rejected_classes_lock_); rejected_classes_->insert(ref); @@ -4150,6 +4207,7 @@ void MethodVerifier::AddRejectedClass(ClassReference ref) { } bool MethodVerifier::IsClassRejected(ClassReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *rejected_classes_lock_); return (rejected_classes_->find(ref) != rejected_classes_->end()); } diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 57d630de5a..fdd90cc767 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -237,6 +237,9 @@ class MethodVerifier { // Describe VRegs at the given dex pc. std::vector DescribeVRegs(uint32_t dex_pc); + static bool IsCandidateForCompilation(const DexFile::CodeItem* code_item, + const uint32_t access_flags); + private: // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -654,7 +657,7 @@ class MethodVerifier { LOCKS_EXCLUDED(devirt_maps_lock_); typedef std::set RejectedClassesTable; static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - static RejectedClassesTable* rejected_classes_; + static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_); static void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); @@ -717,6 +720,13 @@ class MethodVerifier { // Converts soft failures to hard failures when false. Only false when the compiler isn't // running and the verifier is called from the class linker. const bool allow_soft_failures_; + + // Indicates if the method being verified contains at least one check-cast instruction. 
+ bool has_check_casts_; + + // Indicates if the method being verified contains at least one invoke-virtual/range + // or invoke-interface/range. + bool has_virtual_or_interface_invokes_; }; std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs); -- cgit v1.2.3-59-g8ed1b From 7934ac288acfb2552bb0b06ec1f61e5820d924a4 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Fri, 26 Jul 2013 10:54:15 -0700 Subject: Fix cpplint whitespace/comments issues Change-Id: Iae286862c85fb8fd8901eae1204cd6d271d69496 --- build/Android.cpplint.mk | 2 +- compiler/dex/arena_allocator.h | 2 +- compiler/dex/compiler_enums.h | 20 +- compiler/dex/dataflow_iterator.h | 2 +- compiler/dex/dex_to_dex_compiler.cc | 4 +- compiler/dex/frontend.cc | 56 +- compiler/dex/frontend.h | 2 +- compiler/dex/local_value_numbering.h | 2 +- compiler/dex/mir_graph.cc | 12 +- compiler/dex/mir_graph.h | 6 +- compiler/dex/mir_optimization.cc | 2 +- compiler/dex/portable/mir_to_gbc.cc | 6 +- compiler/dex/quick/arm/arm_lir.h | 28 +- compiler/dex/quick/arm/call_arm.cc | 6 +- compiler/dex/quick/arm/fp_arm.cc | 2 +- compiler/dex/quick/arm/int_arm.cc | 16 +- compiler/dex/quick/arm/target_arm.cc | 2 +- compiler/dex/quick/arm/utility_arm.cc | 6 +- compiler/dex/quick/codegen_util.cc | 2 +- compiler/dex/quick/gen_common.cc | 10 +- compiler/dex/quick/gen_invoke.cc | 14 +- compiler/dex/quick/mips/call_mips.cc | 2 +- compiler/dex/quick/mips/fp_mips.cc | 4 +- compiler/dex/quick/mips/int_mips.cc | 6 +- compiler/dex/quick/mips/mips_lir.h | 58 +- compiler/dex/quick/mips/target_mips.cc | 4 +- compiler/dex/quick/mips/utility_mips.cc | 2 +- compiler/dex/quick/mir_to_lir.h | 4 +- compiler/dex/quick/ralloc_util.cc | 10 +- compiler/dex/quick/x86/assemble_x86.cc | 4 +- compiler/dex/quick/x86/call_x86.cc | 2 +- compiler/dex/quick/x86/fp_x86.cc | 2 +- compiler/dex/quick/x86/target_x86.cc | 4 +- compiler/dex/quick/x86/utility_x86.cc | 10 +- compiler/dex/quick/x86/x86_lir.h | 36 +- 
compiler/dex/ssa_transformation.cc | 8 +- compiler/dex/vreg_analysis.cc | 2 +- compiler/driver/compiler_driver.cc | 808 ++++++++++----------- compiler/driver/dex_compilation_unit.cc | 2 +- compiler/driver/dex_compilation_unit.h | 2 +- compiler/elf_fixup.cc | 32 +- compiler/elf_writer_mclinker.h | 2 +- compiler/elf_writer_quick.cc | 4 +- compiler/image_writer.cc | 6 +- compiler/jni/portable/jni_compiler.cc | 14 +- compiler/llvm/backend_types.h | 4 +- compiler/llvm/compiler_llvm.cc | 8 +- compiler/llvm/compiler_llvm.h | 4 +- compiler/llvm/gbc_expander.cc | 46 +- compiler/llvm/generated/art_module.cc | 132 ++-- compiler/llvm/intrinsic_helper.cc | 4 +- compiler/llvm/intrinsic_helper.h | 4 +- compiler/llvm/ir_builder.cc | 4 +- compiler/llvm/ir_builder.h | 4 +- compiler/llvm/llvm_compilation_unit.cc | 12 +- compiler/llvm/llvm_compilation_unit.h | 6 +- compiler/llvm/md_builder.cc | 4 +- compiler/llvm/md_builder.h | 4 +- compiler/llvm/runtime_support_builder.cc | 4 +- compiler/llvm/runtime_support_builder.h | 4 +- compiler/llvm/runtime_support_builder_arm.cc | 6 +- compiler/llvm/runtime_support_builder_arm.h | 4 +- compiler/llvm/runtime_support_builder_thumb2.cc | 6 +- compiler/llvm/runtime_support_builder_thumb2.h | 4 +- compiler/llvm/runtime_support_builder_x86.cc | 4 +- compiler/llvm/runtime_support_builder_x86.h | 4 +- compiler/llvm/runtime_support_llvm_func.h | 6 +- compiler/sea_ir/code_gen.cc | 4 +- compiler/sea_ir/code_gen.h | 8 +- compiler/sea_ir/frontend.cc | 4 +- compiler/sea_ir/instruction_nodes.h | 6 +- compiler/sea_ir/instruction_tools.cc | 2 +- compiler/sea_ir/instruction_tools.h | 2 +- compiler/sea_ir/sea.cc | 28 +- compiler/sea_ir/sea.h | 4 +- compiler/sea_ir/sea_node.h | 2 +- compiler/sea_ir/visitor.h | 4 +- compiler/stubs/portable/stubs.cc | 14 +- compiler/stubs/quick/stubs.cc | 12 +- compiler/utils/scoped_hashtable.h | 2 +- compiler/utils/scoped_hashtable_test.cc | 2 +- dalvikvm/dalvikvm.cc | 2 +- dex2oat/dex2oat.cc | 6 +- jdwpspy/Net.cpp | 2 +- 
oatdump/oatdump.cc | 2 +- runtime/base/histogram_test.cc | 28 +- runtime/base/logging.cc | 2 +- runtime/base/logging.h | 4 +- runtime/base/macros.h | 2 +- runtime/base/mutex-inl.h | 2 +- runtime/base/mutex.cc | 10 +- runtime/base/unix_file/mapped_file_test.cc | 2 +- runtime/check_jni.cc | 56 +- runtime/class_linker.cc | 6 +- runtime/class_linker.h | 4 +- runtime/common_test.h | 8 +- runtime/compiled_method.cc | 2 +- runtime/debugger.cc | 44 +- runtime/debugger.h | 2 +- runtime/dex_file.cc | 2 +- runtime/dex_file.h | 2 +- runtime/dex_file_verifier.cc | 2 +- runtime/dex_instruction-inl.h | 8 +- runtime/dex_instruction.cc | 8 +- runtime/dex_instruction.h | 4 +- runtime/dex_instruction_list.h | 2 +- runtime/disassembler_arm.cc | 52 +- runtime/disassembler_mips.cc | 42 +- runtime/disassembler_x86.cc | 2 +- runtime/exception_test.cc | 2 +- runtime/gc/accounting/card_table.cc | 2 +- runtime/gc/accounting/space_bitmap-inl.h | 2 +- runtime/gc/collector/mark_sweep.cc | 4 +- runtime/gc/collector/mark_sweep.h | 2 +- runtime/gc/heap.cc | 2 +- runtime/gc/space/dlmalloc_space.cc | 4 +- runtime/gc/space/space.h | 2 +- runtime/hprof/hprof.cc | 18 +- runtime/indirect_reference_table.cc | 2 +- runtime/indirect_reference_table.h | 8 +- runtime/instrumentation.cc | 14 +- runtime/intern_table.cc | 8 +- runtime/intern_table_test.cc | 2 +- runtime/interpreter/interpreter.cc | 6 +- runtime/invoke_type.h | 10 +- runtime/jdwp/jdwp.h | 8 +- runtime/jdwp/jdwp_constants.h | 8 +- runtime/jdwp/jdwp_event.cc | 6 +- runtime/jdwp/jdwp_handler.cc | 6 +- runtime/jdwp/jdwp_priv.h | 4 +- runtime/jdwp/jdwp_request.cc | 2 +- runtime/jdwp/object_registry.cc | 6 +- runtime/jni_internal.cc | 26 +- runtime/jvalue.h | 4 +- runtime/locks.h | 2 +- runtime/log_severity.h | 2 +- runtime/mem_map.cc | 6 +- runtime/mirror/dex_cache.h | 2 +- runtime/mirror/field.cc | 2 +- runtime/mirror/throwable.h | 2 +- runtime/monitor.cc | 6 +- runtime/native/dalvik_system_VMDebug.cc | 4 +- runtime/native/dalvik_system_Zygote.cc | 
2 +- runtime/native/java_lang_Thread.cc | 2 +- runtime/oat/runtime/arm/context_arm.cc | 2 +- runtime/oat/runtime/mips/context_mips.cc | 2 +- runtime/oat/runtime/support_interpreter.cc | 2 +- runtime/oat/runtime/support_jni.cc | 2 +- runtime/oat/runtime/support_stubs.cc | 4 +- .../oat/runtime/x86/oat_support_entrypoints_x86.cc | 12 +- runtime/oat_test.cc | 2 +- runtime/reference_table.cc | 2 +- runtime/reference_table.h | 2 +- runtime/reflection.cc | 2 +- runtime/runtime.cc | 32 +- runtime/runtime_linux.cc | 2 +- runtime/runtime_support.cc | 8 +- runtime/runtime_support_llvm_func_list.h | 2 +- runtime/safe_map.h | 2 +- runtime/scoped_thread_state_change.h | 2 +- runtime/signal_catcher.cc | 2 +- runtime/stack.cc | 8 +- runtime/stack.h | 6 +- runtime/thread.cc | 20 +- runtime/thread.h | 6 +- runtime/thread_linux.cc | 2 +- runtime/thread_list.cc | 6 +- runtime/thread_list.h | 4 +- runtime/thread_state.h | 40 +- runtime/trace.cc | 14 +- runtime/utils.cc | 34 +- runtime/verifier/dex_gc_map.h | 6 +- runtime/verifier/method_verifier.cc | 16 +- runtime/verifier/method_verifier.h | 22 +- runtime/verifier/reg_type_cache.cc | 2 +- runtime/verifier/reg_type_test.cc | 2 +- runtime/zip_archive.cc | 2 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 44 +- test/StackWalk/stack_walk_jni.cc | 2 +- 179 files changed, 1208 insertions(+), 1208 deletions(-) (limited to 'compiler/driver/compiler_driver.cc') diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk index eabaf31cca..adb87cb4e9 100644 --- a/build/Android.cpplint.mk +++ b/build/Android.cpplint.mk @@ -15,7 +15,7 @@ # ART_CPPLINT := art/tools/cpplint.py -ART_CPPLINT_FILTER := --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf +ART_CPPLINT_FILTER := 
--filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf ART_CPPLINT_SRC := $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art" to verify we aren't regressing diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 3bd733e753..e8e2c027d0 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -86,7 +86,7 @@ struct MemStats { explicit MemStats(const ArenaAllocator &arena) : arena_(arena) {} private: const ArenaAllocator &arena_; -}; // MemStats +}; // MemStats } // namespace art diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h index 88240e8c40..97a682f2aa 100644 --- a/compiler/dex/compiler_enums.h +++ b/compiler/dex/compiler_enums.h @@ -48,7 +48,7 @@ enum SpecialTargetRegister { }; enum RegLocationType { - kLocDalvikFrame = 0, // Normal Dalvik register + kLocDalvikFrame = 0, // Normal Dalvik register kLocPhysReg, kLocCompilerTemp, kLocInvalid @@ -249,20 +249,20 @@ enum X86ConditionCode { kX86CondC = kX86CondB, // carry kX86CondNb = 0x3, // not-below - kX86CondAe = kX86CondNb, // above-equal - kX86CondNc = kX86CondNb, // not-carry + kX86CondAe = kX86CondNb, // above-equal + kX86CondNc = kX86CondNb, // not-carry kX86CondZ = 0x4, // zero kX86CondEq = kX86CondZ, // equal kX86CondNz = 0x5, // not-zero - kX86CondNe = kX86CondNz, // not-equal + kX86CondNe = kX86CondNz, // not-equal kX86CondBe = 0x6, // below-equal - kX86CondNa = kX86CondBe, // not-above + kX86CondNa = kX86CondBe, // not-above kX86CondNbe = 0x7, // not-below-equal - kX86CondA = kX86CondNbe,// above + kX86CondA = kX86CondNbe, // above kX86CondS = 0x8, // sign kX86CondNs = 0x9, // not-sign @@ -277,13 +277,13 @@ enum X86ConditionCode { kX86CondNge = kX86CondL, // not-greater-equal kX86CondNl = 0xd, // not-less-than - kX86CondGe = kX86CondNl, 
// not-greater-equal + kX86CondGe = kX86CondNl, // not-greater-equal kX86CondLe = 0xe, // less-than-equal - kX86CondNg = kX86CondLe, // not-greater + kX86CondNg = kX86CondLe, // not-greater kX86CondNle = 0xf, // not-less-than - kX86CondG = kX86CondNle,// greater + kX86CondG = kX86CondNle, // greater }; std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind); @@ -349,7 +349,7 @@ enum OpFeatureFlags { kIsIT, kMemLoad, kMemStore, - kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes. + kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes. kRegDef0, kRegDef1, kRegDefA, diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 847a614727..da44ffd99c 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -80,7 +80,7 @@ namespace art { GrowableArray* block_id_list_; int idx_; bool changed_; - }; // DataflowIterator + }; // DataflowIterator class ReachableNodesIterator : public DataflowIterator { public: diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 28c325726e..3c491ce20f 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -240,12 +240,12 @@ Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) { // We are modifying 4 consecutive bytes. ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 4u); inst->SetOpcode(Instruction::NOP); - inst->SetVRegA_10x(0u); // keep compliant with verifier. + inst->SetVRegA_10x(0u); // keep compliant with verifier. // Get to next instruction which is the second half of check-cast and replace // it by a NOP. inst = const_cast(inst->Next()); inst->SetOpcode(Instruction::NOP); - inst->SetVRegA_10x(0u); // keep compliant with verifier. + inst->SetVRegA_10x(0u); // keep compliant with verifier. 
return inst; } diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 113a80a96c..9cc4d18d37 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -72,37 +72,37 @@ extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler) { } /* Default optimizer/debug setting for the compiler. */ -static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations +static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations (1 << kLoadStoreElimination) | - //(1 << kLoadHoisting) | - //(1 << kSuppressLoads) | - //(1 << kNullCheckElimination) | - //(1 << kPromoteRegs) | - //(1 << kTrackLiveTemps) | - //(1 << kSafeOptimizations) | - //(1 << kBBOpt) | - //(1 << kMatch) | - //(1 << kPromoteCompilerTemps) | + // (1 << kLoadHoisting) | + // (1 << kSuppressLoads) | + // (1 << kNullCheckElimination) | + // (1 << kPromoteRegs) | + // (1 << kTrackLiveTemps) | + // (1 << kSafeOptimizations) | + // (1 << kBBOpt) | + // (1 << kMatch) | + // (1 << kPromoteCompilerTemps) | 0; static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes - //(1 << kDebugDisplayMissingTargets) | - //(1 << kDebugVerbose) | - //(1 << kDebugDumpCFG) | - //(1 << kDebugSlowFieldPath) | - //(1 << kDebugSlowInvokePath) | - //(1 << kDebugSlowStringPath) | - //(1 << kDebugSlowestFieldPath) | - //(1 << kDebugSlowestStringPath) | - //(1 << kDebugExerciseResolveMethod) | - //(1 << kDebugVerifyDataflow) | - //(1 << kDebugShowMemoryUsage) | - //(1 << kDebugShowNops) | - //(1 << kDebugCountOpcodes) | - //(1 << kDebugDumpCheckStats) | - //(1 << kDebugDumpBitcodeFile) | - //(1 << kDebugVerifyBitcode) | - //(1 << kDebugShowSummaryMemoryUsage) | + // (1 << kDebugDisplayMissingTargets) | + // (1 << kDebugVerbose) | + // (1 << kDebugDumpCFG) | + // (1 << kDebugSlowFieldPath) | + // (1 << kDebugSlowInvokePath) | + // (1 << kDebugSlowStringPath) | + // (1 << kDebugSlowestFieldPath) | + // (1 << kDebugSlowestStringPath) | + // (1 
<< kDebugExerciseResolveMethod) | + // (1 << kDebugVerifyDataflow) | + // (1 << kDebugShowMemoryUsage) | + // (1 << kDebugShowNops) | + // (1 << kDebugCountOpcodes) | + // (1 << kDebugDumpCheckStats) | + // (1 << kDebugDumpBitcodeFile) | + // (1 << kDebugVerifyBitcode) | + // (1 << kDebugShowSummaryMemoryUsage) | 0; static CompiledMethod* CompileMethod(CompilerDriver& compiler, @@ -277,7 +277,7 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler, #if defined(ART_USE_PORTABLE_COMPILER) , llvm_compilation_unit #endif - ); // NOLINT(whitespace/parens) + ); // NOLINT(whitespace/parens) } } // namespace art diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h index a86338950c..5c68ab4244 100644 --- a/compiler/dex/frontend.h +++ b/compiler/dex/frontend.h @@ -102,7 +102,7 @@ class LLVMInfo { private: UniquePtr< ::llvm::LLVMContext> llvm_context_; - ::llvm::Module* llvm_module_; // Managed by context_. + ::llvm::Module* llvm_module_; // Managed by context_. UniquePtr intrinsic_helper_; UniquePtr ir_builder_; }; diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h index e3fd7ad2da..33ca8f1ad8 100644 --- a/compiler/dex/local_value_numbering.h +++ b/compiler/dex/local_value_numbering.h @@ -137,6 +137,6 @@ class LocalValueNumbering { std::set null_checked_; }; -} // namespace art +} // namespace art #endif // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_ diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 264604c355..6b010ed9b3 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -972,23 +972,23 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { } } switch (dalvik_format) { - case Instruction::k11n: // Add one immediate from vB + case Instruction::k11n: // Add one immediate from vB case Instruction::k21s: case Instruction::k31i: case Instruction::k21h: str.append(StringPrintf(", #%d", insn.vB)); break; - case Instruction::k51l: // Add one wide immediate + case Instruction::k51l: 
// Add one wide immediate str.append(StringPrintf(", #%lld", insn.vB_wide)); break; - case Instruction::k21c: // One register, one string/type/method index + case Instruction::k21c: // One register, one string/type/method index case Instruction::k31c: str.append(StringPrintf(", index #%d", insn.vB)); break; - case Instruction::k22c: // Two registers, one string/type/method index + case Instruction::k22c: // Two registers, one string/type/method index str.append(StringPrintf(", index #%d", insn.vC)); break; - case Instruction::k22s: // Add one immediate from vC + case Instruction::k22s: // Add one immediate from vC case Instruction::k22b: str.append(StringPrintf(", #%d", insn.vC)); break; @@ -1154,4 +1154,4 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { return bb; } -} // namespace art +} // namespace art diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index 342d2a296a..e9ec949f23 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -273,7 +273,7 @@ struct RegLocation { unsigned fp:1; // Floating point? unsigned core:1; // Non-floating point? unsigned ref:1; // Something GC cares about. - unsigned high_word:1; // High word of pair? + unsigned high_word:1; // High word of pair? unsigned home:1; // Does this represent the home location? uint8_t low_reg; // First physical register. uint8_t high_reg; // 2nd physical register (if wide). @@ -650,7 +650,7 @@ class MIRGraph { BasicBlock* cur_block_; int num_blocks_; const DexFile::CodeItem* current_code_item_; - SafeMap block_map_; // FindBlock lookup cache. + SafeMap block_map_; // FindBlock lookup cache. std::vector m_units_; // List of methods included in this graph typedef std::pair MIRLocation; // Insert point, (m_unit_ index, offset) std::vector method_stack_; // Include stack @@ -659,7 +659,7 @@ class MIRGraph { int def_count_; // Used to estimate size of ssa name storage. int* opcode_count_; // Dex opcode coverage stats. 
int num_ssa_regs_; // Number of names following SSA transformation. - std::vector extended_basic_blocks_; // Heads of block "traces". + std::vector extended_basic_blocks_; // Heads of block "traces". int method_sreg_; unsigned int attributes_; Checkstats* checkstats_; diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index d79b26e4b9..a6314f4cab 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -845,7 +845,7 @@ bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) { bb = NextDominatedBlock(bb); } } - return false; // Not iterative - return value will be ignored + return false; // Not iterative - return value will be ignored } diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index 6fc01bdff2..7831cf6f7a 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -1648,7 +1648,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { if (bb->block_type == kEntryBlock) { SetMethodInfo(); - { // Allocate shadowframe. + { // Allocate shadowframe. art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::AllocaShadowFrame; ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id); @@ -1656,7 +1656,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { irb_->CreateCall(func, entries); } - { // Store arguments to vregs. + { // Store arguments to vregs. 
uint16_t arg_reg = cu_->num_regs; ::llvm::Function::arg_iterator arg_iter(func_->arg_begin()); @@ -1666,7 +1666,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { uint32_t shorty_size = strlen(shorty); CHECK_GE(shorty_size, 1u); - ++arg_iter; // skip method object + ++arg_iter; // skip method object if ((cu_->access_flags & kAccStatic) == 0) { SetVregOnValue(arg_iter, arg_reg); diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h index 93fee05e4e..2f54190ae7 100644 --- a/compiler/dex/quick/arm/arm_lir.h +++ b/compiler/dex/quick/arm/arm_lir.h @@ -239,7 +239,7 @@ enum ArmShiftEncodings { */ enum ArmOpcode { kArmFirst = 0, - kArm16BitData = kArmFirst, // DATA [0] rd[15..0]. + kArm16BitData = kArmFirst, // DATA [0] rd[15..0]. kThumbAdcRR, // adc [0100000101] rm[5..3] rd[2..0]. kThumbAddRRI3, // add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/ kThumbAddRI8, // add(2) [00110] rd[10..8] imm_8[7..0]. @@ -332,12 +332,12 @@ enum ArmOpcode { kThumb2VcvtDF, // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0]. kThumb2Vsqrts, // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0]. kThumb2Vsqrtd, // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0]. - kThumb2MovImmShift,// mov(T2) rd, # [11110] i [00001001111] imm3 rd[11..8] imm8. + kThumb2MovImmShift, // mov(T2) rd, # [11110] i [00001001111] imm3 rd[11..8] imm8. kThumb2MovImm16, // mov(T3) rd, # [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8. kThumb2StrRRI12, // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0]. kThumb2LdrRRI12, // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0]. 
- kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/ - kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/ + kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/ + kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/ kThumb2Cbnz, // cbnz rd,