diff options
198 files changed, 6158 insertions, 1426 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index bc0838435c..e5258087db 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -113,6 +113,7 @@ ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY ART_GTEST_stub_test_DEX_DEPS := AllFields ART_GTEST_transaction_test_DEX_DEPS := Transaction ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup +ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps MultiDex ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler diff --git a/compiler/Android.bp b/compiler/Android.bp index 46f3358af1..f6a4db49fb 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -80,6 +80,7 @@ art_cc_defaults { "optimizing/register_allocator_graph_color.cc", "optimizing/register_allocator_linear_scan.cc", "optimizing/select_generator.cc", + "optimizing/scheduler.cc", "optimizing/sharpening.cc", "optimizing/side_effects_analysis.cc", "optimizing/ssa_builder.cc", @@ -123,6 +124,7 @@ art_cc_defaults { "jni/quick/arm64/calling_convention_arm64.cc", "linker/arm64/relative_patcher_arm64.cc", "optimizing/code_generator_arm64.cc", + "optimizing/scheduler_arm64.cc", "optimizing/instruction_simplifier_arm64.cc", "optimizing/intrinsics_arm64.cc", "optimizing/nodes_arm64.cc", @@ -362,6 +364,7 @@ art_cc_test { "jni/jni_cfi_test.cc", "optimizing/codegen_test.cc", "optimizing/optimizing_cfi_test.cc", + "optimizing/scheduler_test.cc", ], codegen: { diff --git a/compiler/compiler.h b/compiler/compiler.h index 908d3669ed..2ca0b77a73 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -27,7 +27,6 @@ namespace jit { class JitCodeCache; } namespace mirror { - class ClassLoader; class DexCache; } @@ -64,7 +63,7 @@ class Compiler { InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) const = 0; diff --git 
a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 76aeaa55d7..d4f6545c59 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -284,13 +284,16 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc, } uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c(); ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(unit_.GetClassLoader()))); ClassLinker* class_linker = unit_.GetClassLinker(); ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>( GetDexFile(), method_idx, unit_.GetDexCache(), - unit_.GetClassLoader(), + class_loader, /* referrer */ nullptr, kVirtual); @@ -327,7 +330,7 @@ CompiledMethod* ArtCompileDEX( InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level) { DCHECK(driver != nullptr); diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h index 00c596d60e..0a00d45297 100644 --- a/compiler/dex/dex_to_dex_compiler.h +++ b/compiler/dex/dex_to_dex_compiler.h @@ -18,7 +18,6 @@ #define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_ #include "dex_file.h" -#include "handle.h" #include "invoke_type.h" namespace art { @@ -26,10 +25,6 @@ namespace art { class CompiledMethod; class CompilerDriver; -namespace mirror { -class ClassLoader; -} // namespace mirror - namespace optimizer { enum class DexToDexCompilationLevel { @@ -45,7 +40,7 @@ CompiledMethod* ArtCompileDEX(CompilerDriver* driver, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level); diff 
--git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h index 81d80f4f8f..f056dd3c00 100644 --- a/compiler/driver/compiler_driver-inl.h +++ b/compiler/driver/compiler_driver-inl.h @@ -31,12 +31,17 @@ namespace art { +inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa, + const DexCompilationUnit* mUnit) { + return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Ptr(); +} + inline mirror::Class* CompilerDriver::ResolveClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, dex::TypeIndex cls_index, const DexCompilationUnit* mUnit) { DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); - DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); + DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit)); mirror::Class* cls = mUnit->GetClassLinker()->ResolveType( *mUnit->GetDexFile(), cls_index, dex_cache, class_loader); DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending()); @@ -51,7 +56,7 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) { DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); - DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); + DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit)); const DexFile::MethodId& referrer_method_id = mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex()); return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit); @@ -82,7 +87,7 @@ inline ArtField* CompilerDriver::ResolveField( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static) { - DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); + DCHECK_EQ(class_loader.Get(), 
GetClassLoader(soa, mUnit)); return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx, is_static); } @@ -193,7 +198,7 @@ inline ArtMethod* CompilerDriver::ResolveMethod( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) { - DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); + DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit)); ArtMethod* resolved_method = check_incompatible_class_change ? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>( diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 4e19dbe949..1d4eaf8c5a 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -583,7 +583,7 @@ static void CompileMethod(Thread* self, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level, bool compilation_enabled, @@ -624,6 +624,9 @@ static void CompileMethod(Thread* self, // Look-up the ArtMethod associated with this code_item (if any) // -- It is later used to lookup any [optimization] annotations for this method. ScopedObjectAccess soa(self); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader_handle(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(class_loader))); // TODO: Lookup annotation from DexFile directly without resolving method. 
ArtMethod* method = @@ -631,7 +634,7 @@ static void CompileMethod(Thread* self, dex_file, method_idx, dex_cache, - class_loader, + class_loader_handle, /* referrer */ nullptr, invoke_type); @@ -678,14 +681,9 @@ static void CompileMethod(Thread* self, if (compile) { // NOTE: if compiler declines to compile this method, it will return null. - compiled_method = driver->GetCompiler()->Compile(code_item, - access_flags, - invoke_type, - class_def_idx, - method_idx, - class_loader, - dex_file, - dex_cache); + compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type, + class_def_idx, method_idx, class_loader, + dex_file, dex_cache); } if (compiled_method == nullptr && dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) { @@ -732,14 +730,12 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t uint32_t method_idx = method->GetDexMethodIndex(); uint32_t access_flags = method->GetAccessFlags(); InvokeType invoke_type = method->GetInvokeType(); - StackHandleScope<2> hs(self); + StackHandleScope<1> hs(self); Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(method->GetDeclaringClass()->GetClassLoader())); { ScopedObjectAccessUnchecked soa(self); ScopedLocalRef<jobject> local_class_loader( - soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get())); + soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader())); jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get()); // Find the dex_file dex_file = method->GetDexFile(); @@ -773,7 +769,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t invoke_type, class_def_idx, method_idx, - class_loader, + jclass_loader, *dex_file, dex_to_dex_compilation_level, true, @@ -799,7 +795,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t invoke_type, 
class_def_idx, method_idx, - class_loader, + jclass_loader, *dex_file, dex_to_dex_compilation_level, true, @@ -1074,30 +1070,22 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { public: - ResolveCatchBlockExceptionsClassVisitor() : classes_() {} + explicit ResolveCatchBlockExceptionsClassVisitor( + std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve) + : exceptions_to_resolve_(exceptions_to_resolve) {} virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { - classes_.push_back(c); - return true; - } - - void FindExceptionTypesToResolve( - std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve) - REQUIRES_SHARED(Locks::mutator_lock_) { const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - for (ObjPtr<mirror::Class> klass : classes_) { - for (ArtMethod& method : klass->GetMethods(pointer_size)) { - FindExceptionTypesToResolveForMethod(&method, exceptions_to_resolve); - } + for (auto& m : c->GetMethods(pointer_size)) { + ResolveExceptionsForMethod(&m); } + return true; } private: - void FindExceptionTypesToResolveForMethod( - ArtMethod* method, - std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve) + void ResolveExceptionsForMethod(ArtMethod* method_handle) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile::CodeItem* code_item = method->GetCodeItem(); + const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); if (code_item == nullptr) { return; // native or abstract method } @@ -1117,9 +1105,9 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { dex::TypeIndex encoded_catch_handler_handlers_type_idx = dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list)); // Add to set of types to resolve if not already in the dex cache resolved types - if 
(!method->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { - exceptions_to_resolve->emplace(encoded_catch_handler_handlers_type_idx, - method->GetDexFile()); + if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { + exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx, + method_handle->GetDexFile()); } // ignore address associated with catch handler DecodeUnsignedLeb128(&encoded_catch_handler_list); @@ -1131,7 +1119,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { } } - std::vector<ObjPtr<mirror::Class>> classes_; + std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve_; }; class RecordImageClassesVisitor : public ClassVisitor { @@ -1185,14 +1173,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) { hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"))); do { unresolved_exception_types.clear(); - { - // Thread suspension is not allowed while ResolveCatchBlockExceptionsClassVisitor - // is using a std::vector<ObjPtr<mirror::Class>>. 
- ScopedAssertNoThreadSuspension ants(__FUNCTION__); - ResolveCatchBlockExceptionsClassVisitor visitor; - class_linker->VisitClasses(&visitor); - visitor.FindExceptionTypesToResolve(&unresolved_exception_types); - } + ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types); + class_linker->VisitClasses(&visitor); for (const auto& exception_type : unresolved_exception_types) { dex::TypeIndex exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; @@ -1441,14 +1423,19 @@ void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodRefere dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index); } -bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class, - ObjPtr<mirror::Class> resolved_class) { +bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, + Handle<mirror::DexCache> dex_cache, + dex::TypeIndex type_idx) { + // Get type from dex cache assuming it was populated by the verifier + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Unknown class needs access checks. } + const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx); bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. if (!is_accessible) { + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); if (referrer_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. 
@@ -1465,9 +1452,12 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_c return is_accessible; } -bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class, - ObjPtr<mirror::Class> resolved_class, +bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, + Handle<mirror::DexCache> dex_cache, + dex::TypeIndex type_idx, bool* finalizable) { + // Get type from dex cache assuming it was populated by the verifier. + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); if (resolved_class == nullptr) { stats_->TypeNeedsAccessCheck(); // Be conservative. @@ -1475,8 +1465,10 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class return false; // Unknown class needs access checks. } *finalizable = resolved_class->IsFinalizable(); + const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx); bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. if (!is_accessible) { + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); if (referrer_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. @@ -1520,7 +1512,9 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, mirror::Class* referrer_class; Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache()); { - Handle<mirror::ClassLoader> class_loader_handle = mUnit->GetClassLoader(); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader_handle( + hs.NewHandle(soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()))); resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false); referrer_class = resolved_field != nullptr ? 
ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr; @@ -2593,18 +2587,10 @@ class CompileClassVisitor : public CompilationVisitor { continue; } previous_direct_method_idx = method_idx; - CompileMethod(soa.Self(), - driver, - it.GetMethodCodeItem(), - it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), - class_def_index, - method_idx, - class_loader, - dex_file, - dex_to_dex_compilation_level, - compilation_enabled, - dex_cache); + CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled, dex_cache); it.Next(); } // Compile virtual methods @@ -2618,17 +2604,10 @@ class CompileClassVisitor : public CompilationVisitor { continue; } previous_virtual_method_idx = method_idx; - CompileMethod(soa.Self(), - driver, it.GetMethodCodeItem(), - it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), - class_def_index, - method_idx, - class_loader, - dex_file, - dex_to_dex_compilation_level, - compilation_enabled, - dex_cache); + CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled, dex_cache); it.Next(); } DCHECK(!it.HasNext()); diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index d032a26fd5..503fe3adfc 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -187,14 +187,16 @@ class CompilerDriver { REQUIRES(!requires_constructor_barrier_lock_); // Are runtime access checks necessary in the compiled code? 
- bool CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class, - ObjPtr<mirror::Class> resolved_class) + bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, + Handle<mirror::DexCache> dex_cache, + dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? // out_is_finalizable is set to whether the type is finalizable. - bool CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class, - ObjPtr<mirror::Class> resolved_class, + bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, + Handle<mirror::DexCache> dex_cache, + dex::TypeIndex type_idx, bool* out_is_finalizable) REQUIRES_SHARED(Locks::mutator_lock_); @@ -404,6 +406,10 @@ class CompilerDriver { uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_); + mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa, + const DexCompilationUnit* mUnit) + REQUIRES_SHARED(Locks::mutator_lock_); + private: void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files, diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index e4b66ebc5a..1e4ca16844 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -101,7 +101,6 @@ class CompilerDriverTest : public CommonCompilerTest { }; // Disabled due to 10 second runtime on host -// TODO: Update the test for hash-based dex cache arrays. 
Bug: 30627598 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { CompileAll(nullptr); diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc index 7e8e812c4a..47b19297e5 100644 --- a/compiler/driver/dex_compilation_unit.cc +++ b/compiler/driver/dex_compilation_unit.cc @@ -21,7 +21,7 @@ namespace art { -DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader, +DexCompilationUnit::DexCompilationUnit(jobject class_loader, ClassLinker* class_linker, const DexFile& dex_file, const DexFile::CodeItem* code_item, diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h index 24a9a5b653..854927d747 100644 --- a/compiler/driver/dex_compilation_unit.h +++ b/compiler/driver/dex_compilation_unit.h @@ -34,7 +34,7 @@ class VerifiedMethod; class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> { public: - DexCompilationUnit(Handle<mirror::ClassLoader> class_loader, + DexCompilationUnit(jobject class_loader, ClassLinker* class_linker, const DexFile& dex_file, const DexFile::CodeItem* code_item, @@ -44,7 +44,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> { const VerifiedMethod* verified_method, Handle<mirror::DexCache> dex_cache); - Handle<mirror::ClassLoader> GetClassLoader() const { + jobject GetClassLoader() const { return class_loader_; } @@ -113,7 +113,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> { } private: - const Handle<mirror::ClassLoader> class_loader_; + const jobject class_loader_; ClassLinker* const class_linker_; @@ -125,7 +125,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> { const uint32_t access_flags_; const VerifiedMethod* verified_method_; - const Handle<mirror::DexCache> dex_cache_; + Handle<mirror::DexCache> dex_cache_; std::string symbol_; }; diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 3e9ae0834c..c72edb18a3 100644 
--- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -940,11 +940,9 @@ void ImageWriter::PruneNonImageClasses() { } ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache(); for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { - mirror::TypeDexCachePair pair = - dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed); - mirror::Class* klass = pair.object.Read(); + Class* klass = dex_cache->GetResolvedType(dex::TypeIndex(i)); if (klass != nullptr && !KeepClass(klass)) { - dex_cache->ClearResolvedType(dex::TypeIndex(pair.index)); + dex_cache->SetResolvedType(dex::TypeIndex(i), nullptr); } } ArtMethod** resolved_methods = dex_cache->GetResolvedMethods(); @@ -1924,7 +1922,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { // above comment for intern tables. ClassTable temp_class_table; temp_class_table.ReadFromMemory(class_table_memory_ptr); - ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader(); + CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u); + mirror::ClassLoader* class_loader = compile_app_image_ ? 
*class_loaders_.begin() : nullptr; CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader), table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader)); UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown)); @@ -2214,7 +2213,7 @@ void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache, orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache), ImageAddressVisitor(this)); } - mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes(); if (orig_types != nullptr) { copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(), NativeLocationInImage(orig_types), diff --git a/compiler/image_writer.h b/compiler/image_writer.h index bdc7146632..cc7df1ce21 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -51,13 +51,8 @@ class ImageSpace; } // namespace space } // namespace gc -namespace mirror { -class ClassLoader; -} // namespace mirror - class ClassLoaderVisitor; class ClassTable; -class ImtConflictTable; static constexpr int kInvalidFd = -1; @@ -84,11 +79,6 @@ class ImageWriter FINAL { return true; } - ObjPtr<mirror::ClassLoader> GetClassLoader() { - CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u); - return compile_app_image_ ? 
*class_loaders_.begin() : nullptr; - } - template <typename T> T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) { if (object == nullptr || IsInBootImage(object)) { diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index eaac0b40f5..cbd831a60f 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -211,7 +211,7 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr); if (success && (jit_logger_ != nullptr)) { - jit_logger_->WriteLog(code_cache, method); + jit_logger_->WriteLog(code_cache, method, osr); } } diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc index 9ce3b0cfe8..aa4f66773a 100644 --- a/compiler/jit/jit_logger.cc +++ b/compiler/jit/jit_logger.cc @@ -23,6 +23,7 @@ #include "driver/compiler_driver.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" +#include "oat_file-inl.h" namespace art { namespace jit { @@ -49,9 +50,10 @@ void JitLogger::OpenPerfMapLog() { } } -void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) { +void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) { if (perf_file_ != nullptr) { - const void* ptr = method->GetEntryPointFromQuickCompiledCode(); + const void* ptr = osr ? 
code_cache->LookupOsrMethodHeader(method)->GetCode() + : method->GetEntryPointFromQuickCompiledCode(); size_t code_size = code_cache->GetMemorySizeOfCodePointer(ptr); std::string method_name = method->PrettyMethod(); @@ -268,9 +270,10 @@ void JitLogger::OpenJitDumpLog() { WriteJitDumpHeader(); } -void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) { +void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) { if (jit_dump_file_ != nullptr) { - const void* code = method->GetEntryPointFromQuickCompiledCode(); + const void* code = osr ? code_cache->LookupOsrMethodHeader(method)->GetCode() + : method->GetEntryPointFromQuickCompiledCode(); size_t code_size = code_cache->GetMemorySizeOfCodePointer(code); std::string method_name = method->PrettyMethod(); diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h index 0f8cfe4e2f..460864e8a9 100644 --- a/compiler/jit/jit_logger.h +++ b/compiler/jit/jit_logger.h @@ -94,10 +94,10 @@ class JitLogger { OpenJitDumpLog(); } - void WriteLog(JitCodeCache* code_cache, ArtMethod* method) + void WriteLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) REQUIRES_SHARED(Locks::mutator_lock_) { - WritePerfMapLog(code_cache, method); - WriteJitDumpLog(code_cache, method); + WritePerfMapLog(code_cache, method, osr); + WriteJitDumpLog(code_cache, method, osr); } void CloseLog() { @@ -108,13 +108,13 @@ class JitLogger { private: // For perf-map profiling void OpenPerfMapLog(); - void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) + void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) REQUIRES_SHARED(Locks::mutator_lock_); void ClosePerfMapLog(); // For perf-inject profiling void OpenJitDumpLog(); - void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) + void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) REQUIRES_SHARED(Locks::mutator_lock_); void CloseJitDumpLog(); diff --git a/compiler/oat_writer.cc 
b/compiler/oat_writer.cc index 227fdc4874..bd2c5e3bfc 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -1060,7 +1060,6 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset, size_t relative_offset) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : OatDexMethodVisitor(writer, relative_offset), - class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr), out_(out), file_offset_(file_offset), soa_(Thread::Current()), @@ -1246,13 +1245,12 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } private: - ObjPtr<mirror::ClassLoader> class_loader_; OutputStream* const out_; const size_t file_offset_; const ScopedObjectAccess soa_; const ScopedAssertNoThreadSuspension no_thread_suspension_; ClassLinker* const class_linker_; - ObjPtr<mirror::DexCache> dex_cache_; + mirror::DexCache* dex_cache_; std::vector<uint8_t> patched_code_; void ReportWriteFailure(const char* what, const ClassDataItemIterator& it) { @@ -1263,7 +1261,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { ArtMethod* GetTargetMethod(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) { MethodReference ref = patch.TargetMethod(); - ObjPtr<mirror::DexCache> dex_cache = + mirror::DexCache* dex_cache = (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache( Thread::Current(), *ref.dex_file); ArtMethod* method = dex_cache->GetResolvedMethod( @@ -1297,7 +1295,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return target_offset; } - ObjPtr<mirror::DexCache> GetDexCache(const DexFile* target_dex_file) + mirror::DexCache* GetDexCache(const DexFile* target_dex_file) REQUIRES_SHARED(Locks::mutator_lock_) { return (target_dex_file == dex_file_) ? 
dex_cache_ @@ -1305,12 +1303,10 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK(writer_->HasImage()); - ObjPtr<mirror::DexCache> dex_cache = GetDexCache(patch.TargetTypeDexFile()); - ObjPtr<mirror::Class> type = - ClassLinker::LookupResolvedType(patch.TargetTypeIndex(), dex_cache, class_loader_); + mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile()); + mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex()); CHECK(type != nullptr); - return type.Ptr(); + return type; } mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) { diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h index 223439b0c7..8cf4089eba 100644 --- a/compiler/optimizing/builder.h +++ b/compiler/optimizing/builder.h @@ -51,10 +51,7 @@ class HGraphBuilder : public ValueObject { compiler_driver_(driver), compilation_stats_(compiler_stats), block_builder_(graph, dex_file, code_item), - ssa_builder_(graph, - dex_compilation_unit->GetClassLoader(), - dex_compilation_unit->GetDexCache(), - handles), + ssa_builder_(graph, dex_compilation_unit->GetDexCache(), handles), instruction_builder_(graph, &block_builder_, &ssa_builder_, @@ -79,12 +76,10 @@ class HGraphBuilder : public ValueObject { code_item_(code_item), dex_compilation_unit_(nullptr), compiler_driver_(nullptr), + null_dex_cache_(), compilation_stats_(nullptr), block_builder_(graph, nullptr, code_item), - ssa_builder_(graph, - handles->NewHandle<mirror::ClassLoader>(nullptr), - handles->NewHandle<mirror::DexCache>(nullptr), - handles), + ssa_builder_(graph, null_dex_cache_, handles), instruction_builder_(graph, &block_builder_, &ssa_builder_, @@ -96,7 +91,7 @@ class HGraphBuilder : public ValueObject { /* compiler_driver */ nullptr, /* interpreter_metadata */ nullptr, /* compiler_stats */ nullptr, - 
handles->NewHandle<mirror::DexCache>(nullptr), + null_dex_cache_, handles) {} GraphAnalysisResult BuildGraph(); @@ -117,6 +112,8 @@ class HGraphBuilder : public ValueObject { CompilerDriver* const compiler_driver_; + ScopedNullHandle<mirror::DexCache> null_dex_cache_; + OptimizingCompilerStats* compilation_stats_; HBasicBlockBuilder block_builder_; diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index a095970a1e..00969443c1 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -484,6 +484,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph, type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), boot_image_address_patches_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), clobbered_ra_(false) { // Save RA (containing the return address) to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(RA)); @@ -704,9 +706,6 @@ bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const { // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration() // into the path that creates a stack frame so that RA can be explicitly saved and restored. // RA can't otherwise be saved/restored when it's the only spilled register. - // TODO: Can this be improved? It causes creation of a stack frame (while RA might be - // saved in an unused temporary register) and saving of RA and the current method pointer - // in the frame. return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_; } @@ -1160,6 +1159,67 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo // offset to `out` (e.g. lw, jialc, addiu). 
} +CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch( + const DexFile& dex_file, + dex::StringIndex dex_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); + jit_string_patches_.emplace_back(dex_file, dex_index.index_); + return &jit_string_patches_.back(); +} + +CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch( + const DexFile& dex_file, + dex::TypeIndex dex_index, + Handle<mirror::Class> handle) { + jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); + jit_class_patches_.emplace_back(dex_file, dex_index.index_); + return &jit_class_patches_.back(); +} + +void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + const CodeGeneratorMIPS::JitPatchInfo& info, + uint64_t index_in_table) const { + uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label); + uintptr_t address = + reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>); + uint32_t addr32 = dchecked_integral_cast<uint32_t>(address); + // lui reg, addr32_high + DCHECK_EQ(code[literal_offset + 0], 0x34); + DCHECK_EQ(code[literal_offset + 1], 0x12); + DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00); + DCHECK_EQ(code[literal_offset + 3], 0x3C); + // lw reg, reg, addr32_low + DCHECK_EQ(code[literal_offset + 4], 0x78); + DCHECK_EQ(code[literal_offset + 5], 0x56); + DCHECK_EQ((code[literal_offset + 7] & 0xFC), 0x8C); + addr32 += (addr32 & 0x8000) << 1; // Account for sign extension in "lw reg, reg, addr32_low". 
+ // lui reg, addr32_high + code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16); + code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24); + // lw reg, reg, addr32_low + code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0); + code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8); +} + +void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) { + for (const JitPatchInfo& info : jit_string_patches_) { + const auto& it = jit_string_roots_.find(StringReference(&info.target_dex_file, + dex::StringIndex(info.index))); + DCHECK(it != jit_string_roots_.end()); + PatchJitRootUse(code, roots_data, info, it->second); + } + for (const JitPatchInfo& info : jit_class_patches_) { + const auto& it = jit_class_roots_.find(TypeReference(&info.target_dex_file, + dex::TypeIndex(info.index))); + DCHECK(it != jit_class_roots_.end()); + PatchJitRootUse(code, roots_data, info, it->second); + } +} + void CodeGeneratorMIPS::MarkGCCard(Register object, Register value, bool value_can_be_null) { @@ -5225,8 +5285,7 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kJitTableAddress: DCHECK(Runtime::Current()->UseJitCompilation()); - // TODO: implement. - fallback_load = true; + fallback_load = false; break; case HLoadString::LoadKind::kDexCacheViaMethod: fallback_load = false; @@ -5265,8 +5324,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind( break; case HLoadClass::LoadKind::kJitTableAddress: DCHECK(Runtime::Current()->UseJitCompilation()); - // TODO: implement. 
- fallback_load = true; + fallback_load = false; break; case HLoadClass::LoadKind::kDexCacheViaMethod: fallback_load = false; @@ -5591,7 +5649,14 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF break; } case HLoadClass::LoadKind::kJitTableAddress: { - LOG(FATAL) << "Unimplemented"; + CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(), + cls->GetTypeIndex(), + cls->GetClass()); + bool reordering = __ SetReorder(false); + __ Bind(&info->high_label); + __ Lui(out, /* placeholder */ 0x1234); + GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678); + __ SetReorder(reordering); break; } case HLoadClass::LoadKind::kDexCacheViaMethod: @@ -5730,6 +5795,18 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ __ Bind(slow_path->GetExitLabel()); return; } + case HLoadString::LoadKind::kJitTableAddress: { + CodeGeneratorMIPS::JitPatchInfo* info = + codegen_->NewJitRootStringPatch(load->GetDexFile(), + load->GetStringIndex(), + load->GetString()); + bool reordering = __ SetReorder(false); + __ Bind(&info->high_label); + __ Lui(out, /* placeholder */ 0x1234); + GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678); + __ SetReorder(reordering); + return; + } default: break; } diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h index e92eeef88f..47eba50248 100644 --- a/compiler/optimizing/code_generator_mips.h +++ b/compiler/optimizing/code_generator_mips.h @@ -352,6 +352,7 @@ class CodeGeneratorMIPS : public CodeGenerator { // Emit linker patches. 
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; void MarkGCCard(Register object, Register value, bool value_can_be_null); @@ -465,6 +466,31 @@ class CodeGeneratorMIPS : public CodeGenerator { void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base); + // The JitPatchInfo is used for JIT string and class loads. + struct JitPatchInfo { + JitPatchInfo(const DexFile& dex_file, uint64_t idx) + : target_dex_file(dex_file), index(idx) { } + JitPatchInfo(JitPatchInfo&& other) = default; + + const DexFile& target_dex_file; + // String/type index. + uint64_t index; + // Label for the instruction loading the most significant half of the address. + // The least significant half is loaded with the instruction that follows immediately. + MipsLabel high_label; + }; + + void PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + const JitPatchInfo& info, + uint64_t index_in_table) const; + JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file, + dex::StringIndex dex_index, + Handle<mirror::String> handle); + JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file, + dex::TypeIndex dex_index, + Handle<mirror::Class> handle); + private: Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp); @@ -512,6 +538,10 @@ class CodeGeneratorMIPS : public CodeGenerator { ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_; // Deduplication map for patchable boot image addresses. Uint32ToLiteralMap boot_image_address_patches_; + // Patches for string root accesses in JIT compiled code. + ArenaDeque<JitPatchInfo> jit_string_patches_; + // Patches for class root accesses in JIT compiled code. + ArenaDeque<JitPatchInfo> jit_class_patches_; // PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods. 
// This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations. diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index e96e3d75e1..55904a3679 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -91,9 +91,6 @@ Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Typ // Space on the stack is reserved for all arguments. stack_index_ += Primitive::Is64BitType(type) ? 2 : 1; - // TODO: shouldn't we use a whole machine word per argument on the stack? - // Implicit 4-byte method pointer (and such) will cause misalignment. - return next_location; } @@ -434,7 +431,11 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph, pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), boot_image_address_patches_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(StringReferenceValueComparator(), + graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(TypeReferenceValueComparator(), + graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { // Save RA (containing the return address) to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(RA)); } @@ -1055,6 +1056,49 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn // offset to `out` (e.g. ld, jialc, daddiu). 
} +Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file, + dex::StringIndex string_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); + return jit_string_patches_.GetOrCreate( + StringReference(&dex_file, string_index), + [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); }); +} + +Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file, + dex::TypeIndex type_index, + Handle<mirror::Class> handle) { + jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); + return jit_class_patches_.GetOrCreate( + TypeReference(&dex_file, type_index), + [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); }); +} + +void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + const Literal* literal, + uint64_t index_in_table) const { + uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel()); + uintptr_t address = + reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>); + reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address); +} + +void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) { + for (const auto& entry : jit_string_patches_) { + const auto& it = jit_string_roots_.find(entry.first); + DCHECK(it != jit_string_roots_.end()); + PatchJitRootUse(code, roots_data, entry.second, it->second); + } + for (const auto& entry : jit_class_patches_) { + const auto& it = jit_class_roots_.find(entry.first); + DCHECK(it != jit_class_roots_.end()); + PatchJitRootUse(code, roots_data, entry.second, it->second); + } +} + void CodeGeneratorMIPS64::SetupBlockedRegisters() const { // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated. 
blocked_core_registers_[ZERO] = true; @@ -3309,8 +3353,6 @@ HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kJitTableAddress: DCHECK(Runtime::Current()->UseJitCompilation()); - // TODO: implement. - fallback_load = true; break; } if (fallback_load) { @@ -3341,8 +3383,6 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind( break; case HLoadClass::LoadKind::kJitTableAddress: DCHECK(Runtime::Current()->UseJitCompilation()); - // TODO: implement. - fallback_load = true; break; case HLoadClass::LoadKind::kDexCacheViaMethod: break; @@ -3580,10 +3620,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S generate_null_check = true; break; } - case HLoadClass::LoadKind::kJitTableAddress: { - LOG(FATAL) << "Unimplemented"; + case HLoadClass::LoadKind::kJitTableAddress: + __ LoadLiteral(out, + kLoadUnsignedWord, + codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(), + cls->GetTypeIndex(), + cls->GetClass())); + GenerateGcRootFieldLoad(cls, out_loc, out, 0); break; - } case HLoadClass::LoadKind::kDexCacheViaMethod: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE(); @@ -3685,6 +3729,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA __ Bind(slow_path->GetExitLabel()); return; } + case HLoadString::LoadKind::kJitTableAddress: + __ LoadLiteral(out, + kLoadUnsignedWord, + codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), + load->GetStringIndex(), + load->GetString())); + GenerateGcRootFieldLoad(load, out_loc, out, 0); + return; default: break; } diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h index 5ba8912134..26cc7dc788 100644 --- a/compiler/optimizing/code_generator_mips64.h +++ b/compiler/optimizing/code_generator_mips64.h @@ -52,7 +52,7 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength = static constexpr GpuRegister kCoreCalleeSaves[] = - { S0, S1, S2, S3, S4, S5, 
S6, S7, GP, S8, RA }; // TODO: review + { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA }; static constexpr FpuRegister kFpuCalleeSaves[] = { F24, F25, F26, F27, F28, F29, F30, F31 }; @@ -312,6 +312,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator { // Emit linker patches. void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null); @@ -425,10 +426,27 @@ class CodeGeneratorMIPS64 : public CodeGenerator { void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out); + void PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + const Literal* literal, + uint64_t index_in_table) const; + Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, + dex::StringIndex string_index, + Handle<mirror::String> handle); + Literal* DeduplicateJitClassLiteral(const DexFile& dex_file, + dex::TypeIndex type_index, + Handle<mirror::Class> handle); + private: using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>; using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>; using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>; + using StringToLiteralMap = ArenaSafeMap<StringReference, + Literal*, + StringReferenceValueComparator>; + using TypeToLiteralMap = ArenaSafeMap<TypeReference, + Literal*, + TypeReferenceValueComparator>; using BootStringToLiteralMap = ArenaSafeMap<StringReference, Literal*, StringReferenceValueComparator>; @@ -476,6 +494,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator { ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_; // Deduplication map for patchable boot image addresses. Uint32ToLiteralMap boot_image_address_patches_; + // Patches for string root accesses in JIT compiled code. + StringToLiteralMap jit_string_patches_; + // Patches for class root accesses in JIT compiled code. 
+ TypeToLiteralMap jit_class_patches_; DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64); }; diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 763d6da6f5..f8bbf68c1c 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -17,30 +17,15 @@ #include <functional> #include <memory> -#include "arch/instruction_set.h" -#include "arch/arm/instruction_set_features_arm.h" -#include "arch/arm/registers_arm.h" -#include "arch/arm64/instruction_set_features_arm64.h" -#include "arch/mips/instruction_set_features_mips.h" -#include "arch/mips/registers_mips.h" -#include "arch/mips64/instruction_set_features_mips64.h" -#include "arch/mips64/registers_mips64.h" -#include "arch/x86/instruction_set_features_x86.h" -#include "arch/x86/registers_x86.h" -#include "arch/x86_64/instruction_set_features_x86_64.h" #include "base/macros.h" #include "builder.h" -#include "code_simulator_container.h" -#include "common_compiler_test.h" +#include "codegen_test_utils.h" #include "dex_file.h" #include "dex_instruction.h" #include "driver/compiler_options.h" -#include "graph_checker.h" #include "nodes.h" #include "optimizing_unit_test.h" -#include "prepare_for_register_allocation.h" #include "register_allocator_linear_scan.h" -#include "ssa_liveness_analysis.h" #include "utils.h" #include "utils/arm/assembler_arm_vixl.h" #include "utils/arm/managed_register_arm.h" @@ -48,324 +33,10 @@ #include "utils/mips64/managed_register_mips64.h" #include "utils/x86/managed_register_x86.h" -#ifdef ART_ENABLE_CODEGEN_arm -#include "code_generator_arm.h" -#include "code_generator_arm_vixl.h" -#endif - -#ifdef ART_ENABLE_CODEGEN_arm64 -#include "code_generator_arm64.h" -#endif - -#ifdef ART_ENABLE_CODEGEN_x86 -#include "code_generator_x86.h" -#endif - -#ifdef ART_ENABLE_CODEGEN_x86_64 -#include "code_generator_x86_64.h" -#endif - -#ifdef ART_ENABLE_CODEGEN_mips -#include "code_generator_mips.h" -#endif - -#ifdef ART_ENABLE_CODEGEN_mips64 
-#include "code_generator_mips64.h" -#endif - #include "gtest/gtest.h" namespace art { -typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&); - -class CodegenTargetConfig { - public: - CodegenTargetConfig(InstructionSet isa, CreateCodegenFn create_codegen) - : isa_(isa), create_codegen_(create_codegen) { - } - InstructionSet GetInstructionSet() const { return isa_; } - CodeGenerator* CreateCodeGenerator(HGraph* graph, const CompilerOptions& compiler_options) { - return create_codegen_(graph, compiler_options); - } - - private: - CodegenTargetConfig() {} - InstructionSet isa_; - CreateCodegenFn create_codegen_; -}; - -#ifdef ART_ENABLE_CODEGEN_arm -// Provide our own codegen, that ensures the C calling conventions -// are preserved. Currently, ART and C do not match as R4 is caller-save -// in ART, and callee-save in C. Alternatively, we could use or write -// the stub that saves and restores all registers, but it is easier -// to just overwrite the code generator. -class TestCodeGeneratorARM : public arm::CodeGeneratorARM { - public: - TestCodeGeneratorARM(HGraph* graph, - const ArmInstructionSetFeatures& isa_features, - const CompilerOptions& compiler_options) - : arm::CodeGeneratorARM(graph, isa_features, compiler_options) { - AddAllocatedRegister(Location::RegisterLocation(arm::R6)); - AddAllocatedRegister(Location::RegisterLocation(arm::R7)); - } - - void SetupBlockedRegisters() const OVERRIDE { - arm::CodeGeneratorARM::SetupBlockedRegisters(); - blocked_core_registers_[arm::R4] = true; - blocked_core_registers_[arm::R6] = false; - blocked_core_registers_[arm::R7] = false; - } -}; - -// A way to test the VIXL32-based code generator on ARM. This will replace -// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one. 
-class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL { - public: - TestCodeGeneratorARMVIXL(HGraph* graph, - const ArmInstructionSetFeatures& isa_features, - const CompilerOptions& compiler_options) - : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) { - AddAllocatedRegister(Location::RegisterLocation(arm::R6)); - AddAllocatedRegister(Location::RegisterLocation(arm::R7)); - } - - void SetupBlockedRegisters() const OVERRIDE { - arm::CodeGeneratorARMVIXL::SetupBlockedRegisters(); - blocked_core_registers_[arm::R4] = true; - blocked_core_registers_[arm::R6] = false; - blocked_core_registers_[arm::R7] = false; - } -}; -#endif - -#ifdef ART_ENABLE_CODEGEN_x86 -class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 { - public: - TestCodeGeneratorX86(HGraph* graph, - const X86InstructionSetFeatures& isa_features, - const CompilerOptions& compiler_options) - : x86::CodeGeneratorX86(graph, isa_features, compiler_options) { - // Save edi, we need it for getting enough registers for long multiplication. - AddAllocatedRegister(Location::RegisterLocation(x86::EDI)); - } - - void SetupBlockedRegisters() const OVERRIDE { - x86::CodeGeneratorX86::SetupBlockedRegisters(); - // ebx is a callee-save register in C, but caller-save for ART. - blocked_core_registers_[x86::EBX] = true; - - // Make edi available. 
- blocked_core_registers_[x86::EDI] = false; - } -}; -#endif - -class InternalCodeAllocator : public CodeAllocator { - public: - InternalCodeAllocator() : size_(0) { } - - virtual uint8_t* Allocate(size_t size) { - size_ = size; - memory_.reset(new uint8_t[size]); - return memory_.get(); - } - - size_t GetSize() const { return size_; } - uint8_t* GetMemory() const { return memory_.get(); } - - private: - size_t size_; - std::unique_ptr<uint8_t[]> memory_; - - DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator); -}; - -static bool CanExecuteOnHardware(InstructionSet target_isa) { - return (target_isa == kRuntimeISA) - // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2). - || (kRuntimeISA == kArm && target_isa == kThumb2); -} - -static bool CanExecute(InstructionSet target_isa) { - CodeSimulatorContainer simulator(target_isa); - return CanExecuteOnHardware(target_isa) || simulator.CanSimulate(); -} - -template <typename Expected> -static Expected SimulatorExecute(CodeSimulator* simulator, Expected (*f)()); - -template <> -bool SimulatorExecute<bool>(CodeSimulator* simulator, bool (*f)()) { - simulator->RunFrom(reinterpret_cast<intptr_t>(f)); - return simulator->GetCReturnBool(); -} - -template <> -int32_t SimulatorExecute<int32_t>(CodeSimulator* simulator, int32_t (*f)()) { - simulator->RunFrom(reinterpret_cast<intptr_t>(f)); - return simulator->GetCReturnInt32(); -} - -template <> -int64_t SimulatorExecute<int64_t>(CodeSimulator* simulator, int64_t (*f)()) { - simulator->RunFrom(reinterpret_cast<intptr_t>(f)); - return simulator->GetCReturnInt64(); -} - -template <typename Expected> -static void VerifyGeneratedCode(InstructionSet target_isa, - Expected (*f)(), - bool has_result, - Expected expected) { - ASSERT_TRUE(CanExecute(target_isa)) << "Target isa is not executable."; - - // Verify on simulator. 
- CodeSimulatorContainer simulator(target_isa); - if (simulator.CanSimulate()) { - Expected result = SimulatorExecute<Expected>(simulator.Get(), f); - if (has_result) { - ASSERT_EQ(expected, result); - } - } - - // Verify on hardware. - if (CanExecuteOnHardware(target_isa)) { - Expected result = f(); - if (has_result) { - ASSERT_EQ(expected, result); - } - } -} - -template <typename Expected> -static void Run(const InternalCodeAllocator& allocator, - const CodeGenerator& codegen, - bool has_result, - Expected expected) { - InstructionSet target_isa = codegen.GetInstructionSet(); - - typedef Expected (*fptr)(); - CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize()); - fptr f = reinterpret_cast<fptr>(allocator.GetMemory()); - if (target_isa == kThumb2) { - // For thumb we need the bottom bit set. - f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1); - } - VerifyGeneratedCode(target_isa, f, has_result, expected); -} - -static void ValidateGraph(HGraph* graph) { - GraphChecker graph_checker(graph); - graph_checker.Run(); - if (!graph_checker.IsValid()) { - for (const auto& error : graph_checker.GetErrors()) { - std::cout << error << std::endl; - } - } - ASSERT_TRUE(graph_checker.IsValid()); -} - -template <typename Expected> -static void RunCodeNoCheck(CodeGenerator* codegen, - HGraph* graph, - const std::function<void(HGraph*)>& hook_before_codegen, - bool has_result, - Expected expected) { - SsaLivenessAnalysis liveness(graph, codegen); - PrepareForRegisterAllocation(graph).Run(); - liveness.Analyze(); - RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters(); - hook_before_codegen(graph); - InternalCodeAllocator allocator; - codegen->Compile(&allocator); - Run(allocator, *codegen, has_result, expected); -} - -template <typename Expected> -static void RunCode(CodeGenerator* codegen, - HGraph* graph, - std::function<void(HGraph*)> hook_before_codegen, - bool has_result, - Expected expected) { - 
ValidateGraph(graph); - RunCodeNoCheck(codegen, graph, hook_before_codegen, has_result, expected); -} - -template <typename Expected> -static void RunCode(CodegenTargetConfig target_config, - HGraph* graph, - std::function<void(HGraph*)> hook_before_codegen, - bool has_result, - Expected expected) { - CompilerOptions compiler_options; - std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options)); - RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected); -} - -#ifdef ART_ENABLE_CODEGEN_arm -CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const ArmInstructionSetFeatures> features_arm( - ArmInstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) TestCodeGeneratorARM(graph, - *features_arm.get(), - compiler_options); -} - -CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const ArmInstructionSetFeatures> features_arm( - ArmInstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) - TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options); -} -#endif - -#ifdef ART_ENABLE_CODEGEN_arm64 -CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64( - Arm64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph, - *features_arm64.get(), - compiler_options); -} -#endif - -#ifdef ART_ENABLE_CODEGEN_x86 -CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const X86InstructionSetFeatures> features_x86( - X86InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options); -} -#endif - -#ifdef ART_ENABLE_CODEGEN_x86_64 -CodeGenerator* 
create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64( - X86_64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) - x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options); -} -#endif - -#ifdef ART_ENABLE_CODEGEN_mips -CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const MipsInstructionSetFeatures> features_mips( - MipsInstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) - mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options); -} -#endif - -#ifdef ART_ENABLE_CODEGEN_mips64 -CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) { - std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64( - Mips64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) - mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options); -} -#endif - // Return all combinations of ISA and code generator that are executable on // hardware, or on simulator, and that we'd like to test. static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h new file mode 100644 index 0000000000..cd954043f5 --- /dev/null +++ b/compiler/optimizing/codegen_test_utils.h @@ -0,0 +1,355 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_ +#define ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_ + +#include "arch/arm/instruction_set_features_arm.h" +#include "arch/arm/registers_arm.h" +#include "arch/arm64/instruction_set_features_arm64.h" +#include "arch/instruction_set.h" +#include "arch/mips/instruction_set_features_mips.h" +#include "arch/mips/registers_mips.h" +#include "arch/mips64/instruction_set_features_mips64.h" +#include "arch/mips64/registers_mips64.h" +#include "arch/x86/instruction_set_features_x86.h" +#include "arch/x86/registers_x86.h" +#include "arch/x86_64/instruction_set_features_x86_64.h" +#include "code_simulator_container.h" +#include "common_compiler_test.h" +#include "graph_checker.h" +#include "prepare_for_register_allocation.h" +#include "ssa_liveness_analysis.h" + +#ifdef ART_ENABLE_CODEGEN_arm +#include "code_generator_arm.h" +#include "code_generator_arm_vixl.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "code_generator_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +#include "code_generator_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +#include "code_generator_x86_64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips +#include "code_generator_mips.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips64 +#include "code_generator_mips64.h" +#endif + +namespace art { + +typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&); + +class CodegenTargetConfig { + public: + CodegenTargetConfig(InstructionSet isa, CreateCodegenFn create_codegen) + : isa_(isa), 
create_codegen_(create_codegen) { + } + InstructionSet GetInstructionSet() const { return isa_; } + CodeGenerator* CreateCodeGenerator(HGraph* graph, const CompilerOptions& compiler_options) { + return create_codegen_(graph, compiler_options); + } + + private: + CodegenTargetConfig() {} + InstructionSet isa_; + CreateCodegenFn create_codegen_; +}; + +#ifdef ART_ENABLE_CODEGEN_arm +// Provide our own codegen, that ensures the C calling conventions +// are preserved. Currently, ART and C do not match as R4 is caller-save +// in ART, and callee-save in C. Alternatively, we could use or write +// the stub that saves and restores all registers, but it is easier +// to just overwrite the code generator. +class TestCodeGeneratorARM : public arm::CodeGeneratorARM { + public: + TestCodeGeneratorARM(HGraph* graph, + const ArmInstructionSetFeatures& isa_features, + const CompilerOptions& compiler_options) + : arm::CodeGeneratorARM(graph, isa_features, compiler_options) { + AddAllocatedRegister(Location::RegisterLocation(arm::R6)); + AddAllocatedRegister(Location::RegisterLocation(arm::R7)); + } + + void SetupBlockedRegisters() const OVERRIDE { + arm::CodeGeneratorARM::SetupBlockedRegisters(); + blocked_core_registers_[arm::R4] = true; + blocked_core_registers_[arm::R6] = false; + blocked_core_registers_[arm::R7] = false; + } +}; + +// A way to test the VIXL32-based code generator on ARM. This will replace +// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one. 
+class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL { + public: + TestCodeGeneratorARMVIXL(HGraph* graph, + const ArmInstructionSetFeatures& isa_features, + const CompilerOptions& compiler_options) + : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) { + AddAllocatedRegister(Location::RegisterLocation(arm::R6)); + AddAllocatedRegister(Location::RegisterLocation(arm::R7)); + } + + void SetupBlockedRegisters() const OVERRIDE { + arm::CodeGeneratorARMVIXL::SetupBlockedRegisters(); + blocked_core_registers_[arm::R4] = true; + blocked_core_registers_[arm::R6] = false; + blocked_core_registers_[arm::R7] = false; + } +}; +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 { + public: + TestCodeGeneratorX86(HGraph* graph, + const X86InstructionSetFeatures& isa_features, + const CompilerOptions& compiler_options) + : x86::CodeGeneratorX86(graph, isa_features, compiler_options) { + // Save edi, we need it for getting enough registers for long multiplication. + AddAllocatedRegister(Location::RegisterLocation(x86::EDI)); + } + + void SetupBlockedRegisters() const OVERRIDE { + x86::CodeGeneratorX86::SetupBlockedRegisters(); + // ebx is a callee-save register in C, but caller-save for ART. + blocked_core_registers_[x86::EBX] = true; + + // Make edi available. 
+ blocked_core_registers_[x86::EDI] = false; + } +}; +#endif + +class InternalCodeAllocator : public CodeAllocator { + public: + InternalCodeAllocator() : size_(0) { } + + virtual uint8_t* Allocate(size_t size) { + size_ = size; + memory_.reset(new uint8_t[size]); + return memory_.get(); + } + + size_t GetSize() const { return size_; } + uint8_t* GetMemory() const { return memory_.get(); } + + private: + size_t size_; + std::unique_ptr<uint8_t[]> memory_; + + DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator); +}; + +static bool CanExecuteOnHardware(InstructionSet target_isa) { + return (target_isa == kRuntimeISA) + // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2). + || (kRuntimeISA == kArm && target_isa == kThumb2); +} + +static bool CanExecute(InstructionSet target_isa) { + CodeSimulatorContainer simulator(target_isa); + return CanExecuteOnHardware(target_isa) || simulator.CanSimulate(); +} + +template <typename Expected> +inline static Expected SimulatorExecute(CodeSimulator* simulator, Expected (*f)()); + +template <> +inline bool SimulatorExecute<bool>(CodeSimulator* simulator, bool (*f)()) { + simulator->RunFrom(reinterpret_cast<intptr_t>(f)); + return simulator->GetCReturnBool(); +} + +template <> +inline int32_t SimulatorExecute<int32_t>(CodeSimulator* simulator, int32_t (*f)()) { + simulator->RunFrom(reinterpret_cast<intptr_t>(f)); + return simulator->GetCReturnInt32(); +} + +template <> +inline int64_t SimulatorExecute<int64_t>(CodeSimulator* simulator, int64_t (*f)()) { + simulator->RunFrom(reinterpret_cast<intptr_t>(f)); + return simulator->GetCReturnInt64(); +} + +template <typename Expected> +static void VerifyGeneratedCode(InstructionSet target_isa, + Expected (*f)(), + bool has_result, + Expected expected) { + ASSERT_TRUE(CanExecute(target_isa)) << "Target isa is not executable."; + + // Verify on simulator. 
+ CodeSimulatorContainer simulator(target_isa); + if (simulator.CanSimulate()) { + Expected result = SimulatorExecute<Expected>(simulator.Get(), f); + if (has_result) { + ASSERT_EQ(expected, result); + } + } + + // Verify on hardware. + if (CanExecuteOnHardware(target_isa)) { + Expected result = f(); + if (has_result) { + ASSERT_EQ(expected, result); + } + } +} + +template <typename Expected> +static void Run(const InternalCodeAllocator& allocator, + const CodeGenerator& codegen, + bool has_result, + Expected expected) { + InstructionSet target_isa = codegen.GetInstructionSet(); + + typedef Expected (*fptr)(); + CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize()); + fptr f = reinterpret_cast<fptr>(allocator.GetMemory()); + if (target_isa == kThumb2) { + // For thumb we need the bottom bit set. + f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1); + } + VerifyGeneratedCode(target_isa, f, has_result, expected); +} + +static void ValidateGraph(HGraph* graph) { + GraphChecker graph_checker(graph); + graph_checker.Run(); + if (!graph_checker.IsValid()) { + for (const auto& error : graph_checker.GetErrors()) { + std::cout << error << std::endl; + } + } + ASSERT_TRUE(graph_checker.IsValid()); +} + +template <typename Expected> +static void RunCodeNoCheck(CodeGenerator* codegen, + HGraph* graph, + const std::function<void(HGraph*)>& hook_before_codegen, + bool has_result, + Expected expected) { + SsaLivenessAnalysis liveness(graph, codegen); + PrepareForRegisterAllocation(graph).Run(); + liveness.Analyze(); + RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters(); + hook_before_codegen(graph); + InternalCodeAllocator allocator; + codegen->Compile(&allocator); + Run(allocator, *codegen, has_result, expected); +} + +template <typename Expected> +static void RunCode(CodeGenerator* codegen, + HGraph* graph, + std::function<void(HGraph*)> hook_before_codegen, + bool has_result, + Expected expected) { + 
ValidateGraph(graph); + RunCodeNoCheck(codegen, graph, hook_before_codegen, has_result, expected); +} + +template <typename Expected> +static void RunCode(CodegenTargetConfig target_config, + HGraph* graph, + std::function<void(HGraph*)> hook_before_codegen, + bool has_result, + Expected expected) { + CompilerOptions compiler_options; + std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options)); + RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected); +} + +#ifdef ART_ENABLE_CODEGEN_arm +CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const ArmInstructionSetFeatures> features_arm( + ArmInstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) TestCodeGeneratorARM(graph, + *features_arm.get(), + compiler_options); +} + +CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const ArmInstructionSetFeatures> features_arm( + ArmInstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) + TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options); +} +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64( + Arm64InstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph, + *features_arm64.get(), + compiler_options); +} +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const X86InstructionSetFeatures> features_x86( + X86InstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options); +} +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +CodeGenerator* 
create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64( + X86_64InstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) + x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options); +} +#endif + +#ifdef ART_ENABLE_CODEGEN_mips +CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const MipsInstructionSetFeatures> features_mips( + MipsInstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) + mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options); +} +#endif + +#ifdef ART_ENABLE_CODEGEN_mips64 +CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) { + std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64( + Mips64InstructionSetFeatures::FromCppDefines()); + return new (graph->GetArena()) + mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options); +} +#endif + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_ diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h index 21c3ae628a..ecb86875d6 100644 --- a/compiler/optimizing/common_arm.h +++ b/compiler/optimizing/common_arm.h @@ -146,6 +146,12 @@ inline vixl::aarch32::Register InputRegister(HInstruction* instr) { return InputRegisterAt(instr, 0); } +inline vixl::aarch32::DRegister DRegisterFromS(vixl::aarch32::SRegister s) { + vixl::aarch32::DRegister d = vixl::aarch32::DRegister(s.GetCode() / 2); + DCHECK(s.Is(d.GetLane(0)) || s.Is(d.GetLane(1))); + return d; +} + inline int32_t Int32ConstantFrom(HInstruction* instr) { if (instr->IsIntConstant()) { return instr->AsIntConstant()->GetValue(); diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index 3973985338..5539413aad 100644 --- 
a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -57,14 +57,18 @@ static bool IsIntAndGet(HInstruction* instruction, int64_t* value) { return false; } -/** Returns b^e for b,e >= 1. */ -static int64_t IntPow(int64_t b, int64_t e) { +/** Returns b^e for b,e >= 1. Sets overflow if arithmetic wrap-around occurred. */ +static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) { DCHECK_GE(b, 1); DCHECK_GE(e, 1); int64_t pow = 1; while (e) { if (e & 1) { + int64_t oldpow = pow; pow *= b; + if (pow < oldpow) { + *overflow = true; + } } e >>= 1; b *= b; @@ -1020,20 +1024,27 @@ bool InductionVarRange::GenerateLastValueGeometric(HInductionVarAnalysis::Induct HInstruction* opb = nullptr; if (GenerateCode(info->op_a, nullptr, graph, block, &opa, false, false) && GenerateCode(info->op_b, nullptr, graph, block, &opb, false, false)) { - // Compute f ^ m for known maximum index value m. - int64_t fpow = IntPow(f, m); if (graph != nullptr) { - DCHECK(info->operation == HInductionVarAnalysis::kMul || - info->operation == HInductionVarAnalysis::kDiv); Primitive::Type type = info->type; + // Compute f ^ m for known maximum index value m. + bool overflow = false; + int64_t fpow = IntPow(f, m, &overflow); + if (info->operation == HInductionVarAnalysis::kDiv) { + // For division, any overflow truncates to zero. + if (overflow || (type != Primitive::kPrimLong && !CanLongValueFitIntoInt(fpow))) { + fpow = 0; + } + } else if (type != Primitive::kPrimLong) { + // For multiplication, okay to truncate to required precision. + DCHECK(info->operation == HInductionVarAnalysis::kMul); + fpow = static_cast<int32_t>(fpow); + } + // Generate code. if (fpow == 0) { // Special case: repeated mul/div always yields zero. *result = graph->GetConstant(type, 0); } else { // Last value: a * f ^ m + b or a * f ^ -m + b. 
- if (type != Primitive::kPrimLong) { - fpow = static_cast<int32_t>(fpow); // okay to truncate - } HInstruction* e = nullptr; if (info->operation == HInductionVarAnalysis::kMul) { e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow)); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 22f0646fd0..7772e8f973 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -198,9 +198,9 @@ static uint32_t FindMethodIndexIn(ArtMethod* method, } static dex::TypeIndex FindClassIndexIn(mirror::Class* cls, - const DexCompilationUnit& compilation_unit) + const DexFile& dex_file, + Handle<mirror::DexCache> dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = *compilation_unit.GetDexFile(); dex::TypeIndex index; if (cls->GetDexCache() == nullptr) { DCHECK(cls->IsArrayClass()) << cls->PrettyClass(); @@ -209,19 +209,22 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls, DCHECK(cls->IsProxyClass()) << cls->PrettyClass(); // TODO: deal with proxy classes. } else if (IsSameDexFile(cls->GetDexFile(), dex_file)) { - DCHECK_EQ(cls->GetDexCache(), compilation_unit.GetDexCache().Get()); + DCHECK_EQ(cls->GetDexCache(), dex_cache.Get()); index = cls->GetDexTypeIndex(); + // Update the dex cache to ensure the class is in. The generated code will + // consider it is. We make it safe by updating the dex cache, as other + // dex files might also load the class, and there is no guarantee the dex + // cache of the dex file of the class will be updated. + if (dex_cache->GetResolvedType(index) == nullptr) { + dex_cache->SetResolvedType(index, cls); + } } else { index = cls->FindTypeIndexInOtherDexFile(dex_file); - // We cannot guarantee the entry will resolve to the same class, + // We cannot guarantee the entry in the dex cache will resolve to the same class, // as there may be different class loaders. 
So only return the index if it's - // the right class already resolved with the class loader. - if (index.IsValid()) { - ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType( - index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get()); - if (resolved != cls) { - index = dex::TypeIndex::Invalid(); - } + // the right class in the dex cache already. + if (index.IsValid() && dex_cache->GetResolvedType(index) != cls) { + index = dex::TypeIndex::Invalid(); } } @@ -448,8 +451,9 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction, DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface()) << invoke_instruction->DebugName(); + const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile(); dex::TypeIndex class_index = FindClassIndexIn( - GetMonomorphicType(classes), caller_compilation_unit_); + GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache()); if (!class_index.IsValid()) { VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method) << " from inline cache is not inlined because its class is not" @@ -492,7 +496,6 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction, // Run type propagation to get the guard typed, and eventually propagate the // type of the receiver. 
ReferenceTypePropagation rtp_fixup(graph_, - outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, /* is_first_run */ false); @@ -583,6 +586,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker(); PointerSize pointer_size = class_linker->GetImagePointerSize(); + const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile(); bool all_targets_inlined = true; bool one_target_inlined = false; @@ -604,7 +608,8 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, HInstruction* cursor = invoke_instruction->GetPrevious(); HBasicBlock* bb_cursor = invoke_instruction->GetBlock(); - dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_); + dex::TypeIndex class_index = FindClassIndexIn( + handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache()); HInstruction* return_replacement = nullptr; if (!class_index.IsValid() || !TryBuildAndInline(invoke_instruction, @@ -660,7 +665,6 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, // Run type propagation to get the guards typed. ReferenceTypePropagation rtp_fixup(graph_, - outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, /* is_first_run */ false); @@ -855,7 +859,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( // Run type propagation to get the guard typed. ReferenceTypePropagation rtp_fixup(graph_, - outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, /* is_first_run */ false); @@ -924,7 +927,6 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, // Actual return value has a more specific type than the method's declared // return type. Run RTP again on the outer graph to propagate it. 
ReferenceTypePropagation(graph_, - outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, /* is_first_run */ false).Run(); @@ -1177,11 +1179,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex /* dex_pc */ 0); if (iget->GetType() == Primitive::kPrimNot) { // Use the same dex_cache that we used for field lookup as the hint_dex_cache. - ReferenceTypePropagation rtp(graph_, - outer_compilation_unit_.GetClassLoader(), - dex_cache, - handles_, - /* is_first_run */ false); + ReferenceTypePropagation rtp(graph_, dex_cache, handles_, /* is_first_run */ false); rtp.Visit(iget); } return iget; @@ -1227,7 +1225,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, resolved_method->GetDeclaringClass()->GetClassLoader())); DexCompilationUnit dex_compilation_unit( - class_loader, + class_loader.ToJObject(), class_linker, callee_dex_file, code_item, @@ -1343,7 +1341,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, // are more specific than the declared ones, run RTP again on the inner graph. 
if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) { ReferenceTypePropagation(callee_graph, - outer_compilation_unit_.GetClassLoader(), dex_compilation_unit.GetDexCache(), handles_, /* is_first_run */ false).Run(); diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index 3d911d77ba..cac385ce3c 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -668,10 +668,11 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) { ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) { ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<2> hs(soa.Self()); + StackHandleScope<3> hs(soa.Self()); ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker(); - Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader(); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader()))); Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass())); // We fetch the referenced class eagerly (that is, the class pointed by in the MethodId // at method_idx), as `CanAccessResolvedMethod` expects it be be in the dex cache. 
@@ -1283,7 +1284,9 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio static mirror::Class* GetClassFrom(CompilerDriver* driver, const DexCompilationUnit& compilation_unit) { ScopedObjectAccess soa(Thread::Current()); - Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader(); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(compilation_unit.GetClassLoader()))); Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache(); return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit); @@ -1299,9 +1302,10 @@ mirror::Class* HInstructionBuilder::GetCompilingClass() const { bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const { ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<2> hs(soa.Self()); + StackHandleScope<3> hs(soa.Self()); Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache(); - Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader(); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader()))); Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass( soa, dex_cache, class_loader, type_index, dex_compilation_unit_))); Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass())); @@ -1339,8 +1343,10 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, uint16_t field_index = instruction.VRegB_21c(); ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<3> hs(soa.Self()); Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache(); - Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader(); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader()))); 
ArtField* resolved_field = compiler_driver_->ResolveField( soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true); @@ -1351,7 +1357,6 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, return true; } - StackHandleScope<2> hs(soa.Self()); Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType(); Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache(); Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass())); @@ -1630,7 +1635,9 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, const DexCompilationUnit* compilation_unit = outer ? outer_compilation_unit_ : dex_compilation_unit_; const DexFile& dex_file = *compilation_unit->GetDexFile(); - Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader(); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader()))); Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass( soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit)); @@ -1685,9 +1692,17 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction, } } -bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const { +bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, + Handle<mirror::DexCache> dex_cache, + bool* finalizable) const { return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks( - LookupReferrerClass(), LookupResolvedType(type_index, *dex_compilation_unit_), finalizable); + dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index, finalizable); +} + +bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const { + ScopedObjectAccess soa(Thread::Current()); + Handle<mirror::DexCache> dex_cache = 
dex_compilation_unit_->GetDexCache(); + return NeedsAccessCheck(type_index, dex_cache, finalizable); } bool HInstructionBuilder::CanDecodeQuickenedInfo() const { @@ -2727,18 +2742,4 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, return true; } // NOLINT(readability/fn_size) -ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType( - dex::TypeIndex type_index, - const DexCompilationUnit& compilation_unit) const { - return ClassLinker::LookupResolvedType( - type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get()); -} - -ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const { - // TODO: Cache the result in a Handle<mirror::Class>. - const DexFile::MethodId& method_id = - dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex()); - return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_); -} - } // namespace art diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h index 6e3b078dbb..5efe95094c 100644 --- a/compiler/optimizing/instruction_builder.h +++ b/compiler/optimizing/instruction_builder.h @@ -103,8 +103,11 @@ class HInstructionBuilder : public ValueObject { // Returns whether the current method needs access check for the type. // Output parameter finalizable is set to whether the type is finalizable. - bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const + bool NeedsAccessCheck(dex::TypeIndex type_index, + Handle<mirror::DexCache> dex_cache, + /*out*/bool* finalizable) const REQUIRES_SHARED(Locks::mutator_lock_); + bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const; template<typename T> void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); @@ -287,12 +290,6 @@ class HInstructionBuilder : public ValueObject { // not be resolved. 
ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type); - ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_index, - const DexCompilationUnit& compilation_unit) const - REQUIRES_SHARED(Locks::mutator_lock_); - - ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_); - ArenaAllocator* const arena_; HGraph* const graph_; VariableSizedHandleScope* handles_; diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 1e73cf67df..6425e1313f 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -31,6 +31,9 @@ class DexFile; static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U; static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000); +static constexpr uint32_t kNanFloat = 0x7fc00000U; +static constexpr uint64_t kNanDouble = 0x7ff8000000000000; + // Recognize intrinsics from HInvoke nodes. class IntrinsicsRecognizer : public HOptimization { public: diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 1a10173ed7..70a3d38c13 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -40,10 +40,12 @@ using helpers::LocationFrom; using helpers::LowRegisterFrom; using helpers::LowSRegisterFrom; using helpers::OutputDRegister; +using helpers::OutputSRegister; using helpers::OutputRegister; using helpers::OutputVRegister; using helpers::RegisterFrom; using helpers::SRegisterFrom; +using helpers::DRegisterFromS; using namespace vixl::aarch32; // NOLINT(build/namespaces) @@ -462,6 +464,214 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) { GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); } +static void GenMinMaxFloat(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) { + Location op1_loc = invoke->GetLocations()->InAt(0); + Location op2_loc = 
invoke->GetLocations()->InAt(1); + Location out_loc = invoke->GetLocations()->Out(); + + // Optimization: don't generate any code if inputs are the same. + if (op1_loc.Equals(op2_loc)) { + DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder. + return; + } + + vixl32::SRegister op1 = SRegisterFrom(op1_loc); + vixl32::SRegister op2 = SRegisterFrom(op2_loc); + vixl32::SRegister out = OutputSRegister(invoke); + UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); + const vixl32::Register temp1 = temps.Acquire(); + vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0)); + vixl32::Label nan, done; + + DCHECK(op1.Is(out)); + + __ Vcmp(op1, op2); + __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); + __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling. + + // op1 <> op2 + vixl32::ConditionType cond = is_min ? gt : lt; + { + ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(), + 2 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(cond); + __ vmov(cond, F32, out, op2); + } + __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation. + + // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0). + __ Vmov(temp1, op1); + __ Vmov(temp2, op2); + if (is_min) { + __ Orr(temp1, temp1, temp2); + } else { + __ And(temp1, temp1, temp2); + } + __ Vmov(out, temp1); + __ B(&done); + + // handle NaN input. + __ Bind(&nan); + __ Movt(temp1, High16Bits(kNanFloat)); // 0x7FC0xxxx is a NaN. 
+ __ Vmov(out, temp1); + + __ Bind(&done); +} + +static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) { + CreateFPFPToFPLocations(arena_, invoke); + invoke->GetLocations()->AddTemp(Location::RequiresRegister()); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) { + GenMinMaxFloat(invoke, /* is_min */ true, GetAssembler()); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) { + CreateFPFPToFPLocations(arena_, invoke); + invoke->GetLocations()->AddTemp(Location::RequiresRegister()); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) { + GenMinMaxFloat(invoke, /* is_min */ false, GetAssembler()); +} + +static void GenMinMaxDouble(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) { + Location op1_loc = invoke->GetLocations()->InAt(0); + Location op2_loc = invoke->GetLocations()->InAt(1); + Location out_loc = invoke->GetLocations()->Out(); + + // Optimization: don't generate any code if inputs are the same. + if (op1_loc.Equals(op2_loc)) { + DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder. + return; + } + + vixl32::DRegister op1 = DRegisterFrom(op1_loc); + vixl32::DRegister op2 = DRegisterFrom(op2_loc); + vixl32::DRegister out = OutputDRegister(invoke); + vixl32::Label handle_nan_eq, done; + + DCHECK(op1.Is(out)); + + __ Vcmp(op1, op2); + __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); + __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling. + + // op1 <> op2 + vixl32::ConditionType cond = is_min ? 
gt : lt; + { + ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(), + 2 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(cond); + __ vmov(cond, F64, out, op2); + } + __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation. + + // handle op1 == op2, max(+0.0,-0.0). + if (!is_min) { + __ Vand(F64, out, op1, op2); + __ B(&done); + } + + // handle op1 == op2, min(+0.0,-0.0), NaN input. + __ Bind(&handle_nan_eq); + __ Vorr(F64, out, op1, op2); // assemble op1/-0.0/NaN. + + __ Bind(&done); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) { + CreateFPFPToFPLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) { + GenMinMaxDouble(invoke, /* is_min */ true , GetAssembler()); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) { + CreateFPFPToFPLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) { + GenMinMaxDouble(invoke, /* is_min */ false, GetAssembler()); +} + +static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) { + Location op1_loc = invoke->GetLocations()->InAt(0); + Location op2_loc = invoke->GetLocations()->InAt(1); + Location out_loc = invoke->GetLocations()->Out(); + + // Optimization: don't generate any code if inputs are the same. + if (op1_loc.Equals(op2_loc)) { + DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder. 
+ return; + } + + vixl32::Register op1_lo = LowRegisterFrom(op1_loc); + vixl32::Register op1_hi = HighRegisterFrom(op1_loc); + vixl32::Register op2_lo = LowRegisterFrom(op2_loc); + vixl32::Register op2_hi = HighRegisterFrom(op2_loc); + vixl32::Register out_lo = LowRegisterFrom(out_loc); + vixl32::Register out_hi = HighRegisterFrom(out_loc); + UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); + const vixl32::Register temp = temps.Acquire(); + + DCHECK(op1_lo.Is(out_lo)); + DCHECK(op1_hi.Is(out_hi)); + + // Compare op1 >= op2, or op1 < op2. + __ Cmp(out_lo, op2_lo); + __ Sbcs(temp, out_hi, op2_hi); + + // Now GE/LT condition code is correct for the long comparison. + { + vixl32::ConditionType cond = is_min ? ge : lt; + ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(), + 3 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ itt(cond); + __ mov(cond, out_lo, op2_lo); + __ mov(cond, out_hi, op2_hi); + } +} + +static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::SameAsFirstInput()); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) { + CreateLongLongToLongLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) { + GenMinMaxLong(invoke, /* is_min */ true, GetAssembler()); +} + +void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) { + CreateLongLongToLongLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) { + GenMinMaxLong(invoke, /* is_min */ false, GetAssembler()); +} + static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) { vixl32::Register op1 = 
InputRegisterAt(invoke, 0); vixl32::Register op2 = InputRegisterAt(invoke, 1); @@ -2778,12 +2988,6 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) { __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0)); } -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble) -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat) -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble) -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat) -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong) -UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong) UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe? UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe? UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure. diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index d15145e673..76900f23a9 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -1354,13 +1354,15 @@ std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& return os; } -void HInstruction::MoveBefore(HInstruction* cursor) { - DCHECK(!IsPhi()); - DCHECK(!IsControlFlow()); - DCHECK(CanBeMoved() || - // HShouldDeoptimizeFlag can only be moved by CHAGuardOptimization. - IsShouldDeoptimizeFlag()); - DCHECK(!cursor->IsPhi()); +void HInstruction::MoveBefore(HInstruction* cursor, bool do_checks) { + if (do_checks) { + DCHECK(!IsPhi()); + DCHECK(!IsControlFlow()); + DCHECK(CanBeMoved() || + // HShouldDeoptimizeFlag can only be moved by CHAGuardOptimization. 
+ IsShouldDeoptimizeFlag()); + DCHECK(!cursor->IsPhi()); + } next_->previous_ = previous_; if (previous_ != nullptr) { diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index f0ea9e20e6..acf14aa726 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -2065,8 +2065,8 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { other->ReplaceInput(this, use_index); } - // Move `this` instruction before `cursor`. - void MoveBefore(HInstruction* cursor); + // Move `this` instruction before `cursor` + void MoveBefore(HInstruction* cursor, bool do_checks = true); // Move `this` before its first user and out of any loops. If there is no // out-of-loop user that dominates all other users, move the instruction diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index dad87e3d9e..1ab671022b 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -90,6 +90,7 @@ #include "reference_type_propagation.h" #include "register_allocator_linear_scan.h" #include "select_generator.h" +#include "scheduler.h" #include "sharpening.h" #include "side_effects_analysis.h" #include "ssa_builder.h" @@ -305,7 +306,7 @@ class OptimizingCompiler FINAL : public Compiler { InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) const OVERRIDE; @@ -374,7 +375,7 @@ class OptimizingCompiler FINAL : public Compiler { InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, ArtMethod* method, @@ -658,10 +659,13 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, new (arena) arm64::InstructionSimplifierArm64(graph, stats); SideEffectsAnalysis* 
side_effects = new (arena) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + HInstructionScheduling* scheduling = + new (arena) HInstructionScheduling(graph, instruction_set); HOptimization* arm64_optimizations[] = { simplifier, side_effects, - gvn + gvn, + scheduling, }; RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer); break; @@ -871,7 +875,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> class_loader, + jobject class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, ArtMethod* method, @@ -942,8 +946,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, const uint8_t* interpreter_metadata = nullptr; if (method == nullptr) { ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader>(class_loader))); method = compiler_driver->ResolveMethod( - soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type); + soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type); } // For AOT compilation, we may not get a method, for example if its class is erroneous. // JIT should always have a method. @@ -952,6 +959,16 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, graph->SetArtMethod(method); ScopedObjectAccess soa(Thread::Current()); interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize()); + dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex(); + + // Update the dex cache if the type is not in it yet. Note that under AOT, + // the verifier must have set it, but under JIT, there's no guarantee, as we + // don't necessarily run the verifier. 
+ // The compiler and the compiler driver assume the compiling class is + // in the dex cache. + if (dex_cache->GetResolvedType(type_index) == nullptr) { + dex_cache->SetResolvedType(type_index, method->GetDeclaringClass()); + } } std::unique_ptr<CodeGenerator> codegen( @@ -1031,7 +1048,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - Handle<mirror::ClassLoader> jclass_loader, + jobject jclass_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) const { CompilerDriver* compiler_driver = GetCompilerDriver(); @@ -1126,6 +1143,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); DCHECK(method->IsCompilable()); + jobject jclass_loader = class_loader.ToJObject(); const DexFile* dex_file = method->GetDexFile(); const uint16_t class_def_idx = method->GetClassDefIndex(); const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); @@ -1149,7 +1167,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, invoke_type, class_def_idx, method_idx, - class_loader, + jclass_loader, *dex_file, dex_cache, method, diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index 58d90176cd..bf963b8996 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -64,6 +64,9 @@ LiveInterval* BuildInterval(const size_t ranges[][2], void RemoveSuspendChecks(HGraph* graph) { for (HBasicBlock* block : graph->GetBlocks()) { if (block != nullptr) { + if (block->GetLoopInformation() != nullptr) { + block->GetLoopInformation()->SetSuspendCheck(nullptr); + } for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->IsSuspendCheck()) { diff --git a/compiler/optimizing/reference_type_propagation.cc 
b/compiler/optimizing/reference_type_propagation.cc index be4857a49a..b02f2509ab 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -66,13 +66,11 @@ ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetThrowabl class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { public: RTPVisitor(HGraph* graph, - Handle<mirror::ClassLoader> class_loader, Handle<mirror::DexCache> hint_dex_cache, HandleCache* handle_cache, ArenaVector<HInstruction*>* worklist, bool is_first_run) : HGraphDelegateVisitor(graph), - class_loader_(class_loader), hint_dex_cache_(hint_dex_cache), handle_cache_(handle_cache), worklist_(worklist), @@ -104,7 +102,6 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { bool is_exact); private: - Handle<mirror::ClassLoader> class_loader_; Handle<mirror::DexCache> hint_dex_cache_; HandleCache* handle_cache_; ArenaVector<HInstruction*>* worklist_; @@ -112,13 +109,11 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { }; ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph, - Handle<mirror::ClassLoader> class_loader, Handle<mirror::DexCache> hint_dex_cache, VariableSizedHandleScope* handles, bool is_first_run, const char* name) : HOptimization(graph, name), - class_loader_(class_loader), hint_dex_cache_(hint_dex_cache), handle_cache_(handles), worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)), @@ -153,12 +148,7 @@ void ReferenceTypePropagation::ValidateTypes() { } void ReferenceTypePropagation::Visit(HInstruction* instruction) { - RTPVisitor visitor(graph_, - class_loader_, - hint_dex_cache_, - &handle_cache_, - &worklist_, - is_first_run_); + RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); instruction->Accept(&visitor); } @@ -332,12 +322,7 @@ void ReferenceTypePropagation::Run() { } void 
ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { - RTPVisitor visitor(graph_, - class_loader_, - hint_dex_cache_, - &handle_cache_, - &worklist_, - is_first_run_); + RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); // Handle Phis first as there might be instructions in the same block who depend on them. for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { VisitPhi(it.Current()->AsPhi()); @@ -557,10 +542,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* DCHECK_EQ(instr->GetType(), Primitive::kPrimNot); ScopedObjectAccess soa(Thread::Current()); - ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_); - ObjPtr<mirror::Class> klass = - ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get()); - SetClassAsTypeInfo(instr, klass, is_exact); + mirror::DexCache* dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_); + // Get type from dex cache assuming it was populated by the verifier. + SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact); } void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) { @@ -573,13 +557,25 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) { SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true); } +static mirror::Class* GetClassFromDexCache(Thread* self, + const DexFile& dex_file, + dex::TypeIndex type_idx, + Handle<mirror::DexCache> hint_dex_cache) + REQUIRES_SHARED(Locks::mutator_lock_) { + mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache); + // Get type from dex cache assuming it was populated by the verifier. 
+ return dex_cache->GetResolvedType(type_idx); +} + void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) { // We check if the existing type is valid: the inliner may have set it. if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) { - UpdateReferenceTypeInfo(instr, - instr->GetTypeIndex(), - instr->GetDexFile(), - /* is_exact */ false); + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(), + instr->GetDexFile(), + instr->GetTypeIndex(), + hint_dex_cache_); + SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false); } } diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index 215e96786b..4663471729 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -33,7 +33,6 @@ namespace art { class ReferenceTypePropagation : public HOptimization { public: ReferenceTypePropagation(HGraph* graph, - Handle<mirror::ClassLoader> class_loader, Handle<mirror::DexCache> hint_dex_cache, VariableSizedHandleScope* handles, bool is_first_run, @@ -106,8 +105,6 @@ class ReferenceTypePropagation : public HOptimization { void ValidateTypes(); - Handle<mirror::ClassLoader> class_loader_; - // Note: hint_dex_cache_ is usually, but not necessarily, the dex cache associated with // graph_->GetDexFile(). Since we may look up also in other dex files, it's used only // as a hint, to reduce the number of calls to the costly ClassLinker::FindDexCache(). 
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc index 84a4bab1a9..b061c871b0 100644 --- a/compiler/optimizing/reference_type_propagation_test.cc +++ b/compiler/optimizing/reference_type_propagation_test.cc @@ -38,7 +38,6 @@ class ReferenceTypePropagationTest : public CommonCompilerTest { void SetupPropagation(VariableSizedHandleScope* handles) { graph_->InitializeInexactObjectRTI(handles); propagation_ = new (&allocator_) ReferenceTypePropagation(graph_, - Handle<mirror::ClassLoader>(), Handle<mirror::DexCache>(), handles, true, diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc new file mode 100644 index 0000000000..d65d20cf43 --- /dev/null +++ b/compiler/optimizing/scheduler.cc @@ -0,0 +1,610 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <string> + +#include "prepare_for_register_allocation.h" +#include "scheduler.h" + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "scheduler_arm64.h" +#endif + +namespace art { + +void SchedulingGraph::AddDependency(SchedulingNode* node, + SchedulingNode* dependency, + bool is_data_dependency) { + if (node == nullptr || dependency == nullptr) { + // A `nullptr` node indicates an instruction out of scheduling range (eg. in + // an other block), so we do not need to add a dependency edge to the graph. 
+ return; + } + + if (is_data_dependency) { + if (!HasImmediateDataDependency(node, dependency)) { + node->AddDataPredecessor(dependency); + } + } else if (!HasImmediateOtherDependency(node, dependency)) { + node->AddOtherPredecessor(dependency); + } +} + +static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) { + // Read after write. + if (node.MayDependOn(other)) { + return true; + } + + // Write after read. + if (other.MayDependOn(node)) { + return true; + } + + // Memory write after write. + if (node.DoesAnyWrite() && other.DoesAnyWrite()) { + return true; + } + + return false; +} + + +// Check whether `node` depends on `other`, taking into account `SideEffect` +// information and `CanThrow` information. +static bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) { + if (MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) { + return true; + } + + if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) { + return true; + } + + if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) { + return true; + } + + if (other->CanThrow() && node->CanThrow()) { + return true; + } + + // Check side-effect dependency between ArrayGet and BoundsCheck. + if (node->IsArrayGet() && other->IsBoundsCheck() && node->InputAt(1) == other) { + return true; + } + + return false; +} + +void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) { + SchedulingNode* instruction_node = GetNode(instruction); + + // Define-use dependencies. + for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { + AddDataDependency(GetNode(use.GetUser()), instruction_node); + } + + // Scheduling barrier dependencies. + DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_); + if (contains_scheduling_barrier_) { + // A barrier depends on instructions after it. And instructions before the + // barrier depend on it. 
+ for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) { + SchedulingNode* other_node = GetNode(other); + bool other_is_barrier = other_node->IsSchedulingBarrier(); + if (is_scheduling_barrier || other_is_barrier) { + AddOtherDependency(other_node, instruction_node); + } + if (other_is_barrier) { + // This other scheduling barrier guarantees ordering of instructions after + // it, so avoid creating additional useless dependencies in the graph. + // For example if we have + // instr_1 + // barrier_2 + // instr_3 + // barrier_4 + // instr_5 + // we only create the following non-data dependencies + // 1 -> 2 + // 2 -> 3 + // 2 -> 4 + // 3 -> 4 + // 4 -> 5 + // and do not create + // 1 -> 4 + // 2 -> 5 + // Note that in this example we could also avoid creating the dependency + // `2 -> 4`. But if we remove `instr_3` that dependency is required to + // order the barriers. So we generate it to avoid a special case. + break; + } + } + } + + // Side effect dependencies. + if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) { + for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) { + SchedulingNode* other_node = GetNode(other); + if (other_node->IsSchedulingBarrier()) { + // We have reached a scheduling barrier so we can stop further + // processing. + DCHECK(HasImmediateOtherDependency(other_node, instruction_node)); + break; + } + if (HasSideEffectDependency(other, instruction)) { + AddOtherDependency(other_node, instruction_node); + } + } + } + + // Environment dependencies. + // We do not need to process those if the instruction is a scheduling barrier, + // since the barrier already has non-data dependencies on all following + // instructions. + if (!is_scheduling_barrier) { + for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { + // Note that here we could stop processing if the environment holder is + // across a scheduling barrier. 
But checking this would likely require + // more work than simply iterating through environment uses. + AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node); + } + } +} + +bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node, + const SchedulingNode* other) const { + return ContainsElement(node->GetDataPredecessors(), other); +} + +bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction, + const HInstruction* other_instruction) const { + const SchedulingNode* node = GetNode(instruction); + const SchedulingNode* other = GetNode(other_instruction); + if (node == nullptr || other == nullptr) { + // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their + // corresponding SchedulingNode in the graph, and tell whether there is a dependency. + // Otherwise there is no dependency from SchedulingGraph's perspective, for example, + // instruction and other_instruction are in different basic blocks. + return false; + } + return HasImmediateDataDependency(node, other); +} + +bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node, + const SchedulingNode* other) const { + return ContainsElement(node->GetOtherPredecessors(), other); +} + +bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction, + const HInstruction* other_instruction) const { + const SchedulingNode* node = GetNode(instruction); + const SchedulingNode* other = GetNode(other_instruction); + if (node == nullptr || other == nullptr) { + // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their + // corresponding SchedulingNode in the graph, and tell whether there is a dependency. + // Otherwise there is no dependency from SchedulingGraph's perspective, for example, + // instruction and other_instruction are in different basic blocks. 
+ return false; + } + return HasImmediateOtherDependency(node, other); +} + +static const std::string InstructionTypeId(const HInstruction* instruction) { + std::string id; + Primitive::Type type = instruction->GetType(); + if (type == Primitive::kPrimNot) { + id.append("l"); + } else { + id.append(Primitive::Descriptor(instruction->GetType())); + } + // Use lower-case to be closer to the `HGraphVisualizer` output. + id[0] = std::tolower(id[0]); + id.append(std::to_string(instruction->GetId())); + return id; +} + +// Ideally we would reuse the graph visualizer code, but it is not available +// from here and it is not worth moving all that code only for our use. +static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) { + const HInstruction* instruction = node->GetInstruction(); + // Use the instruction typed id as the node identifier. + std::string instruction_id = InstructionTypeId(instruction); + output << instruction_id << "[shape=record, label=\"" + << instruction_id << ' ' << instruction->DebugName() << " ["; + // List the instruction's inputs in its description. When visualizing the + // graph this helps differentiating data inputs from other dependencies. + const char* seperator = ""; + for (const HInstruction* input : instruction->GetInputs()) { + output << seperator << InstructionTypeId(input); + seperator = ","; + } + output << "]"; + // Other properties of the node. + output << "\\ninternal_latency: " << node->GetInternalLatency(); + output << "\\ncritical_path: " << node->GetCriticalPath(); + if (node->IsSchedulingBarrier()) { + output << "\\n(barrier)"; + } + output << "\"];\n"; + // We want program order to go from top to bottom in the graph output, so we + // reverse the edges and specify `dir=back`. 
+ for (const SchedulingNode* predecessor : node->GetDataPredecessors()) { + const HInstruction* predecessor_instruction = predecessor->GetInstruction(); + output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n " + << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n"; + } + for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) { + const HInstruction* predecessor_instruction = predecessor->GetInstruction(); + output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n " + << "[dir=back,color=blue]\n"; + } +} + +void SchedulingGraph::DumpAsDotGraph(const std::string& description, + const ArenaVector<SchedulingNode*>& initial_candidates) { + // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that + // we should move this dotty graph dump feature to visualizer, and have a compiler option for it. + std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app); + // Description of this graph, as a comment. + output << "// " << description << "\n"; + // Start the dot graph. Use an increasing index for easier differentiation. + output << "digraph G {\n"; + for (const auto& entry : nodes_map_) { + DumpAsDotNode(output, entry.second); + } + // Create a fake 'end_of_scheduling' node to help visualization of critical_paths. + for (auto node : initial_candidates) { + const HInstruction* instruction = node->GetInstruction(); + output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n " + << "[label=\"" << node->GetLatency() << "\",dir=back]\n"; + } + // End of the dot graph. + output << "}\n"; + output.close(); +} + +SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition( + ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const { + // Schedule condition inputs that can be materialized immediately before their use. 
+ // In following example, after we've scheduled HSelect, we want LessThan to be scheduled + // immediately, because it is a materialized condition, and will be emitted right before HSelect + // in codegen phase. + // + // i20 HLessThan [...] HLessThan HAdd HAdd + // i21 HAdd [...] ===> | | | + // i22 HAdd [...] +----------+---------+ + // i23 HSelect [i21, i22, i20] HSelect + + if (prev_select_ == nullptr) { + return nullptr; + } + + const HInstruction* instruction = prev_select_->GetInstruction(); + const HCondition* condition = nullptr; + DCHECK(instruction != nullptr); + + if (instruction->IsIf()) { + condition = instruction->AsIf()->InputAt(0)->AsCondition(); + } else if (instruction->IsSelect()) { + condition = instruction->AsSelect()->GetCondition()->AsCondition(); + } + + SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr; + + if ((condition_node != nullptr) && + condition->HasOnlyOneNonEnvironmentUse() && + ContainsElement(*nodes, condition_node)) { + DCHECK(!condition_node->HasUnscheduledSuccessors()); + // Remove the condition from the list of candidates and schedule it. + RemoveElement(*nodes, condition_node); + return condition_node; + } + + return nullptr; +} + +SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode( + ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) { + DCHECK(!nodes->empty()); + SchedulingNode* select_node = nullptr; + + // Optimize for materialized condition and its emit before use scenario. + select_node = SelectMaterializedCondition(nodes, graph); + + if (select_node == nullptr) { + // Get highest priority node based on critical path information. 
+ select_node = (*nodes)[0]; + size_t select = 0; + for (size_t i = 1, e = nodes->size(); i < e; i++) { + SchedulingNode* check = (*nodes)[i]; + SchedulingNode* candidate = (*nodes)[select]; + select_node = GetHigherPrioritySchedulingNode(candidate, check); + if (select_node == check) { + select = i; + } + } + DeleteNodeAtIndex(nodes, select); + } + + prev_select_ = select_node; + return select_node; +} + +SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode( + SchedulingNode* candidate, SchedulingNode* check) const { + uint32_t candidate_path = candidate->GetCriticalPath(); + uint32_t check_path = check->GetCriticalPath(); + // First look at the critical_path. + if (check_path != candidate_path) { + return check_path < candidate_path ? check : candidate; + } + // If both critical paths are equal, schedule instructions with a higher latency + // first in program order. + return check->GetLatency() < candidate->GetLatency() ? check : candidate; +} + +void HScheduler::Schedule(HGraph* graph) { + for (HBasicBlock* block : graph->GetReversePostOrder()) { + if (IsSchedulable(block)) { + Schedule(block); + } + } +} + +void HScheduler::Schedule(HBasicBlock* block) { + ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler)); + + // Build the scheduling graph. + scheduling_graph_.Clear(); + for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* instruction = it.Current(); + SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction)); + CalculateLatency(node); + scheduling_nodes.push_back(node); + } + + if (scheduling_graph_.Size() <= 1) { + scheduling_graph_.Clear(); + return; + } + + cursor_ = block->GetLastInstruction(); + + // Find the initial candidates for scheduling. 
+ candidates_.clear(); + for (SchedulingNode* node : scheduling_nodes) { + if (!node->HasUnscheduledSuccessors()) { + node->MaybeUpdateCriticalPath(node->GetLatency()); + candidates_.push_back(node); + } + } + + ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler)); + if (kDumpDotSchedulingGraphs) { + // Remember the list of initial candidates for debug output purposes. + initial_candidates.assign(candidates_.begin(), candidates_.end()); + } + + // Schedule all nodes. + while (!candidates_.empty()) { + Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_)); + } + + if (kDumpDotSchedulingGraphs) { + // Dump the graph in `dot` format. + HGraph* graph = block->GetGraph(); + std::stringstream description; + description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx()) + << " B" << block->GetBlockId(); + scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates); + } +} + +void HScheduler::Schedule(SchedulingNode* scheduling_node) { + // Check whether any of the node's predecessors will be valid candidates after + // this node is scheduled. + uint32_t path_to_node = scheduling_node->GetCriticalPath(); + for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) { + predecessor->MaybeUpdateCriticalPath( + path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency()); + predecessor->DecrementNumberOfUnscheduledSuccessors(); + if (!predecessor->HasUnscheduledSuccessors()) { + candidates_.push_back(predecessor); + } + } + for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) { + // Do not update the critical path. + // The 'other' (so 'non-data') dependencies (usually) do not represent a + // 'material' dependency of nodes on others. They exist for program + // correctness. So we do not use them to compute the critical path. 
+ predecessor->DecrementNumberOfUnscheduledSuccessors(); + if (!predecessor->HasUnscheduledSuccessors()) { + candidates_.push_back(predecessor); + } + } + + Schedule(scheduling_node->GetInstruction()); +} + +// Move an instruction after cursor instruction inside one basic block. +static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) { + DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock()); + DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction()); + DCHECK(!instruction->IsControlFlow()); + DCHECK(!cursor->IsControlFlow()); + instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false); +} + +void HScheduler::Schedule(HInstruction* instruction) { + if (instruction == cursor_) { + cursor_ = cursor_->GetPrevious(); + } else { + MoveAfterInBlock(instruction, cursor_); + } +} + +bool HScheduler::IsSchedulable(const HInstruction* instruction) const { + // We want to avoid exhaustively listing all instructions, so we first check + // for instruction categories that we know are safe. + if (instruction->IsControlFlow() || + instruction->IsConstant()) { + return true; + } + // Currently all unary and binary operations are safe to schedule, so avoid + // checking for each of them individually. + // Since nothing prevents a new scheduling-unsafe HInstruction to subclass + // HUnaryOperation (or HBinaryOperation), check in debug mode that we have + // the exhaustive lists here. 
+ if (instruction->IsUnaryOperation()) { + DCHECK(instruction->IsBooleanNot() || + instruction->IsNot() || + instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName(); + return true; + } + if (instruction->IsBinaryOperation()) { + DCHECK(instruction->IsAdd() || + instruction->IsAnd() || + instruction->IsCompare() || + instruction->IsCondition() || + instruction->IsDiv() || + instruction->IsMul() || + instruction->IsOr() || + instruction->IsRem() || + instruction->IsRor() || + instruction->IsShl() || + instruction->IsShr() || + instruction->IsSub() || + instruction->IsUShr() || + instruction->IsXor()) << "unexpected instruction " << instruction->DebugName(); + return true; + } + // The scheduler should not see any of these. + DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName(); + // List of instructions explicitly excluded: + // HClearException + // HClinitCheck + // HDeoptimize + // HLoadClass + // HLoadException + // HMemoryBarrier + // HMonitorOperation + // HNativeDebugInfo + // HThrow + // HTryBoundary + // TODO: Some of the instructions above may be safe to schedule (maybe as + // scheduling barriers). 
+ return instruction->IsArrayGet() || + instruction->IsArraySet() || + instruction->IsArrayLength() || + instruction->IsBoundType() || + instruction->IsBoundsCheck() || + instruction->IsCheckCast() || + instruction->IsClassTableGet() || + instruction->IsCurrentMethod() || + instruction->IsDivZeroCheck() || + instruction->IsInstanceFieldGet() || + instruction->IsInstanceFieldSet() || + instruction->IsInstanceOf() || + instruction->IsInvokeInterface() || + instruction->IsInvokeStaticOrDirect() || + instruction->IsInvokeUnresolved() || + instruction->IsInvokeVirtual() || + instruction->IsLoadString() || + instruction->IsNewArray() || + instruction->IsNewInstance() || + instruction->IsNullCheck() || + instruction->IsPackedSwitch() || + instruction->IsParameterValue() || + instruction->IsPhi() || + instruction->IsReturn() || + instruction->IsReturnVoid() || + instruction->IsSelect() || + instruction->IsStaticFieldGet() || + instruction->IsStaticFieldSet() || + instruction->IsSuspendCheck() || + instruction->IsTypeConversion() || + instruction->IsUnresolvedInstanceFieldGet() || + instruction->IsUnresolvedInstanceFieldSet() || + instruction->IsUnresolvedStaticFieldGet() || + instruction->IsUnresolvedStaticFieldSet(); +} + +bool HScheduler::IsSchedulable(const HBasicBlock* block) const { + // We may be only interested in loop blocks. + if (only_optimize_loop_blocks_ && !block->IsInLoop()) { + return false; + } + if (block->GetTryCatchInformation() != nullptr) { + // Do not schedule blocks that are part of try-catch. + // Because scheduler cannot see if catch block has assumptions on the instruction order in + // the try block. In following example, if we enable scheduler for the try block, + // MulitiplyAccumulate may be scheduled before DivZeroCheck, + // which can result in an incorrect value in the catch block. 
+ // try { + // a = a/b; // DivZeroCheck + // // Div + // c = c*d+e; // MultiplyAccumulate + // } catch {System.out.print(c); } + return false; + } + // Check whether all instructions in this block are schedulable. + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + if (!IsSchedulable(it.Current())) { + return false; + } + } + return true; +} + +bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const { + return instr->IsControlFlow() || + // Don't break calling convention. + instr->IsParameterValue() || + // Code generation of goto relies on SuspendCheck's position. + instr->IsSuspendCheck(); +} + +void HInstructionScheduling::Run(bool only_optimize_loop_blocks, + bool schedule_randomly) { + // Avoid compilation error when compiling for unsupported instruction set. + UNUSED(only_optimize_loop_blocks); + UNUSED(schedule_randomly); + switch (instruction_set_) { +#ifdef ART_ENABLE_CODEGEN_arm64 + case kArm64: { + // Phase-local allocator that allocates scheduler internal data structures like + // scheduling nodes, internal nodes map, dependencies, etc. + ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool()); + + CriticalPathSchedulingNodeSelector critical_path_selector; + RandomSchedulingNodeSelector random_selector; + SchedulingNodeSelector* selector = schedule_randomly + ?
static_cast<SchedulingNodeSelector*>(&random_selector) + : static_cast<SchedulingNodeSelector*>(&critical_path_selector); + + arm64::HSchedulerARM64 scheduler(&arena_allocator, selector); + scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks); + scheduler.Schedule(graph_); + break; + } +#endif + default: + break; + } +} + +} // namespace art diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h new file mode 100644 index 0000000000..ab0dad4300 --- /dev/null +++ b/compiler/optimizing/scheduler.h @@ -0,0 +1,487 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_H_ +#define ART_COMPILER_OPTIMIZING_SCHEDULER_H_ + +#include <fstream> + +#include "base/time_utils.h" +#include "driver/compiler_driver.h" +#include "nodes.h" +#include "optimization.h" + +namespace art { + +// General description of instruction scheduling. +// +// This pass tries to improve the quality of the generated code by reordering +// instructions in the graph to avoid execution delays caused by execution +// dependencies. +// Currently, scheduling is performed at the block level, so no `HInstruction` +// ever leaves its block in this pass. +// +// The scheduling process iterates through blocks in the graph. For blocks that +// we can and want to schedule: +// 1) Build a dependency graph for instructions. 
+// It includes data dependencies (inputs/uses), but also environment +// dependencies and side-effect dependencies. +// 2) Schedule the dependency graph. +// This is a topological sort of the dependency graph, using heuristics to +// decide what node to scheduler first when there are multiple candidates. +// +// A few factors impacting the quality of the scheduling are: +// - The heuristics used to decide what node to schedule in the topological sort +// when there are multiple valid candidates. There is a wide range of +// complexity possible here, going from a simple model only considering +// latencies, to a super detailed CPU pipeline model. +// - Fewer dependencies in the dependency graph give more freedom for the +// scheduling heuristics. For example de-aliasing can allow possibilities for +// reordering of memory accesses. +// - The level of abstraction of the IR. It is easier to evaluate scheduling for +// IRs that translate to a single assembly instruction than for IRs +// that generate multiple assembly instructions or generate different code +// depending on properties of the IR. +// - Scheduling is performed before register allocation, it is not aware of the +// impact of moving instructions on register allocation. +// +// +// The scheduling code uses the terms predecessors, successors, and dependencies. +// This can be confusing at times, so here are clarifications. +// These terms are used from the point of view of the program dependency graph. So +// the inputs of an instruction are part of its dependencies, and hence part its +// predecessors. So the uses of an instruction are (part of) its successors. +// (Side-effect dependencies can yield predecessors or successors that are not +// inputs or uses.) +// +// Here is a trivial example. For the Java code: +// +// int a = 1 + 2; +// +// we would have the instructions +// +// i1 HIntConstant 1 +// i2 HIntConstant 2 +// i3 HAdd [i1,i2] +// +// `i1` and `i2` are predecessors of `i3`. 
+// `i3` is a successor of `i1` and a successor of `i2`. +// In a scheduling graph for this code we would have three nodes `n1`, `n2`, +// and `n3` (respectively for instructions `i1`, `i1`, and `i3`). +// Conceptually the program dependency graph for this would contain two edges +// +// n1 -> n3 +// n2 -> n3 +// +// Since we schedule backwards (starting from the last instruction in each basic +// block), the implementation of nodes keeps a list of pointers their +// predecessors. So `n3` would keep pointers to its predecessors `n1` and `n2`. +// +// Node dependencies are also referred to from the program dependency graph +// point of view: we say that node `B` immediately depends on `A` if there is an +// edge from `A` to `B` in the program dependency graph. `A` is a predecessor of +// `B`, `B` is a successor of `A`. In the example above `n3` depends on `n1` and +// `n2`. +// Since nodes in the scheduling graph keep a list of their predecessors, node +// `B` will have a pointer to its predecessor `A`. +// As we schedule backwards, `B` will be selected for scheduling before `A` is. +// +// So the scheduling for the example above could happen as follow +// +// |---------------------------+------------------------| +// | candidates for scheduling | instructions scheduled | +// | --------------------------+------------------------| +// +// The only node without successors is `n3`, so it is the only initial +// candidate. +// +// | n3 | (none) | +// +// We schedule `n3` as the last (and only) instruction. All its predecessors +// that do not have any unscheduled successors become candidate. That is, `n1` +// and `n2` become candidates. +// +// | n1, n2 | n3 | +// +// One of the candidates is selected. In practice this is where scheduling +// heuristics kick in, to decide which of the candidates should be selected. +// In this example, let it be `n1`. It is scheduled before previously scheduled +// nodes (in program order). 
There are no other nodes to add to the list of +// candidates. +// +// | n2 | n1 | +// | | n3 | +// +// The only candidate available for scheduling is `n2`. Schedule it before +// (in program order) the previously scheduled nodes. +// +// | (none) | n2 | +// | | n1 | +// | | n3 | +// |---------------------------+------------------------| +// +// So finally the instructions will be executed in the order `i2`, `i1`, and `i3`. +// In this trivial example, it does not matter which of `i1` and `i2` is +// scheduled first since they are constants. However the same process would +// apply if `i1` and `i2` were actual operations (for example `HMul` and `HDiv`). + +// Set to true to have instruction scheduling dump scheduling graphs to the file +// `scheduling_graphs.dot`. See `SchedulingGraph::DumpAsDotGraph()`. +static constexpr bool kDumpDotSchedulingGraphs = false; + +// Typically used as a default instruction latency. +static constexpr uint32_t kGenericInstructionLatency = 1; + +class HScheduler; + +/** + * A node representing an `HInstruction` in the `SchedulingGraph`. 
+ */ +class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { + public: + SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier) + : latency_(0), + internal_latency_(0), + critical_path_(0), + instruction_(instr), + is_scheduling_barrier_(is_scheduling_barrier), + data_predecessors_(arena->Adapter(kArenaAllocScheduler)), + other_predecessors_(arena->Adapter(kArenaAllocScheduler)), + num_unscheduled_successors_(0) { + data_predecessors_.reserve(kPreallocatedPredecessors); + } + + void AddDataPredecessor(SchedulingNode* predecessor) { + data_predecessors_.push_back(predecessor); + predecessor->num_unscheduled_successors_++; + } + + void AddOtherPredecessor(SchedulingNode* predecessor) { + other_predecessors_.push_back(predecessor); + predecessor->num_unscheduled_successors_++; + } + + void DecrementNumberOfUnscheduledSuccessors() { + num_unscheduled_successors_--; + } + + void MaybeUpdateCriticalPath(uint32_t other_critical_path) { + critical_path_ = std::max(critical_path_, other_critical_path); + } + + bool HasUnscheduledSuccessors() const { + return num_unscheduled_successors_ != 0; + } + + HInstruction* GetInstruction() const { return instruction_; } + uint32_t GetLatency() const { return latency_; } + void SetLatency(uint32_t latency) { latency_ = latency; } + uint32_t GetInternalLatency() const { return internal_latency_; } + void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; } + uint32_t GetCriticalPath() const { return critical_path_; } + bool IsSchedulingBarrier() const { return is_scheduling_barrier_; } + const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; } + const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; } + + private: + // The latency of this node. 
It represents the latency between the moment the + // last instruction for this node has executed to the moment the result + // produced by this node is available to users. + uint32_t latency_; + // This represents the time spent *within* the generated code for this node. + // It should be zero for nodes that only generate a single instruction. + uint32_t internal_latency_; + + // The critical path from this instruction to the end of scheduling. It is + // used by the scheduling heuristics to measure the priority of this instruction. + // It is defined as + // critical_path_ = latency_ + max((use.internal_latency_ + use.critical_path_) for all uses) + // (Note that here 'uses' is equivalent to 'data successors'. Also see comments in + // `HScheduler::Schedule(SchedulingNode* scheduling_node)`). + uint32_t critical_path_; + + // The instruction that this node represents. + HInstruction* const instruction_; + + // If a node is scheduling barrier, other nodes cannot be scheduled before it. + const bool is_scheduling_barrier_; + + // The lists of predecessors. They cannot be scheduled before this node. Once + // this node is scheduled, we check whether any of its predecessors has become a + // valid candidate for scheduling. + // Predecessors in `data_predecessors_` are data dependencies. Those in + // `other_predecessors_` contain side-effect dependencies, environment + // dependencies, and scheduling barrier dependencies. + ArenaVector<SchedulingNode*> data_predecessors_; + ArenaVector<SchedulingNode*> other_predecessors_; + + // The number of unscheduled successors for this node. This number is + // decremented as successors are scheduled. When it reaches zero this node + // becomes a valid candidate to schedule. + uint32_t num_unscheduled_successors_; + + static constexpr size_t kPreallocatedPredecessors = 4; +}; + +/* + * Directed acyclic graph for scheduling. 
+ */ +class SchedulingGraph : public ValueObject { + public: + SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena) + : scheduler_(scheduler), + arena_(arena), + contains_scheduling_barrier_(false), + nodes_map_(arena_->Adapter(kArenaAllocScheduler)) {} + + SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) { + SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier); + nodes_map_.Insert(std::make_pair(instr, node)); + contains_scheduling_barrier_ |= is_scheduling_barrier; + AddDependencies(instr, is_scheduling_barrier); + return node; + } + + void Clear() { + nodes_map_.Clear(); + contains_scheduling_barrier_ = false; + } + + SchedulingNode* GetNode(const HInstruction* instr) const { + auto it = nodes_map_.Find(instr); + if (it == nodes_map_.end()) { + return nullptr; + } else { + return it->second; + } + } + + bool IsSchedulingBarrier(const HInstruction* instruction) const; + + bool HasImmediateDataDependency(const SchedulingNode* node, const SchedulingNode* other) const; + bool HasImmediateDataDependency(const HInstruction* node, const HInstruction* other) const; + bool HasImmediateOtherDependency(const SchedulingNode* node, const SchedulingNode* other) const; + bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const; + + size_t Size() const { + return nodes_map_.Size(); + } + + // Dump the scheduling graph, in dot file format, appending it to the file + // `scheduling_graphs.dot`. 
+ void DumpAsDotGraph(const std::string& description, + const ArenaVector<SchedulingNode*>& initial_candidates); + + protected: + void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency); + void AddDataDependency(SchedulingNode* node, SchedulingNode* dependency) { + AddDependency(node, dependency, /*is_data_dependency*/true); + } + void AddOtherDependency(SchedulingNode* node, SchedulingNode* dependency) { + AddDependency(node, dependency, /*is_data_dependency*/false); + } + + // Add dependencies nodes for the given `HInstruction`: inputs, environments, and side-effects. + void AddDependencies(HInstruction* instruction, bool is_scheduling_barrier = false); + + const HScheduler* const scheduler_; + + ArenaAllocator* const arena_; + + bool contains_scheduling_barrier_; + + ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_; +}; + +/* + * The visitors derived from this base class are used by schedulers to evaluate + * the latencies of `HInstruction`s. + */ +class SchedulingLatencyVisitor : public HGraphDelegateVisitor { + public: + // This class and its sub-classes will never be used to drive a visit of an + // `HGraph` but only to visit `HInstructions` one at a time, so we do not need + // to pass a valid graph to `HGraphDelegateVisitor()`. + SchedulingLatencyVisitor() : HGraphDelegateVisitor(nullptr) {} + + void VisitInstruction(HInstruction* instruction) OVERRIDE { + LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". " + "Architecture-specific scheduling latency visitors must handle all instructions" + " (potentially by overriding the generic `VisitInstruction()`."; + UNREACHABLE(); + } + + void Visit(HInstruction* instruction) { + instruction->Accept(this); + } + + void CalculateLatency(SchedulingNode* node) { + // By default nodes have no internal latency. 
+ last_visited_internal_latency_ = 0; + Visit(node->GetInstruction()); + } + + uint32_t GetLastVisitedLatency() const { return last_visited_latency_; } + uint32_t GetLastVisitedInternalLatency() const { return last_visited_internal_latency_; } + + protected: + // The latency of the most recent visited SchedulingNode. + // This is for reporting the latency value to the user of this visitor. + uint32_t last_visited_latency_; + // This represents the time spent *within* the generated code for the most recent visited + // SchedulingNode. This is for reporting the internal latency value to the user of this visitor. + uint32_t last_visited_internal_latency_; +}; + +class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> { + public: + virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + const SchedulingGraph& graph) = 0; + virtual ~SchedulingNodeSelector() {} + protected: + static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) { + (*nodes)[index] = nodes->back(); + nodes->pop_back(); + } +}; + +/* + * Select a `SchedulingNode` at random within the candidates. + */ +class RandomSchedulingNodeSelector : public SchedulingNodeSelector { + public: + explicit RandomSchedulingNodeSelector() : seed_(0) { + seed_ = static_cast<uint32_t>(NanoTime()); + srand(seed_); + } + + SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + const SchedulingGraph& graph) OVERRIDE { + UNUSED(graph); + DCHECK(!nodes->empty()); + size_t select = rand_r(&seed_) % nodes->size(); + SchedulingNode* select_node = (*nodes)[select]; + DeleteNodeAtIndex(nodes, select); + return select_node; + } + + uint32_t seed_; +}; + +/* + * Select a `SchedulingNode` according to critical path information, + * with heuristics to favor certain instruction patterns like materialized condition. 
+ */ +class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector { + public: + CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {} + + SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + const SchedulingGraph& graph) OVERRIDE; + + protected: + SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate, + SchedulingNode* check) const; + + SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes, + const SchedulingGraph& graph) const; + + private: + const SchedulingNode* prev_select_; +}; + +class HScheduler { + public: + HScheduler(ArenaAllocator* arena, + SchedulingLatencyVisitor* latency_visitor, + SchedulingNodeSelector* selector) + : arena_(arena), + latency_visitor_(latency_visitor), + selector_(selector), + only_optimize_loop_blocks_(true), + scheduling_graph_(this, arena), + candidates_(arena_->Adapter(kArenaAllocScheduler)) {} + virtual ~HScheduler() {} + + void Schedule(HGraph* graph); + + void SetOnlyOptimizeLoopBlocks(bool loop_only) { only_optimize_loop_blocks_ = loop_only; } + + // Instructions can not be rescheduled across a scheduling barrier. + virtual bool IsSchedulingBarrier(const HInstruction* instruction) const; + + protected: + void Schedule(HBasicBlock* block); + void Schedule(SchedulingNode* scheduling_node); + void Schedule(HInstruction* instruction); + + // Any instruction returning `false` via this method will prevent its + // containing basic block from being scheduled. + // This method is used to restrict scheduling to instructions that we know are + // safe to handle. 
+ virtual bool IsSchedulable(const HInstruction* instruction) const; + bool IsSchedulable(const HBasicBlock* block) const; + + void CalculateLatency(SchedulingNode* node) { + latency_visitor_->CalculateLatency(node); + node->SetLatency(latency_visitor_->GetLastVisitedLatency()); + node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency()); + } + + ArenaAllocator* const arena_; + SchedulingLatencyVisitor* const latency_visitor_; + SchedulingNodeSelector* const selector_; + bool only_optimize_loop_blocks_; + + // We instantiate the members below as part of this class to avoid + // instantiating them locally for every chunk scheduled. + SchedulingGraph scheduling_graph_; + // A pointer indicating where the next instruction to be scheduled will be inserted. + HInstruction* cursor_; + // The list of candidates for scheduling. A node becomes a candidate when all + // its predecessors have been scheduled. + ArenaVector<SchedulingNode*> candidates_; + + private: + DISALLOW_COPY_AND_ASSIGN(HScheduler); +}; + +inline bool SchedulingGraph::IsSchedulingBarrier(const HInstruction* instruction) const { + return scheduler_->IsSchedulingBarrier(instruction); +} + +class HInstructionScheduling : public HOptimization { + public: + HInstructionScheduling(HGraph* graph, InstructionSet instruction_set) + : HOptimization(graph, kInstructionScheduling), + instruction_set_(instruction_set) {} + + void Run() { + Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false); + } + void Run(bool only_optimize_loop_blocks, bool schedule_randomly); + + static constexpr const char* kInstructionScheduling = "scheduler"; + + const InstructionSet instruction_set_; + + private: + DISALLOW_COPY_AND_ASSIGN(HInstructionScheduling); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_SCHEDULER_H_ diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc new file mode 100644 index 0000000000..e3701fbcb1 --- /dev/null +++ 
b/compiler/optimizing/scheduler_arm64.cc @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "scheduler_arm64.h" +#include "code_generator_utils.h" + +namespace art { +namespace arm64 { + +void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) { + last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType()) + ? kArm64FloatingPointOpLatency + : kArm64IntegerOpLatency; +} + +void SchedulingLatencyVisitorARM64::VisitBitwiseNegatedRight( + HBitwiseNegatedRight* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64IntegerOpLatency; +} + +void SchedulingLatencyVisitorARM64::VisitArm64DataProcWithShifterOp( + HArm64DataProcWithShifterOp* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64DataProcWithShifterOpLatency; +} + +void SchedulingLatencyVisitorARM64::VisitIntermediateAddress( + HIntermediateAddress* ATTRIBUTE_UNUSED) { + // Although the code generated is a simple `add` instruction, we found through empirical results + // that spacing it from its use in memory accesses was beneficial. 
+ last_visited_latency_ = kArm64IntegerOpLatency + 2; +} + +void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64MulIntegerLatency; +} + +void SchedulingLatencyVisitorARM64::VisitArrayGet(HArrayGet* instruction) { + if (!instruction->GetArray()->IsIntermediateAddress()) { + // Take the intermediate address computation into account. + last_visited_internal_latency_ = kArm64IntegerOpLatency; + } + last_visited_latency_ = kArm64MemoryLoadLatency; +} + +void SchedulingLatencyVisitorARM64::VisitArrayLength(HArrayLength* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64MemoryLoadLatency; +} + +void SchedulingLatencyVisitorARM64::VisitArraySet(HArraySet* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64MemoryStoreLatency; +} + +void SchedulingLatencyVisitorARM64::VisitBoundsCheck(HBoundsCheck* ATTRIBUTE_UNUSED) { + last_visited_internal_latency_ = kArm64IntegerOpLatency; + // Users do not use any data results. + last_visited_latency_ = 0; +} + +void SchedulingLatencyVisitorARM64::VisitDiv(HDiv* instr) { + Primitive::Type type = instr->GetResultType(); + switch (type) { + case Primitive::kPrimFloat: + last_visited_latency_ = kArm64DivFloatLatency; + break; + case Primitive::kPrimDouble: + last_visited_latency_ = kArm64DivDoubleLatency; + break; + default: + // Follow the code path used by code generation. 
+ if (instr->GetRight()->IsConstant()) { + int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant()); + if (imm == 0) { + last_visited_internal_latency_ = 0; + last_visited_latency_ = 0; + } else if (imm == 1 || imm == -1) { + last_visited_internal_latency_ = 0; + last_visited_latency_ = kArm64IntegerOpLatency; + } else if (IsPowerOfTwo(AbsOrMin(imm))) { + last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency; + last_visited_latency_ = kArm64IntegerOpLatency; + } else { + DCHECK(imm <= -2 || imm >= 2); + last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency; + last_visited_latency_ = kArm64MulIntegerLatency; + } + } else { + last_visited_latency_ = kArm64DivIntegerLatency; + } + break; + } +} + +void SchedulingLatencyVisitorARM64::VisitInstanceFieldGet(HInstanceFieldGet* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64MemoryLoadLatency; +} + +void SchedulingLatencyVisitorARM64::VisitInstanceOf(HInstanceOf* ATTRIBUTE_UNUSED) { + last_visited_internal_latency_ = kArm64CallInternalLatency; + last_visited_latency_ = kArm64IntegerOpLatency; +} + +void SchedulingLatencyVisitorARM64::VisitInvoke(HInvoke* ATTRIBUTE_UNUSED) { + last_visited_internal_latency_ = kArm64CallInternalLatency; + last_visited_latency_ = kArm64CallLatency; +} + +void SchedulingLatencyVisitorARM64::VisitLoadString(HLoadString* ATTRIBUTE_UNUSED) { + last_visited_internal_latency_ = kArm64LoadStringInternalLatency; + last_visited_latency_ = kArm64MemoryLoadLatency; +} + +void SchedulingLatencyVisitorARM64::VisitMul(HMul* instr) { + last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType()) + ? 
kArm64MulFloatingPointLatency + : kArm64MulIntegerLatency; +} + +void SchedulingLatencyVisitorARM64::VisitNewArray(HNewArray* ATTRIBUTE_UNUSED) { + last_visited_internal_latency_ = kArm64IntegerOpLatency + kArm64CallInternalLatency; + last_visited_latency_ = kArm64CallLatency; +} + +void SchedulingLatencyVisitorARM64::VisitNewInstance(HNewInstance* instruction) { + if (instruction->IsStringAlloc()) { + last_visited_internal_latency_ = 2 + kArm64MemoryLoadLatency + kArm64CallInternalLatency; + } else { + last_visited_internal_latency_ = kArm64CallInternalLatency; + } + last_visited_latency_ = kArm64CallLatency; +} + +void SchedulingLatencyVisitorARM64::VisitRem(HRem* instruction) { + if (Primitive::IsFloatingPointType(instruction->GetResultType())) { + last_visited_internal_latency_ = kArm64CallInternalLatency; + last_visited_latency_ = kArm64CallLatency; + } else { + // Follow the code path used by code generation. + if (instruction->GetRight()->IsConstant()) { + int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant()); + if (imm == 0) { + last_visited_internal_latency_ = 0; + last_visited_latency_ = 0; + } else if (imm == 1 || imm == -1) { + last_visited_internal_latency_ = 0; + last_visited_latency_ = kArm64IntegerOpLatency; + } else if (IsPowerOfTwo(AbsOrMin(imm))) { + last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency; + last_visited_latency_ = kArm64IntegerOpLatency; + } else { + DCHECK(imm <= -2 || imm >= 2); + last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency; + last_visited_latency_ = kArm64MulIntegerLatency; + } + } else { + last_visited_internal_latency_ = kArm64DivIntegerLatency; + last_visited_latency_ = kArm64MulIntegerLatency; + } + } +} + +void SchedulingLatencyVisitorARM64::VisitStaticFieldGet(HStaticFieldGet* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64MemoryLoadLatency; +} + +void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) { + HBasicBlock* block = instruction->GetBlock(); + 
DCHECK((block->GetLoopInformation() != nullptr) || + (block->IsEntryBlock() && instruction->GetNext()->IsGoto())); + // Users do not use any data results. + last_visited_latency_ = 0; +} + +void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) { + if (Primitive::IsFloatingPointType(instr->GetResultType()) || + Primitive::IsFloatingPointType(instr->GetInputType())) { + last_visited_latency_ = kArm64TypeConversionFloatingPointIntegerLatency; + } else { + last_visited_latency_ = kArm64IntegerOpLatency; + } +} + +} // namespace arm64 +} // namespace art diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h new file mode 100644 index 0000000000..702027c535 --- /dev/null +++ b/compiler/optimizing/scheduler_arm64.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_ +#define ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_ + +#include "scheduler.h" + +namespace art { +namespace arm64 { + +static constexpr uint32_t kArm64MemoryLoadLatency = 5; +static constexpr uint32_t kArm64MemoryStoreLatency = 3; + +static constexpr uint32_t kArm64CallInternalLatency = 10; +static constexpr uint32_t kArm64CallLatency = 5; + +// AArch64 instruction latency. +// We currently assume that all arm64 CPUs share the same instruction latency list. 
+static constexpr uint32_t kArm64IntegerOpLatency = 2; +static constexpr uint32_t kArm64FloatingPointOpLatency = 5; + + +static constexpr uint32_t kArm64DataProcWithShifterOpLatency = 3; +static constexpr uint32_t kArm64DivDoubleLatency = 30; +static constexpr uint32_t kArm64DivFloatLatency = 15; +static constexpr uint32_t kArm64DivIntegerLatency = 5; +static constexpr uint32_t kArm64LoadStringInternalLatency = 7; +static constexpr uint32_t kArm64MulFloatingPointLatency = 6; +static constexpr uint32_t kArm64MulIntegerLatency = 6; +static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5; + +class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor { + public: + // Default visitor for instructions not handled specifically below. + void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) { + last_visited_latency_ = kArm64IntegerOpLatency; + } + +// We add a second unused parameter to be able to use this macro like the others +// defined in `nodes.h`. +#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \ + M(ArrayGet , unused) \ + M(ArrayLength , unused) \ + M(ArraySet , unused) \ + M(BinaryOperation , unused) \ + M(BoundsCheck , unused) \ + M(Div , unused) \ + M(InstanceFieldGet , unused) \ + M(InstanceOf , unused) \ + M(Invoke , unused) \ + M(LoadString , unused) \ + M(Mul , unused) \ + M(NewArray , unused) \ + M(NewInstance , unused) \ + M(Rem , unused) \ + M(StaticFieldGet , unused) \ + M(SuspendCheck , unused) \ + M(TypeConversion , unused) + +#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \ + M(BitwiseNegatedRight, unused) \ + M(MultiplyAccumulate, unused) \ + M(IntermediateAddress, unused) + +#define DECLARE_VISIT_INSTRUCTION(type, unused) \ + void Visit##type(H##type* instruction) OVERRIDE; + + FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION) + +#undef DECLARE_VISIT_INSTRUCTION +}; + 
+class HSchedulerARM64 : public HScheduler { + public: + HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector) + : HScheduler(arena, &arm64_latency_visitor_, selector) {} + ~HSchedulerARM64() OVERRIDE {} + + bool IsSchedulable(const HInstruction* instruction) const OVERRIDE { +#define CASE_INSTRUCTION_KIND(type, unused) case \ + HInstruction::InstructionKind::k##type: + switch (instruction->GetKind()) { + FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(CASE_INSTRUCTION_KIND) + return true; + FOR_EACH_CONCRETE_INSTRUCTION_ARM64(CASE_INSTRUCTION_KIND) + return true; + default: + return HScheduler::IsSchedulable(instruction); + } +#undef CASE_INSTRUCTION_KIND + } + + private: + SchedulingLatencyVisitorARM64 arm64_latency_visitor_; + DISALLOW_COPY_AND_ASSIGN(HSchedulerARM64); +}; + +} // namespace arm64 +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_ diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc new file mode 100644 index 0000000000..31d13e2a26 --- /dev/null +++ b/compiler/optimizing/scheduler_test.cc @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "base/arena_allocator.h" +#include "builder.h" +#include "codegen_test_utils.h" +#include "common_compiler_test.h" +#include "nodes.h" +#include "optimizing_unit_test.h" +#include "pc_relative_fixups_x86.h" +#include "register_allocator.h" +#include "scheduler.h" + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "scheduler_arm64.h" +#endif + +namespace art { + +// Return all combinations of ISA and code generator that are executable on +// hardware, or on simulator, and that we'd like to test. +static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { + ::std::vector<CodegenTargetConfig> v; + ::std::vector<CodegenTargetConfig> test_config_candidates = { +#ifdef ART_ENABLE_CODEGEN_arm + CodegenTargetConfig(kArm, create_codegen_arm), + CodegenTargetConfig(kThumb2, create_codegen_arm), +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 + CodegenTargetConfig(kArm64, create_codegen_arm64), +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + CodegenTargetConfig(kX86, create_codegen_x86), +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + CodegenTargetConfig(kX86_64, create_codegen_x86_64), +#endif +#ifdef ART_ENABLE_CODEGEN_mips + CodegenTargetConfig(kMips, create_codegen_mips), +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 + CodegenTargetConfig(kMips64, create_codegen_mips64) +#endif + }; + + for (auto test_config : test_config_candidates) { + if (CanExecute(test_config.GetInstructionSet())) { + v.push_back(test_config); + } + } + + return v; +} + +class SchedulerTest : public CommonCompilerTest {}; + +#ifdef ART_ENABLE_CODEGEN_arm64 +TEST_F(SchedulerTest, DependencyGraph) { + ArenaPool pool; + ArenaAllocator allocator(&pool); + HGraph* graph = CreateGraph(&allocator); + HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HBasicBlock* block1 = new (&allocator) HBasicBlock(graph); + graph->AddBlock(entry); + graph->AddBlock(block1); + graph->SetEntryBlock(entry); + + // entry: + // array ParameterValue + // c1 IntConstant + // c2 IntConstant + // block1: + // add1 Add [c1, c2] 
+ // add2 Add [add1, c2] + // mul Mul [add1, add2] + // div_check DivZeroCheck [add2] (env: add2, mul) + // div Div [add1, div_check] + // array_get1 ArrayGet [array, add1] + // array_set1 ArraySet [array, add1, add2] + // array_get2 ArrayGet [array, add1] + // array_set2 ArraySet [array, add1, add2] + + HInstruction* array = new (&allocator) HParameterValue(graph->GetDexFile(), + dex::TypeIndex(0), + 0, + Primitive::kPrimNot); + HInstruction* c1 = graph->GetIntConstant(1); + HInstruction* c2 = graph->GetIntConstant(10); + HInstruction* add1 = new (&allocator) HAdd(Primitive::kPrimInt, c1, c2); + HInstruction* add2 = new (&allocator) HAdd(Primitive::kPrimInt, add1, c2); + HInstruction* mul = new (&allocator) HMul(Primitive::kPrimInt, add1, add2); + HInstruction* div_check = new (&allocator) HDivZeroCheck(add2, 0); + HInstruction* div = new (&allocator) HDiv(Primitive::kPrimInt, add1, div_check, 0); + HInstruction* array_get1 = new (&allocator) HArrayGet(array, add1, Primitive::kPrimInt, 0); + HInstruction* array_set1 = new (&allocator) HArraySet(array, add1, add2, Primitive::kPrimInt, 0); + HInstruction* array_get2 = new (&allocator) HArrayGet(array, add1, Primitive::kPrimInt, 0); + HInstruction* array_set2 = new (&allocator) HArraySet(array, add1, add2, Primitive::kPrimInt, 0); + + DCHECK(div_check->CanThrow()); + + entry->AddInstruction(array); + + HInstruction* block_instructions[] = {add1, + add2, + mul, + div_check, + div, + array_get1, + array_set1, + array_get2, + array_set2}; + for (auto instr : block_instructions) { + block1->AddInstruction(instr); + } + + HEnvironment* environment = new (&allocator) HEnvironment(&allocator, + 2, + graph->GetArtMethod(), + 0, + div_check); + div_check->SetRawEnvironment(environment); + environment->SetRawEnvAt(0, add2); + add2->AddEnvUseAt(div_check->GetEnvironment(), 0); + environment->SetRawEnvAt(1, mul); + mul->AddEnvUseAt(div_check->GetEnvironment(), 1); + + ArenaAllocator* arena = graph->GetArena(); + 
CriticalPathSchedulingNodeSelector critical_path_selector; + arm64::HSchedulerARM64 scheduler(arena, &critical_path_selector); + SchedulingGraph scheduling_graph(&scheduler, arena); + // Instructions must be inserted in reverse order into the scheduling graph. + for (auto instr : ReverseRange(block_instructions)) { + scheduling_graph.AddNode(instr); + } + + // Should not have dependencies cross basic blocks. + ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add1, c1)); + ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add2, c2)); + + // Define-use dependency. + ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(add2, add1)); + ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add1, add2)); + ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(div_check, add2)); + ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(div_check, add1)); + ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(div, div_check)); + ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(array_set1, add1)); + ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(array_set1, add2)); + + // Read and write dependencies + ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, array_get1)); + ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_get2)); + ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_get2, array_set1)); + ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1)); + + // Env dependency. + ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(div_check, mul)); + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(mul, div_check)); + + // CanThrow. 
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, div_check)); +} +#endif + +static void CompileWithRandomSchedulerAndRun(const uint16_t* data, + bool has_result, + int expected) { + for (CodegenTargetConfig target_config : GetTargetConfigs()) { + ArenaPool pool; + ArenaAllocator arena(&pool); + HGraph* graph = CreateCFG(&arena, data); + + // Schedule the graph randomly. + HInstructionScheduling scheduling(graph, target_config.GetInstructionSet()); + scheduling.Run(/*only_optimize_loop_blocks*/ false, /*schedule_randomly*/ true); + + RunCode(target_config, + graph, + [](HGraph* graph_arg) { RemoveSuspendChecks(graph_arg); }, + has_result, expected); + } +} + +TEST_F(SchedulerTest, RandomScheduling) { + // + // Java source: crafted code to make sure (random) scheduling should get correct result. + // + // int result = 0; + // float fr = 10.0f; + // for (int i = 1; i < 10; i++) { + // fr ++; + // int t1 = result >> i; + // int t2 = result * i; + // result = result + t1 - t2; + // fr = fr / i; + // result += (int)fr; + // } + // return result; + // + const uint16_t data[] = SIX_REGISTERS_CODE_ITEM( + Instruction::CONST_4 | 0 << 12 | 2 << 8, // const/4 v2, #int 0 + Instruction::CONST_HIGH16 | 0 << 8, 0x4120, // const/high16 v0, #float 10.0 // #41200000 + Instruction::CONST_4 | 1 << 12 | 1 << 8, // const/4 v1, #int 1 + Instruction::CONST_16 | 5 << 8, 0x000a, // const/16 v5, #int 10 + Instruction::IF_GE | 5 << 12 | 1 << 8, 0x0014, // if-ge v1, v5, 001a // +0014 + Instruction::CONST_HIGH16 | 5 << 8, 0x3f80, // const/high16 v5, #float 1.0 // #3f800000 + Instruction::ADD_FLOAT_2ADDR | 5 << 12 | 0 << 8, // add-float/2addr v0, v5 + Instruction::SHR_INT | 3 << 8, 1 << 8 | 2 , // shr-int v3, v2, v1 + Instruction::MUL_INT | 4 << 8, 1 << 8 | 2, // mul-int v4, v2, v1 + Instruction::ADD_INT | 5 << 8, 3 << 8 | 2, // add-int v5, v2, v3 + Instruction::SUB_INT | 2 << 8, 4 << 8 | 5, // sub-int v2, v5, v4 + Instruction::INT_TO_FLOAT | 1 << 12 | 5 << 8, // 
int-to-float v5, v1 + Instruction::DIV_FLOAT_2ADDR | 5 << 12 | 0 << 8, // div-float/2addr v0, v5 + Instruction::FLOAT_TO_INT | 0 << 12 | 5 << 8, // float-to-int v5, v0 + Instruction::ADD_INT_2ADDR | 5 << 12 | 2 << 8, // add-int/2addr v2, v5 + Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8 | 1, // add-int/lit8 v1, v1, #int 1 // #01 + Instruction::GOTO | 0xeb << 8, // goto 0004 // -0015 + Instruction::RETURN | 2 << 8); // return v2 + + constexpr int kNumberOfRuns = 10; + for (int i = 0; i < kNumberOfRuns; ++i) { + CompileWithRandomSchedulerAndRun(data, true, 138774); + } +} + +} // namespace art diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index c5294107ae..e745c73091 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -97,7 +97,9 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { // class is initialized already or being initialized, and the call will not // be invoked once the method is deoptimized. - if (callee == codegen_->GetGraph()->GetArtMethod()) { + // We don't optimize for debuggable as it would prevent us from obsoleting the method in some + // situations. + if (callee == codegen_->GetGraph()->GetArtMethod() && !codegen_->GetGraph()->IsDebuggable()) { // Recursive call. method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive; code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf; diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index d6edb650ba..ae1e369999 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -497,11 +497,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() { // 4) Compute type of reference type instructions. The pass assumes that // NullConstant has been fixed up. 
- ReferenceTypePropagation(graph_, - class_loader_, - dex_cache_, - handles_, - /* is_first_run */ true).Run(); + ReferenceTypePropagation(graph_, dex_cache_, handles_, /* is_first_run */ true).Run(); // 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type // (int/float or long/double) and marked ArraySets with ambiguous input type. diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 978f113ec4..45dac54115 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -48,11 +48,9 @@ namespace art { class SsaBuilder : public ValueObject { public: SsaBuilder(HGraph* graph, - Handle<mirror::ClassLoader> class_loader, Handle<mirror::DexCache> dex_cache, VariableSizedHandleScope* handles) : graph_(graph), - class_loader_(class_loader), dex_cache_(dex_cache), handles_(handles), agets_fixed_(false), @@ -117,7 +115,6 @@ class SsaBuilder : public ValueObject { void RemoveRedundantUninitializedStrings(); HGraph* graph_; - Handle<mirror::ClassLoader> class_loader_; Handle<mirror::DexCache> dex_cache_; VariableSizedHandleScope* const handles_; diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index 1b9bd7eb31..668108daa4 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -16,6 +16,9 @@ #include "stack_map_stream.h" +#include <unordered_map> + +#include "base/stl_util.h" #include "art_method.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" @@ -40,6 +43,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, current_entry_.inline_infos_start_index = inline_infos_.size(); current_entry_.dex_register_map_hash = 0; current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound; + current_entry_.stack_mask_index = 0; if (num_dex_registers != 0) { current_entry_.live_dex_registers_mask = ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream); @@ 
-153,32 +157,43 @@ CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const { } size_t StackMapStream::PrepareForFillIn() { - int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too. + const size_t stack_mask_size_in_bits = stack_mask_max_ + 1; // Need room for max element too. + const size_t number_of_stack_masks = PrepareStackMasks(stack_mask_size_in_bits); + const size_t register_mask_size_in_bits = MinimumBitsToStore(register_mask_max_); + const size_t number_of_register_masks = PrepareRegisterMasks(); dex_register_maps_size_ = ComputeDexRegisterMapsSize(); ComputeInlineInfoEncoding(); // needs dex_register_maps_size_. inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize(); CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset(); - // The stack map contains compressed native offsets. - size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset.CompressedValue(), - dex_pc_max_, - dex_register_maps_size_, - inline_info_size_, - register_mask_max_, - stack_mask_number_of_bits); + // The stack map contains compressed native PC offsets. + const size_t stack_map_size = stack_map_encoding_.SetFromSizes( + max_native_pc_offset.CompressedValue(), + dex_pc_max_, + dex_register_maps_size_, + inline_info_size_, + number_of_register_masks, + number_of_stack_masks); stack_maps_size_ = RoundUp(stack_maps_.size() * stack_map_size, kBitsPerByte) / kBitsPerByte; dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize(); - - size_t non_header_size = + const size_t stack_masks_bits = number_of_stack_masks * stack_mask_size_in_bits; + const size_t register_masks_bits = number_of_register_masks * register_mask_size_in_bits; + // Register masks are last, stack masks are right before that last. + // They are both bit packed / aligned. 
+ const size_t non_header_size = stack_maps_size_ + dex_register_location_catalog_size_ + dex_register_maps_size_ + - inline_info_size_; + inline_info_size_ + + RoundUp(stack_masks_bits + register_masks_bits, kBitsPerByte) / kBitsPerByte; // Prepare the CodeInfo variable-sized encoding. CodeInfoEncoding code_info_encoding; code_info_encoding.non_header_size = non_header_size; code_info_encoding.number_of_stack_maps = stack_maps_.size(); - code_info_encoding.stack_map_size_in_bits = stack_map_size; + code_info_encoding.number_of_stack_masks = number_of_stack_masks; + code_info_encoding.number_of_register_masks = number_of_register_masks; + code_info_encoding.stack_mask_size_in_bits = stack_mask_size_in_bits; + code_info_encoding.register_mask_size_in_bits = register_mask_size_in_bits; code_info_encoding.stack_map_encoding = stack_map_encoding_; code_info_encoding.inline_info_encoding = inline_info_encoding_; code_info_encoding.number_of_location_catalog_entries = location_catalog_entries_.size(); @@ -321,18 +336,8 @@ void StackMapStream::FillIn(MemoryRegion region) { stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc); stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset); - stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask); - size_t number_of_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding); - if (entry.sp_mask != nullptr) { - for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) { - stack_map.SetStackMaskBit(stack_map_encoding_, bit, entry.sp_mask->IsBitSet(bit)); - } - } else { - // The MemoryRegion does not have to be zeroed, so make sure we clear the bits. 
- for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) { - stack_map.SetStackMaskBit(stack_map_encoding_, bit, false); - } - } + stack_map.SetRegisterMaskIndex(stack_map_encoding_, entry.register_mask_index); + stack_map.SetStackMaskIndex(stack_map_encoding_, entry.stack_mask_index); if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) { // No dex map available. @@ -353,7 +358,7 @@ void StackMapStream::FillIn(MemoryRegion region) { next_dex_register_map_offset += register_region.size(); DexRegisterMap dex_register_map(register_region); stack_map.SetDexRegisterMapOffset( - stack_map_encoding_, register_region.start() - dex_register_locations_region.start()); + stack_map_encoding_, register_region.begin() - dex_register_locations_region.begin()); // Set the dex register location. FillInDexRegisterMap(dex_register_map, @@ -373,7 +378,7 @@ void StackMapStream::FillIn(MemoryRegion region) { // Currently relative to the dex register map. stack_map.SetInlineDescriptorOffset( - stack_map_encoding_, inline_region.start() - dex_register_locations_region.start()); + stack_map_encoding_, inline_region.begin() - dex_register_locations_region.begin()); inline_info.SetDepth(inline_info_encoding_, entry.inlining_depth); DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size()); @@ -408,7 +413,7 @@ void StackMapStream::FillIn(MemoryRegion region) { DexRegisterMap dex_register_map(register_region); inline_info.SetDexRegisterMapOffsetAtDepth( inline_info_encoding_, - depth, register_region.start() - dex_register_locations_region.start()); + depth, register_region.begin() - dex_register_locations_region.begin()); FillInDexRegisterMap(dex_register_map, inline_entry.num_dex_registers, @@ -423,6 +428,25 @@ void StackMapStream::FillIn(MemoryRegion region) { } } + // Write stack masks table. 
+ size_t stack_mask_bits = encoding.stack_mask_size_in_bits; + if (stack_mask_bits > 0) { + size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte; + for (size_t i = 0; i < encoding.number_of_stack_masks; ++i) { + MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes); + BitMemoryRegion stack_mask = code_info.GetStackMask(encoding, i); + for (size_t bit_index = 0; bit_index < encoding.stack_mask_size_in_bits; ++bit_index) { + stack_mask.StoreBit(bit_index, source.LoadBit(bit_index)); + } + } + } + + // Write register masks table. + for (size_t i = 0; i < encoding.number_of_register_masks; ++i) { + BitMemoryRegion register_mask = code_info.GetRegisterMask(encoding, i); + register_mask.StoreBits(0, register_masks_[i], encoding.register_mask_size_in_bits); + } + // Verify all written data in debug build. if (kIsDebugBuild) { CheckCodeInfo(region); @@ -536,6 +560,38 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info, } } +size_t StackMapStream::PrepareRegisterMasks() { + register_masks_.resize(stack_maps_.size(), 0u); + std::unordered_map<uint32_t, size_t> dedupe; + for (StackMapEntry& stack_map : stack_maps_) { + const size_t index = dedupe.size(); + stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second; + register_masks_[index] = stack_map.register_mask; + } + return dedupe.size(); +} + +size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) { + // Preallocate memory since we do not want it to move (the dedup map will point into it). + const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte; + stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u); + // For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later + // when copying out from stack_masks_. 
+ std::unordered_map<MemoryRegion, + size_t, + FNVHash<MemoryRegion>, + MemoryRegion::ContentEquals> dedup(stack_maps_.size()); + for (StackMapEntry& stack_map : stack_maps_) { + size_t index = dedup.size(); + MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size); + for (size_t i = 0; i < entry_size_in_bits; i++) { + stack_mask.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i)); + } + stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second; + } + return dedup.size(); +} + // Check that all StackMapStream inputs are correctly encoded by trying to read them back. void StackMapStream::CheckCodeInfo(MemoryRegion region) const { CodeInfo code_info(region); @@ -550,16 +606,19 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_), entry.native_pc_code_offset.Uint32Value(instruction_set_)); DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc); - DCHECK_EQ(stack_map.GetRegisterMask(stack_map_encoding), entry.register_mask); - size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding); + DCHECK_EQ(stack_map.GetRegisterMaskIndex(stack_map_encoding), entry.register_mask_index); + DCHECK_EQ(code_info.GetRegisterMaskOf(encoding, stack_map), entry.register_mask); + const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding); + DCHECK_EQ(stack_map.GetStackMaskIndex(stack_map_encoding), entry.stack_mask_index); + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map); if (entry.sp_mask != nullptr) { - DCHECK_GE(num_stack_mask_bits, entry.sp_mask->GetNumberOfBits()); + DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits()); for (size_t b = 0; b < num_stack_mask_bits; b++) { - DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), entry.sp_mask->IsBitSet(b)); + DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b)); } } else { 
for (size_t b = 0; b < num_stack_mask_bits; b++) { - DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), 0u); + DCHECK_EQ(stack_mask.LoadBit(b), 0u); } } diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index 8fec472437..b1069a17be 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -68,6 +68,8 @@ class StackMapStream : public ValueObject { location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)), dex_register_locations_(allocator->Adapter(kArenaAllocStackMapStream)), inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)), + stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)), + register_masks_(allocator->Adapter(kArenaAllocStackMapStream)), stack_mask_max_(-1), dex_pc_max_(0), register_mask_max_(0), @@ -107,6 +109,8 @@ class StackMapStream : public ValueObject { BitVector* live_dex_registers_mask; uint32_t dex_register_map_hash; size_t same_dex_register_map_as_; + uint32_t stack_mask_index; + uint32_t register_mask_index; }; struct InlineInfoEntry { @@ -160,6 +164,12 @@ class StackMapStream : public ValueObject { CodeOffset ComputeMaxNativePcCodeOffset() const; + // Returns the number of unique stack masks. + size_t PrepareStackMasks(size_t entry_size_in_bits); + + // Returns the number of unique register masks. + size_t PrepareRegisterMasks(); + // Returns the index of an entry with the same dex register map as the current_entry, // or kNoSameDexMapFound if no such entry exists. size_t FindEntryWithTheSameDexMap(); @@ -193,6 +203,8 @@ class StackMapStream : public ValueObject { // A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`. 
ArenaVector<size_t> dex_register_locations_; ArenaVector<InlineInfoEntry> inline_infos_; + ArenaVector<uint8_t> stack_masks_; + ArenaVector<uint32_t> register_masks_; int stack_mask_max_; uint32_t dex_pc_max_; uint32_t register_mask_max_; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index da4597e385..ce6d5c2b22 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -27,15 +27,16 @@ namespace art { // Check that the stack mask of given stack map is identical // to the given bit vector. Returns true if they are same. static bool CheckStackMask( - int number_of_bits, + const CodeInfo& code_info, + const CodeInfoEncoding& encoding, const StackMap& stack_map, - StackMapEncoding& encoding, const BitVector& bit_vector) { - if (bit_vector.GetHighestBitSet() >= number_of_bits) { + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map); + if (bit_vector.GetNumberOfBits() > encoding.stack_mask_size_in_bits) { return false; } - for (int i = 0; i < number_of_bits; ++i) { - if (stack_map.GetStackMaskBit(encoding, i) != bit_vector.IsBitSet(i)) { + for (size_t i = 0; i < encoding.stack_mask_size_in_bits; ++i) { + if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) { return false; } } @@ -79,12 +80,9 @@ TEST(StackMapTest, Test1) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding))); ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map)); - ASSERT_TRUE(CheckStackMask(code_info.GetNumberOfStackMaskBits(encoding), - stack_map, - encoding.stack_map_encoding, - sp_mask)); + ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); 
DexRegisterMap dex_register_map = @@ -197,12 +195,9 @@ TEST(StackMapTest, Test2) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding))); ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map)); - ASSERT_TRUE(CheckStackMask(code_info.GetNumberOfStackMaskBits(encoding), - stack_map, - encoding.stack_map_encoding, - sp_mask1)); + ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); DexRegisterMap dex_register_map = @@ -259,12 +254,9 @@ TEST(StackMapTest, Test2) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding))); ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map)); - ASSERT_TRUE(CheckStackMask(code_info.GetNumberOfStackMaskBits(encoding), - stack_map, - encoding.stack_map_encoding, - sp_mask2)); + ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); DexRegisterMap dex_register_map = @@ -316,12 +308,9 @@ TEST(StackMapTest, Test2) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding))); ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map)); - 
ASSERT_TRUE(CheckStackMask(code_info.GetNumberOfStackMaskBits(encoding), - stack_map, - encoding.stack_map_encoding, - sp_mask3)); + ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); DexRegisterMap dex_register_map = @@ -373,12 +362,9 @@ TEST(StackMapTest, Test2) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding))); ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map)); - ASSERT_TRUE(CheckStackMask(code_info.GetNumberOfStackMaskBits(encoding), - stack_map, - encoding.stack_map_encoding, - sp_mask4)); + ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); DexRegisterMap dex_register_map = @@ -458,7 +444,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding))); ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map)); ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); DexRegisterMap dex_register_map = @@ -657,7 +643,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding))); ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0x3u, 
code_info.GetRegisterMaskOf(encoding, stack_map)); ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding)); @@ -667,7 +653,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) { ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding))); ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding)); ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA)); - ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding.stack_map_encoding)); + ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map)); ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding)); ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding)); @@ -854,4 +840,33 @@ TEST(StackMapTest, CodeOffsetTest) { EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment); } + +TEST(StackMapTest, TestDeduplicateStackMask) { + ArenaPool pool; + ArenaAllocator arena(&pool); + StackMapStream stream(&arena, kRuntimeISA); + + ArenaBitVector sp_mask(&arena, 0, true); + sp_mask.SetBit(1); + sp_mask.SetBit(4); + stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0); + stream.EndStackMapEntry(); + stream.BeginStackMapEntry(0, 8, 0x3, &sp_mask, 0, 0); + stream.EndStackMapEntry(); + + size_t size = stream.PrepareForFillIn(); + void* memory = arena.Alloc(size, kArenaAllocMisc); + MemoryRegion region(memory, size); + stream.FillIn(region); + + CodeInfo code_info(region); + CodeInfoEncoding encoding = code_info.ExtractEncoding(); + ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding)); + + StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding); + StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding); + EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map_encoding), + stack_map2.GetStackMaskIndex(encoding.stack_map_encoding)); +} + } // namespace art diff --git a/compiler/utils/assembler_test_base.h 
b/compiler/utils/assembler_test_base.h index e7edf96722..d76cb1c1df 100644 --- a/compiler/utils/assembler_test_base.h +++ b/compiler/utils/assembler_test_base.h @@ -26,6 +26,7 @@ #include "android-base/strings.h" #include "common_runtime_test.h" // For ScratchFile +#include "exec_utils.h" #include "utils.h" namespace art { diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index d3b15ac8cf..a24d49e08d 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1057,6 +1057,25 @@ void X86Assembler::andpd(XmmRegister dst, const Address& src) { } +void X86Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + void X86Assembler::fldl(const Address& src) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0xDD); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index a93616c3e5..4056ca67fb 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -472,6 +472,9 @@ class X86Assembler FINAL : public Assembler { void orpd(XmmRegister dst, XmmRegister src); void orps(XmmRegister dst, XmmRegister src); + void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm); + void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm); + void flds(const Address& src); void fstps(const Address& dst); void fsts(const Address& dst); diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index 4d60a12cb9..1768d8b715 100644 --- 
a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -468,51 +468,43 @@ TEST_F(AssemblerX86Test, MovupdAddr) { } TEST_F(AssemblerX86Test, AddPS) { - GetAssembler()->addps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "addps %xmm1, %xmm0\n"; - DriverStr(expected, "addps"); + DriverStr(RepeatFF(&x86::X86Assembler::addps, "addps %{reg2}, %{reg1}"), "addps"); } TEST_F(AssemblerX86Test, AddPD) { - GetAssembler()->addpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "addpd %xmm1, %xmm0\n"; - DriverStr(expected, "addpd"); + DriverStr(RepeatFF(&x86::X86Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd"); } TEST_F(AssemblerX86Test, SubPS) { - GetAssembler()->subps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "subps %xmm1, %xmm0\n"; - DriverStr(expected, "subps"); + DriverStr(RepeatFF(&x86::X86Assembler::subps, "subps %{reg2}, %{reg1}"), "subps"); } TEST_F(AssemblerX86Test, SubPD) { - GetAssembler()->subpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "subpd %xmm1, %xmm0\n"; - DriverStr(expected, "subpd"); + DriverStr(RepeatFF(&x86::X86Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd"); } TEST_F(AssemblerX86Test, MulPS) { - GetAssembler()->mulps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "mulps %xmm1, %xmm0\n"; - DriverStr(expected, "mulps"); + DriverStr(RepeatFF(&x86::X86Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps"); } TEST_F(AssemblerX86Test, MulPD) { - GetAssembler()->mulpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "mulpd %xmm1, %xmm0\n"; - DriverStr(expected, "mulpd"); + DriverStr(RepeatFF(&x86::X86Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd"); } TEST_F(AssemblerX86Test, DivPS) { - GetAssembler()->divps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* 
expected = "divps %xmm1, %xmm0\n"; - DriverStr(expected, "divps"); + DriverStr(RepeatFF(&x86::X86Assembler::divps, "divps %{reg2}, %{reg1}"), "divps"); } TEST_F(AssemblerX86Test, DivPD) { - GetAssembler()->divpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1)); - const char* expected = "divpd %xmm1, %xmm0\n"; - DriverStr(expected, "divpd"); + DriverStr(RepeatFF(&x86::X86Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd"); +} + +TEST_F(AssemblerX86Test, ShufPS) { + DriverStr(RepeatFFI(&x86::X86Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps"); +} + +TEST_F(AssemblerX86Test, ShufPD) { + DriverStr(RepeatFFI(&x86::X86Assembler::shufpd, 1, "shufpd ${imm}, %{reg2}, %{reg1}"), "shufpd"); } ///////////////// diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 2366b68f11..c2c44ab58c 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -1213,6 +1213,28 @@ void X86_64Assembler::orps(XmmRegister dst, XmmRegister src) { EmitXmmRegisterOperand(dst.LowBits(), src); } + +void X86_64Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + void X86_64Assembler::fldl(const Address& src) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0xDD); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 5923a41fe3..e140b45a00 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h 
+++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -495,6 +495,9 @@ class X86_64Assembler FINAL : public Assembler { void orpd(XmmRegister dst, XmmRegister src); void orps(XmmRegister dst, XmmRegister src); + void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm); + void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm); + void flds(const Address& src); void fstps(const Address& dst); void fsts(const Address& dst); diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index 2812c34406..efa5cc97ea 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -1203,6 +1203,14 @@ TEST_F(AssemblerX86_64Test, Orpd) { DriverStr(RepeatFF(&x86_64::X86_64Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd"); } +TEST_F(AssemblerX86_64Test, Shufps) { + DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps"); +} + +TEST_F(AssemblerX86_64Test, Shufpd) { + DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufpd, 1, "shufpd ${imm}, %{reg2}, %{reg1}"), "shufpd"); +} + TEST_F(AssemblerX86_64Test, UcomissAddress) { GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address( x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 19f0f1c182..196d8d4220 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -1282,13 +1282,10 @@ class Dex2Oat FINAL { DCHECK_EQ(input_vdex_fd_, -1); if (!input_vdex_.empty()) { std::string error_msg; - input_vdex_file_.reset(VdexFile::Open(input_vdex_, - /* writable */ false, - /* low_4gb */ false, - &error_msg)); - if (input_vdex_file_ != nullptr && !input_vdex_file_->IsValid()) { - input_vdex_file_.reset(nullptr); - } + input_vdex_file_ = VdexFile::Open(input_vdex_, + /* writable */ false, + /* low_4gb */ false, + &error_msg); } DCHECK_EQ(output_vdex_fd_, -1); @@ 
-1330,19 +1327,16 @@ class Dex2Oat FINAL { PLOG(WARNING) << "Failed getting length of vdex file"; } else { std::string error_msg; - input_vdex_file_.reset(VdexFile::Open(input_vdex_fd_, - s.st_size, - "vdex", - /* writable */ false, - /* low_4gb */ false, - &error_msg)); + input_vdex_file_ = VdexFile::Open(input_vdex_fd_, + s.st_size, + "vdex", + /* writable */ false, + /* low_4gb */ false, + &error_msg); // If there's any problem with the passed vdex, just warn and proceed // without it. if (input_vdex_file_ == nullptr) { - PLOG(WARNING) << "Failed opening vdex file " << error_msg; - } else if (!input_vdex_file_->IsValid()) { - PLOG(WARNING) << "Existing vdex file is invalid"; - input_vdex_file_.reset(nullptr); + PLOG(WARNING) << "Failed opening vdex file: " << error_msg; } } } diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc index 53dda6a995..640f387a80 100644 --- a/dexdump/dexdump_test.cc +++ b/dexdump/dexdump_test.cc @@ -23,6 +23,7 @@ #include "common_runtime_test.h" #include "runtime/arch/instruction_set.h" +#include "runtime/exec_utils.h" #include "runtime/os.h" #include "runtime/utils.h" #include "utils.h" diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc index 46a1c43548..da1e1d26dc 100644 --- a/dexlayout/dexlayout_test.cc +++ b/dexlayout/dexlayout_test.cc @@ -23,6 +23,7 @@ #include "base/unix_file/fd_file.h" #include "common_runtime_test.h" +#include "exec_utils.h" #include "utils.h" namespace art { diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc index 13209427c9..173a456982 100644 --- a/dexlist/dexlist_test.cc +++ b/dexlist/dexlist_test.cc @@ -23,6 +23,7 @@ #include "common_runtime_test.h" #include "runtime/arch/instruction_set.h" +#include "runtime/exec_utils.h" #include "runtime/gc/heap.h" #include "runtime/gc/space/image_space.h" #include "runtime/os.h" diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc index 3f2afc0696..0d46b2ea7a 100644 --- a/imgdiag/imgdiag_test.cc +++ 
b/imgdiag/imgdiag_test.cc @@ -24,6 +24,7 @@ #include "runtime/os.h" #include "runtime/arch/instruction_set.h" +#include "runtime/exec_utils.h" #include "runtime/utils.h" #include "runtime/gc/space/image_space.h" #include "runtime/gc/heap.h" diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 9b4d3e1156..0f02da77a1 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -589,16 +589,17 @@ class OatDumper { kByteKindCodeInfoInlineInfo, kByteKindCodeInfoEncoding, kByteKindCodeInfoOther, + kByteKindCodeInfoStackMasks, + kByteKindCodeInfoRegisterMasks, kByteKindStackMapNativePc, kByteKindStackMapDexPc, kByteKindStackMapDexRegisterMap, kByteKindStackMapInlineInfo, - kByteKindStackMapRegisterMask, - kByteKindStackMapMask, - kByteKindStackMapOther, + kByteKindStackMapRegisterMaskIndex, + kByteKindStackMapStackMaskIndex, kByteKindCount, kByteKindStackMapFirst = kByteKindCodeInfoOther, - kByteKindStackMapLast = kByteKindStackMapOther, + kByteKindStackMapLast = kByteKindStackMapStackMaskIndex, }; int64_t bits[kByteKindCount] = {}; // Since code has deduplication, seen tracks already seen pointers to avoid double counting @@ -626,48 +627,45 @@ class OatDumper { const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst, bits + kByteKindStackMapLast + 1, 0u); - Dump(os, "Code ", bits[kByteKindCode], sum); - Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum); - Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum); - Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum); - Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum); - Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum); - Dump(os, "CodeInfoStackMap ", stack_map_bits, sum); + Dump(os, "Code ", bits[kByteKindCode], sum); + Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum); + Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum); + Dump(os, 
"CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum); + Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum); + Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum); + Dump(os, "CodeInfoStackMasks ", bits[kByteKindCodeInfoStackMasks], sum); + Dump(os, "CodeInfoRegisterMasks ", bits[kByteKindCodeInfoRegisterMasks], sum); + Dump(os, "CodeInfoStackMap ", stack_map_bits, sum); { ScopedIndentation indent1(&os); Dump(os, - "StackMapNativePc ", + "StackMapNativePc ", bits[kByteKindStackMapNativePc], stack_map_bits, "stack map"); Dump(os, - "StackMapDexPcEncoding ", + "StackMapDexPcEncoding ", bits[kByteKindStackMapDexPc], stack_map_bits, "stack map"); Dump(os, - "StackMapDexRegisterMap ", + "StackMapDexRegisterMap ", bits[kByteKindStackMapDexRegisterMap], stack_map_bits, "stack map"); Dump(os, - "StackMapInlineInfo ", + "StackMapInlineInfo ", bits[kByteKindStackMapInlineInfo], stack_map_bits, "stack map"); Dump(os, - "StackMapRegisterMaskEncoding ", - bits[kByteKindStackMapRegisterMask], + "StackMapRegisterMaskIndex ", + bits[kByteKindStackMapRegisterMaskIndex], stack_map_bits, "stack map"); Dump(os, - "StackMapMask ", - bits[kByteKindStackMapMask], - stack_map_bits, - "stack map"); - Dump(os, - "StackMapOther ", - bits[kByteKindStackMapOther], + "StackMapStackMaskIndex ", + bits[kByteKindStackMapStackMaskIndex], stack_map_bits, "stack map"); } @@ -1573,18 +1571,18 @@ class OatDumper { Stats::kByteKindStackMapInlineInfo, stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps); stats_.AddBits( - Stats::kByteKindStackMapRegisterMask, - stack_map_encoding.GetRegisterMaskEncoding().BitSize() * num_stack_maps); - const size_t stack_mask_bits = encoding.stack_map_size_in_bits - - stack_map_encoding.GetStackMaskBitOffset(); + Stats::kByteKindStackMapRegisterMaskIndex, + stack_map_encoding.GetRegisterMaskIndexEncoding().BitSize() * num_stack_maps); + stats_.AddBits( + 
Stats::kByteKindStackMapStackMaskIndex, + stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps); stats_.AddBits( - Stats::kByteKindStackMapMask, - stack_mask_bits * num_stack_maps); - const size_t stack_map_bits = - stack_map_encoding.GetStackMaskBitOffset() + stack_mask_bits; + Stats::kByteKindCodeInfoStackMasks, + helper.GetCodeInfo().GetNumberOfStackMaskBits(encoding) * + encoding.number_of_stack_masks); stats_.AddBits( - Stats::kByteKindStackMapOther, - (encoding.stack_map_size_in_bits - stack_map_bits) * num_stack_maps); + Stats::kByteKindCodeInfoRegisterMasks, + encoding.register_mask_size_in_bits * encoding.number_of_stack_masks); const size_t stack_map_bytes = helper.GetCodeInfo().GetStackMapsSize(encoding); const size_t location_catalog_bytes = helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding); @@ -2180,14 +2178,9 @@ class ImageDumper { ScopedIndentation indent2(&state->vios_); auto* resolved_types = dex_cache->GetResolvedTypes(); for (size_t i = 0; i < num_types; ++i) { - auto pair = resolved_types[i].load(std::memory_order_relaxed); + auto* elem = resolved_types[i].Read(); size_t run = 0; - for (size_t j = i + 1; j != num_types; ++j) { - auto other_pair = resolved_types[j].load(std::memory_order_relaxed); - if (pair.index != other_pair.index || - pair.object.Read() != other_pair.object.Read()) { - break; - } + for (size_t j = i + 1; j != num_types && elem == resolved_types[j].Read(); ++j) { ++run; } if (run == 0) { @@ -2197,13 +2190,12 @@ class ImageDumper { i = i + run; } std::string msg; - auto* elem = pair.object.Read(); if (elem == nullptr) { msg = "null"; } else { msg = elem->PrettyClass(); } - os << StringPrintf("%p %u %s\n", elem, pair.index, msg.c_str()); + os << StringPrintf("%p %s\n", elem, msg.c_str()); } } } diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc index ba57d1860c..503cd4d581 100644 --- a/oatdump/oatdump_test.cc +++ b/oatdump/oatdump_test.cc @@ -24,6 +24,7 @@ #include 
"base/unix_file/fd_file.h" #include "runtime/arch/instruction_set.h" +#include "runtime/exec_utils.h" #include "runtime/gc/heap.h" #include "runtime/gc/space/image_space.h" #include "runtime/os.h" diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 2546822613..9a73830f99 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -643,8 +643,8 @@ void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots if (orig_strings != nullptr) { orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this)); } - mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes(); - mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types); + GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* relocated_types = RelocatedAddressOfPointer(orig_types); copy_dex_cache->SetField64<false>( mirror::DexCache::ResolvedTypesOffset(), static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types))); diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc index 2f40fef42e..a6c3cf067b 100644 --- a/profman/profile_assistant_test.cc +++ b/profman/profile_assistant_test.cc @@ -18,6 +18,7 @@ #include "base/unix_file/fd_file.h" #include "common_runtime_test.h" +#include "exec_utils.h" #include "profile_assistant.h" #include "jit/profile_compilation_info.h" #include "utils.h" diff --git a/runtime/Android.bp b/runtime/Android.bp index 540df5a554..276f3043d9 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -57,6 +57,7 @@ cc_defaults { "dex_file_verifier.cc", "dex_instruction.cc", "elf_file.cc", + "exec_utils.cc", "fault_handler.cc", "gc/allocation_record.cc", "gc/allocator/dlmalloc.cc", @@ -576,6 +577,7 @@ art_cc_test { "type_lookup_table_test.cc", "utf_test.cc", "utils_test.cc", + "vdex_file_test.cc", "verifier/method_verifier_test.cc", "verifier/reg_type_test.cc", "zip_archive_test.cc", diff --git 
a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h index 99aea62468..7ef9a7abb5 100644 --- a/runtime/arch/instruction_set.h +++ b/runtime/arch/instruction_set.h @@ -68,8 +68,8 @@ static constexpr size_t kArmAlignment = 8; // ARM64 instruction alignment. This is the recommended alignment for maximum performance. static constexpr size_t kArm64Alignment = 16; -// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned. -// TODO: Can this be 4? +// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned, +// but 64-bit literals must be 8-byte aligned. static constexpr size_t kMipsAlignment = 8; // X86 instruction alignment. This is the recommended alignment for maximum performance. @@ -80,8 +80,8 @@ static constexpr size_t kThumb2InstructionAlignment = 2; static constexpr size_t kArm64InstructionAlignment = 4; static constexpr size_t kX86InstructionAlignment = 1; static constexpr size_t kX86_64InstructionAlignment = 1; -static constexpr size_t kMipsInstructionAlignment = 2; -static constexpr size_t kMips64InstructionAlignment = 2; +static constexpr size_t kMipsInstructionAlignment = 4; +static constexpr size_t kMips64InstructionAlignment = 4; const char* GetInstructionSetString(InstructionSet isa); diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h index 16b73c681f..80af8e7bde 100644 --- a/runtime/art_field-inl.h +++ b/runtime/art_field-inl.h @@ -311,8 +311,6 @@ inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) { template <bool kResolve> inline ObjPtr<mirror::Class> ArtField::GetType() { - // TODO: Refactor this function into two functions, ResolveType() and LookupType() - // so that we can properly annotate it with no-suspension possible / suspension possible. 
const uint32_t field_index = GetDexFieldIndex(); ObjPtr<mirror::Class> declaring_class = GetDeclaringClass(); if (UNLIKELY(declaring_class->IsProxyClass())) { @@ -322,16 +320,9 @@ inline ObjPtr<mirror::Class> ArtField::GetType() { const DexFile* const dex_file = dex_cache->GetDexFile(); const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index); ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_); - if (UNLIKELY(type == nullptr)) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - if (kResolve) { - type = class_linker->ResolveType(*dex_file, field_id.type_idx_, declaring_class); - CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); - } else { - type = class_linker->LookupResolvedType( - *dex_file, field_id.type_idx_, dex_cache, declaring_class->GetClassLoader()); - DCHECK(!Thread::Current()->IsExceptionPending()); - } + if (kResolve && UNLIKELY(type == nullptr)) { + type = ResolveGetType(field_id.type_idx_); + CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); } return type; } diff --git a/runtime/art_field.cc b/runtime/art_field.cc index 7e131040be..a4a6e5a4fb 100644 --- a/runtime/art_field.cc +++ b/runtime/art_field.cc @@ -48,6 +48,10 @@ ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) { return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor); } +ObjPtr<mirror::Class> ArtField::ResolveGetType(dex::TypeIndex type_idx) { + return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this); +} + ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self, const DexFile& dex_file, dex::StringIndex string_idx, diff --git a/runtime/art_field.h b/runtime/art_field.h index 75dd981136..427e103749 100644 --- a/runtime/art_field.h +++ b/runtime/art_field.h @@ -217,6 +217,8 @@ class ArtField FINAL { private: ObjPtr<mirror::Class> ProxyFindSystemClass(const char* descriptor) REQUIRES_SHARED(Locks::mutator_lock_); + 
ObjPtr<mirror::Class> ResolveGetType(dex::TypeIndex type_idx) + REQUIRES_SHARED(Locks::mutator_lock_); ObjPtr<mirror::String> ResolveGetStringName(Thread* self, const DexFile& dex_file, dex::StringIndex string_idx, diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index efcdbbff5a..950f1aa9f4 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -175,19 +175,12 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerS } inline mirror::Class* ArtMethod::GetClassFromTypeIndex(dex::TypeIndex type_idx, bool resolve) { - // TODO: Refactor this function into two functions, Resolve...() and Lookup...() - // so that we can properly annotate it with no-suspension possible / suspension possible. ObjPtr<mirror::DexCache> dex_cache = GetDexCache(); ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx); - if (UNLIKELY(type == nullptr)) { + if (UNLIKELY(type == nullptr) && resolve) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - if (resolve) { - type = class_linker->ResolveType(type_idx, this); - CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); - } else { - type = class_linker->LookupResolvedType( - *dex_cache->GetDexFile(), type_idx, dex_cache, GetClassLoader()); - } + type = class_linker->ResolveType(type_idx, this); + CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); } return type.Ptr(); } @@ -381,9 +374,10 @@ inline mirror::DexCache* ArtMethod::GetDexCache() { } } -template<ReadBarrierOption kReadBarrierOption> inline bool ArtMethod::IsProxyMethod() { - return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass(); + // Avoid read barrier since the from-space version of the class will have the correct proxy class + // flags since they are constant for the lifetime of the class. 
+ return GetDeclaringClass<kWithoutReadBarrier>()->IsProxyClass(); } inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(PointerSize pointer_size) { diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 61ff41742b..6cb8544617 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -446,6 +446,8 @@ static const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, PointerSize pointer_size, bool* found) REQUIRES_SHARED(Locks::mutator_lock_) { + // We shouldn't be calling this with obsolete methods. + DCHECK(!method->IsObsolete()); // Although we overwrite the trampoline of non-static methods, we may get here via the resolution // method for direct methods (or virtual methods made direct). mirror::Class* declaring_class = method->GetDeclaringClass(); diff --git a/runtime/art_method.h b/runtime/art_method.h index e4db2c7324..383630363e 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -201,6 +201,10 @@ class ArtMethod FINAL { return (GetAccessFlags() & kAccCompileDontBother) == 0; } + void SetDontCompile() { + AddAccessFlags(kAccCompileDontBother); + } + // A default conflict method is a special sentinel method that stands for a conflict between // multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one // attempts to do so. @@ -226,7 +230,7 @@ class ArtMethod FINAL { void SetIsObsolete() { // TODO We should really support redefining intrinsic if possible. 
DCHECK(!IsIntrinsic()); - SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod); + AddAccessFlags(kAccObsoleteMethod); } template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier> @@ -251,7 +255,6 @@ class ArtMethod FINAL { return (GetAccessFlags() & kAccVarargs) != 0; } - template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_); bool SkipAccessChecks() { diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc index 61e0aabbaf..9fdb0cc9d0 100644 --- a/runtime/base/arena_allocator.cc +++ b/runtime/base/arena_allocator.cc @@ -84,6 +84,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = { "Verifier ", "CallingConv ", "CHA ", + "Scheduler ", }; template <bool kCount> @@ -144,8 +145,11 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first, } } +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winstantiation-after-specialization" // Explicitly instantiate the used implementation. 
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>; +#pragma GCC diagnostic pop void ArenaAllocatorMemoryTool::DoMakeDefined(void* ptr, size_t size) { MEMORY_TOOL_MAKE_DEFINED(ptr, size); diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index 6c764cb715..245ab3b24f 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -96,6 +96,7 @@ enum ArenaAllocKind { kArenaAllocVerifier, kArenaAllocCallingConvention, kArenaAllocCHA, + kArenaAllocScheduler, kNumArenaAllocKinds }; diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h index 9d45707596..3f6f5d6221 100644 --- a/runtime/base/iteration_range.h +++ b/runtime/base/iteration_range.h @@ -55,7 +55,7 @@ inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) { } template <typename Container> -inline auto ReverseRange(Container& c) { +inline auto ReverseRange(Container&& c) { typedef typename std::reverse_iterator<decltype(c.begin())> riter; return MakeIterationRange(riter(c.end()), riter(c.begin())); } diff --git a/runtime/bit_memory_region.h b/runtime/bit_memory_region.h index 90a198193e..c3b5be458e 100644 --- a/runtime/bit_memory_region.h +++ b/runtime/bit_memory_region.h @@ -26,7 +26,7 @@ namespace art { class BitMemoryRegion FINAL : public ValueObject { public: BitMemoryRegion() = default; - BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_size) { + ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_size) { bit_start_ = bit_offset % kBitsPerByte; const size_t start = bit_offset / kBitsPerByte; const size_t end = (bit_offset + bit_size + kBitsPerByte - 1) / kBitsPerByte; diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index 93fdaa6161..a955cb5acb 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -67,7 +67,8 @@ class CheckReferenceMapVisitor : public StackVisitor { uint16_t 
number_of_dex_registers = m->GetCodeItem()->registers_size_; DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers); - uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding); + uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map); + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map); for (int i = 0; i < number_of_references; ++i) { int reg = registers[i]; CHECK(reg < m->GetCodeItem()->registers_size_); @@ -80,8 +81,7 @@ class CheckReferenceMapVisitor : public StackVisitor { break; case DexRegisterLocation::Kind::kInStack: DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0); - CHECK(stack_map.GetStackMaskBit(encoding.stack_map_encoding, - location.GetValue() / kFrameSlotSize)); + CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize)); break; case DexRegisterLocation::Kind::kInRegister: case DexRegisterLocation::Kind::kInRegisterHigh: diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index e928344fb6..3438810069 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -78,18 +78,6 @@ inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx, return string.Ptr(); } -inline ObjPtr<mirror::Class> ClassLinker::LookupResolvedType( - dex::TypeIndex type_idx, - ObjPtr<mirror::DexCache> dex_cache, - ObjPtr<mirror::ClassLoader> class_loader) { - ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx); - if (type == nullptr) { - type = Runtime::Current()->GetClassLinker()->LookupResolvedType( - *dex_cache->GetDexFile(), type_idx, dex_cache, class_loader); - } - return type; -} - inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) { Thread::PoisonObjectPointersIfDebug(); if (kIsDebugBuild) { @@ -103,6 +91,25 @@ inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMetho Handle<mirror::ClassLoader> 
class_loader(hs.NewHandle(declaring_class->GetClassLoader())); const DexFile& dex_file = *dex_cache->GetDexFile(); resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); + // Note: We cannot check here to see whether we added the type to the cache. The type + // might be an erroneous class, which results in it being hidden from us. + } + return resolved_type.Ptr(); +} + +inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtField* referrer) { + Thread::PoisonObjectPointersIfDebug(); + ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass(); + ObjPtr<mirror::DexCache> dex_cache_ptr = declaring_class->GetDexCache(); + ObjPtr<mirror::Class> resolved_type = dex_cache_ptr->GetResolvedType(type_idx); + if (UNLIKELY(resolved_type == nullptr)) { + StackHandleScope<2> hs(Thread::Current()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr)); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader())); + const DexFile& dex_file = *dex_cache->GetDexFile(); + resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader); + // Note: We cannot check here to see whether we added the type to the cache. The type + // might be an erroneous class, which results in it being hidden from us. 
} return resolved_type.Ptr(); } @@ -226,7 +233,7 @@ template<ReadBarrierOption kReadBarrierOption> ArtMethod* ClassLinker::FindMethodForProxy(ObjPtr<mirror::Class> proxy_class, ArtMethod* proxy_method) { DCHECK(proxy_class->IsProxyClass()); - DCHECK(proxy_method->IsProxyMethod<kReadBarrierOption>()); + DCHECK(proxy_method->IsProxyMethod()); { Thread* const self = Thread::Current(); ReaderMutexLock mu(self, *Locks::dex_lock_); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 866936739a..edd6e3b522 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -1171,23 +1171,6 @@ static void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& pred } } -template <typename T> -static void CopyDexCachePairs(const std::atomic<mirror::DexCachePair<T>>* src, - size_t count, - std::atomic<mirror::DexCachePair<T>>* dst) { - DCHECK_NE(count, 0u); - DCHECK(!src[0].load(std::memory_order_relaxed).object.IsNull() || - src[0].load(std::memory_order_relaxed).index != 0u); - for (size_t i = 0; i < count; ++i) { - DCHECK_EQ(dst[i].load(std::memory_order_relaxed).index, 0u); - DCHECK(dst[i].load(std::memory_order_relaxed).object.IsNull()); - mirror::DexCachePair<T> source = src[i].load(std::memory_order_relaxed); - if (source.index != 0u || !source.object.IsNull()) { - dst[i].store(source, std::memory_order_relaxed); - } - } -} - bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( gc::space::ImageSpace* space, Handle<mirror::ClassLoader> class_loader, @@ -1241,10 +1224,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( if (dex_file->NumStringIds() < num_strings) { num_strings = dex_file->NumStringIds(); } - size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize; - if (dex_file->NumTypeIds() < num_types) { - num_types = dex_file->NumTypeIds(); - } + const size_t num_types = dex_file->NumTypeIds(); const size_t num_methods = dex_file->NumMethodIds(); const size_t num_fields = dex_file->NumFieldIds(); size_t num_method_types 
= mirror::DexCache::kDexCacheMethodTypeCacheSize; @@ -1263,14 +1243,28 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings(); mirror::StringDexCacheType* const strings = reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset()); - CopyDexCachePairs(image_resolved_strings, num_strings, strings); + for (size_t j = 0; j < num_strings; ++j) { + DCHECK_EQ(strings[j].load(std::memory_order_relaxed).index, 0u); + DCHECK(strings[j].load(std::memory_order_relaxed).object.IsNull()); + strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed), + std::memory_order_relaxed); + } + mirror::StringDexCachePair::Initialize(strings); dex_cache->SetStrings(strings); } if (num_types != 0u) { - mirror::TypeDexCacheType* const image_resolved_types = dex_cache->GetResolvedTypes(); - mirror::TypeDexCacheType* const types = - reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset()); - CopyDexCachePairs(image_resolved_types, num_types, types); + GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* const types = + reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset()); + for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) { + DCHECK(types[j].IsNull()); + } + CopyNonNull(image_resolved_types, + num_types, + types, + [](const GcRoot<mirror::Class>& elem) { + return elem.IsNull(); + }); dex_cache->SetResolvedTypes(types); } if (num_methods != 0u) { @@ -1311,7 +1305,15 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( mirror::MethodTypeDexCacheType* const method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>( raw_arrays + layout.MethodTypesOffset()); - CopyDexCachePairs(image_resolved_method_types, num_method_types, method_types); + for (size_t j = 0; j < num_method_types; ++j) { + 
DCHECK_EQ(method_types[j].load(std::memory_order_relaxed).index, 0u); + DCHECK(method_types[j].load(std::memory_order_relaxed).object.IsNull()); + method_types[j].store( + image_resolved_method_types[j].load(std::memory_order_relaxed), + std::memory_order_relaxed); + } + + mirror::MethodTypeDexCachePair::Initialize(method_types); dex_cache->SetResolvedMethodTypes(method_types); } } @@ -1333,11 +1335,11 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( } if (kIsDebugBuild) { CHECK(new_class_set != nullptr); - mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes(); const size_t num_types = dex_cache->NumResolvedTypes(); - for (size_t j = 0; j != num_types; ++j) { + for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) { // The image space is not yet added to the heap, avoid read barriers. - ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read(); + ObjPtr<mirror::Class> klass = types[j].Read(); if (space->HasAddress(klass.Ptr())) { DCHECK(!klass->IsErroneous()) << klass->GetStatus(); auto it = new_class_set->Find(ClassTable::TableSlot(klass)); @@ -1698,9 +1700,9 @@ bool ClassLinker::AddImageSpace( // The current dex file field is bogus, overwrite it so that we can get the dex file in the // loop below. 
h_dex_cache->SetDexFile(dex_file.get()); - mirror::TypeDexCacheType* const types = h_dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* const types = h_dex_cache->GetResolvedTypes(); for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) { - ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read(); + ObjPtr<mirror::Class> klass = types[j].Read(); if (klass != nullptr) { DCHECK(!klass->IsErroneous()) << klass->GetStatus(); } @@ -7696,9 +7698,7 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t utf16_length; const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length); ObjPtr<mirror::String> string = intern_table_->InternStrong(utf16_length, utf8_data); - if (string != nullptr) { - dex_cache->SetResolvedString(string_idx, string); - } + dex_cache->SetResolvedString(string_idx, string); return string.Ptr(); } @@ -7741,7 +7741,6 @@ ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(const DexFile& dex_file, } } if (type != nullptr && type->IsResolved()) { - dex_cache->SetResolvedType(type_idx, type); return type.Ptr(); } return nullptr; @@ -7764,12 +7763,6 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, Thread::PoisonObjectPointersIfDebug(); ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx); if (resolved == nullptr) { - // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here - // as a workaround for FastNative JNI to avoid AssertNoPendingException() when - // trying to resolve annotations while an exception may be pending. 
Bug: 34659969 - resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()); - } - if (resolved == nullptr) { Thread* self = Thread::Current(); const char* descriptor = dex_file.StringByTypeIdx(type_idx); resolved = FindClass(self, descriptor, class_loader); diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 21edd513ac..5042fb7609 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -262,6 +262,10 @@ class ClassLinker { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_); + mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtField* referrer) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_); + // Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search // for the type, since it may be referenced from but not contained within the given DexFile. ObjPtr<mirror::Class> LookupResolvedType(const DexFile& dex_file, @@ -269,10 +273,6 @@ class ClassLinker { ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_); - static ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_idx, - ObjPtr<mirror::DexCache> dex_cache, - ObjPtr<mirror::ClassLoader> class_loader) - REQUIRES_SHARED(Locks::mutator_lock_); // Resolve a type with the given ID from the DexFile, storing the // result in DexCache. The ClassLoader is used to search for the diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 6eee0bd617..17510bb598 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -914,7 +914,7 @@ TEST_F(ClassLinkerTest, LookupResolvedType) { class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()), klass); // Zero out the resolved type and make sure LookupResolvedType still finds it. 
- dex_cache->ClearResolvedType(type_idx); + dex_cache->SetResolvedType(type_idx, nullptr); EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr); EXPECT_OBJ_PTR_EQ( class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()), @@ -949,7 +949,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeArray) { class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()), array_klass); // Zero out the resolved type and make sure LookupResolvedType() still finds it. - dex_cache->ClearResolvedType(array_idx); + dex_cache->SetResolvedType(array_idx, nullptr); EXPECT_TRUE(dex_cache->GetResolvedType(array_idx) == nullptr); EXPECT_OBJ_PTR_EQ( class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()), @@ -972,7 +972,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) { class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()), klass.Get()); // Zero out the resolved type and make sure LookupResolvedType still finds it. - dex_cache->ClearResolvedType(type_idx); + dex_cache->SetResolvedType(type_idx, nullptr); EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr); EXPECT_OBJ_PTR_EQ( class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()), @@ -990,7 +990,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) { class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()), klass.Get()); // Zero out the resolved type and make sure LookupResolvedType() still finds it. 
- dex_cache->ClearResolvedType(type_idx); + dex_cache->SetResolvedType(type_idx, nullptr); EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr); EXPECT_OBJ_PTR_EQ( class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()), diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h index 7ae9f03c83..8b0c51c998 100644 --- a/runtime/dex2oat_environment_test.h +++ b/runtime/dex2oat_environment_test.h @@ -25,6 +25,7 @@ #include "common_runtime_test.h" #include "compiler_callbacks.h" +#include "exec_utils.h" #include "gc/heap.h" #include "gc/space/image_space.h" #include "oat_file_assistant.h" diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc index 0fec856865..9dca4c0621 100644 --- a/runtime/dex_file_test.cc +++ b/runtime/dex_file_test.cc @@ -338,13 +338,16 @@ TEST_F(DexFileTest, ClassDefs) { ScopedObjectAccess soa(Thread::Current()); std::unique_ptr<const DexFile> raw(OpenTestDexFile("Nested")); ASSERT_TRUE(raw.get() != nullptr); - EXPECT_EQ(2U, raw->NumClassDefs()); + EXPECT_EQ(3U, raw->NumClassDefs()); const DexFile::ClassDef& c0 = raw->GetClassDef(0); - EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c0)); + EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0)); const DexFile::ClassDef& c1 = raw->GetClassDef(1); - EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c1)); + EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1)); + + const DexFile::ClassDef& c2 = raw->GetClassDef(2); + EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2)); } TEST_F(DexFileTest, GetMethodSignature) { diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 1b267eb991..28aca6c905 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -76,6 +76,10 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, // Lookup the declaring class of the inlined method. 
const DexFile* dex_file = caller->GetDexFile(); const DexFile::MethodId& method_id = dex_file->GetMethodId(method_index); + ArtMethod* inlined_method = caller->GetDexCacheResolvedMethod(method_index, kRuntimePointerSize); + if (inlined_method != nullptr && !inlined_method->IsRuntimeMethod()) { + return inlined_method; + } const char* descriptor = dex_file->StringByTypeIdx(method_id.class_idx_); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Thread* self = Thread::Current(); @@ -92,8 +96,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, const char* method_name = dex_file->GetMethodName(method_id); const Signature signature = dex_file->GetMethodSignature(method_id); - ArtMethod* inlined_method = - klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize); + inlined_method = klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize); if (inlined_method == nullptr) { inlined_method = klass->FindDeclaredVirtualMethod(method_name, signature, kRuntimePointerSize); if (inlined_method == nullptr) { @@ -103,6 +106,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, << "This must be due to duplicate classes or playing wrongly with class loaders"; } } + caller->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize); return inlined_method; } @@ -705,10 +709,10 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx, return resolved_method; } else if (type == kSuper) { // TODO This lookup is rather slow. 
- ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache(); - dex::TypeIndex method_type_idx = dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_; - ObjPtr<mirror::Class> method_reference_class = ClassLinker::LookupResolvedType( - method_type_idx, dex_cache, referrer->GetClassLoader()); + dex::TypeIndex method_type_idx = + referrer->GetDexFile()->GetMethodId(method_idx).class_idx_; + mirror::Class* method_reference_class = + referrer->GetDexCache()->GetResolvedType(method_type_idx); if (method_reference_class == nullptr) { // Need to do full type resolution... return nullptr; diff --git a/runtime/exec_utils.cc b/runtime/exec_utils.cc new file mode 100644 index 0000000000..9efb1a353c --- /dev/null +++ b/runtime/exec_utils.cc @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "exec_utils.h" + +#include <sys/types.h> +#include <sys/wait.h> +#include <string> +#include <vector> + +#include "android-base/stringprintf.h" +#include "android-base/strings.h" + +#include "runtime.h" + +namespace art { + +using android::base::StringAppendF; +using android::base::StringPrintf; + +int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) { + const std::string command_line(android::base::Join(arg_vector, ' ')); + CHECK_GE(arg_vector.size(), 1U) << command_line; + + // Convert the args to char pointers. 
+ const char* program = arg_vector[0].c_str(); + std::vector<char*> args; + for (size_t i = 0; i < arg_vector.size(); ++i) { + const std::string& arg = arg_vector[i]; + char* arg_str = const_cast<char*>(arg.c_str()); + CHECK(arg_str != nullptr) << i; + args.push_back(arg_str); + } + args.push_back(nullptr); + + // fork and exec + pid_t pid = fork(); + if (pid == 0) { + // no allocation allowed between fork and exec + + // change process groups, so we don't get reaped by ProcessManager + setpgid(0, 0); + + // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc. + // Use the snapshot of the environment from the time the runtime was created. + char** envp = (Runtime::Current() == nullptr) ? nullptr : Runtime::Current()->GetEnvSnapshot(); + if (envp == nullptr) { + execv(program, &args[0]); + } else { + execve(program, &args[0], envp); + } + PLOG(ERROR) << "Failed to execve(" << command_line << ")"; + // _exit to avoid atexit handlers in child. + _exit(1); + } else { + if (pid == -1) { + *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s", + command_line.c_str(), strerror(errno)); + return -1; + } + + // wait for subprocess to finish + int status = -1; + pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0)); + if (got_pid != pid) { + *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: " + "wanted %d, got %d: %s", + command_line.c_str(), pid, got_pid, strerror(errno)); + return -1; + } + if (WIFEXITED(status)) { + return WEXITSTATUS(status); + } + return -1; + } +} + +bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) { + int status = ExecAndReturnCode(arg_vector, error_msg); + if (status != 0) { + const std::string command_line(android::base::Join(arg_vector, ' ')); + *error_msg = StringPrintf("Failed execv(%s) because non-0 exit status", + command_line.c_str()); + return false; + } + return true; +} + +} // namespace art diff --git a/runtime/exec_utils.h 
b/runtime/exec_utils.h new file mode 100644 index 0000000000..093f7b8d80 --- /dev/null +++ b/runtime/exec_utils.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_EXEC_UTILS_H_ +#define ART_RUNTIME_EXEC_UTILS_H_ + +#include <string> +#include <vector> + +namespace art { + +// Wrapper on fork/execv to run a command in a subprocess. +// Both of these spawn child processes using the environment as it was set when the single instance +// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it +// will use the current environment settings. 
+bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg); +int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg); + +} // namespace art + +#endif // ART_RUNTIME_EXEC_UTILS_H_ diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 6044053b4f..0819ba04f7 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -25,6 +25,7 @@ #include "gc/accounting/heap_bitmap-inl.h" #include "gc/accounting/mod_union_table-inl.h" #include "gc/accounting/space_bitmap-inl.h" +#include "gc/gc_pause_listener.h" #include "gc/reference_processor.h" #include "gc/space/image_space.h" #include "gc/space/space-inl.h" @@ -139,7 +140,7 @@ void ConcurrentCopying::RunPhases() { // Verify no from space refs. This causes a pause. if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) { TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings()); - ScopedPause pause(this); + ScopedPause pause(this, false); CheckEmptyMarkStack(); if (kVerboseMode) { LOG(INFO) << "Verifying no from-space refs"; @@ -439,8 +440,27 @@ void ConcurrentCopying::FlipThreadRoots() { gc_barrier_->Init(self, 0); ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_); FlipCallback flip_callback(this); + + // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if + // necessary. This is slightly over-reporting, as this includes the time to actually suspend + // threads. 
+ { + GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener(); + if (pause_listener != nullptr) { + pause_listener->StartPause(); + } + } + size_t barrier_count = Runtime::Current()->FlipThreadRoots( &thread_flip_visitor, &flip_callback, this); + + { + GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener(); + if (pause_listener != nullptr) { + pause_listener->EndPause(); + } + } + { ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); gc_barrier_->Increment(self, barrier_count); @@ -857,7 +877,10 @@ void ConcurrentCopying::IssueEmptyCheckpoint() { thread->ReadFlag(kEmptyCheckpointRequest)) { // Found a runnable thread that hasn't responded to the empty checkpoint request. // Assume it's stuck and safe to dump its stack. - thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); + thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT), + /*dump_native_stack*/ true, + /*backtrace_map*/ nullptr, + /*force_dump_stack*/ true); } } } diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc index 01bcb7df19..14fd332b57 100644 --- a/runtime/gc/collector/garbage_collector.cc +++ b/runtime/gc/collector/garbage_collector.cc @@ -158,22 +158,26 @@ void GarbageCollector::ResetMeasurements() { total_freed_bytes_ = 0; } -GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector) - : start_time_(NanoTime()), collector_(collector) { +GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting) + : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) { Runtime* runtime = Runtime::Current(); runtime->GetThreadList()->SuspendAll(__FUNCTION__); - GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener(); - if (pause_listener != nullptr) { - pause_listener->StartPause(); + if (with_reporting) { + GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener(); + if (pause_listener != nullptr) { + pause_listener->StartPause(); + } } } 
GarbageCollector::ScopedPause::~ScopedPause() { collector_->RegisterPause(NanoTime() - start_time_); Runtime* runtime = Runtime::Current(); - GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener(); - if (pause_listener != nullptr) { - pause_listener->EndPause(); + if (with_reporting_) { + GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener(); + if (pause_listener != nullptr) { + pause_listener->EndPause(); + } } runtime->GetThreadList()->ResumeAll(); } diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 0177e2a1ad..95601d736d 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -126,12 +126,14 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark public: class SCOPED_LOCKABLE ScopedPause { public: - explicit ScopedPause(GarbageCollector* collector) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_); + explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true) + EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_); ~ScopedPause() UNLOCK_FUNCTION(); private: const uint64_t start_time_; GarbageCollector* const collector_; + bool with_reporting_; }; GarbageCollector(Heap* heap, const std::string& name); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 70449797c1..aa15714595 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -293,8 +293,13 @@ Heap::Heap(size_t initial_size, if (foreground_collector_type_ == kCollectorTypeCC) { // Need to use a low address so that we can allocate a contiguous // 2 * Xmx space when there's no image (dex2oat for target). +#if defined(__LP64__) CHECK_GE(300 * MB, non_moving_space_capacity); requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity; +#else + // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000. 
+ requested_alloc_space_begin = reinterpret_cast<uint8_t*>(0x20000000); +#endif } // Load image space(s). @@ -369,7 +374,12 @@ Heap::Heap(size_t initial_size, &error_str)); CHECK(non_moving_space_mem_map != nullptr) << error_str; // Try to reserve virtual memory at a lower address if we have a separate non moving space. +#if defined(__LP64__) request_begin = reinterpret_cast<uint8_t*>(300 * MB); +#else + // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000. + request_begin = reinterpret_cast<uint8_t*>(0x20000000) + non_moving_space_capacity; +#endif } // Attempt to create 2 mem maps at or after the requested begin. if (foreground_collector_type_ != kCollectorTypeCC) { @@ -3352,7 +3362,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { void Heap::PreGcVerification(collector::GarbageCollector* gc) { if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) { - collector::GarbageCollector::ScopedPause pause(gc); + collector::GarbageCollector::ScopedPause pause(gc, false); PreGcVerificationPaused(gc); } } @@ -3420,7 +3430,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) { void Heap::PostGcVerification(collector::GarbageCollector* gc) { if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) { - collector::GarbageCollector::ScopedPause pause(gc); + collector::GarbageCollector::ScopedPause pause(gc, false); PostGcVerificationPaused(gc); } } diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index e56f0dc613..ffbca525d9 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -32,6 +32,7 @@ #include "base/scoped_flock.h" #include "base/systrace.h" #include "base/time_utils.h" +#include "exec_utils.h" #include "gc/accounting/space_bitmap-inl.h" #include "image-inl.h" #include "image_space_fs.h" @@ -1225,9 +1226,9 @@ class ImageSpaceLoader { } dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, 
fixup_adapter); } - mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes(); if (types != nullptr) { - mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types); + GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types); if (types != new_types) { dex_cache->SetResolvedTypes(new_types); } diff --git a/runtime/image.cc b/runtime/image.cc index 87f429568d..54b099eb14 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -25,7 +25,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '7', '\0' }; // hash-based DexCache types +const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '6', '\0' }; // Erroneous resolved class. ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index c235317020..28bcb97105 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -438,22 +438,14 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) { // about ALWAYS_INLINE (-Werror, -Wgcc-compat) in definitions. 
// -// b/30419309 -#if defined(__i386__) -#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE __attribute__((optnone)) -#else -#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE ALWAYS_INLINE -#endif - template <bool is_range, bool do_assignability_check> -IF_X86_OPTNONE_ELSE_ALWAYS_INLINE -static bool DoCallCommon(ArtMethod* called_method, - Thread* self, - ShadowFrame& shadow_frame, - JValue* result, - uint16_t number_of_inputs, - uint32_t (&arg)[Instruction::kMaxVarArgRegs], - uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_); +static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method, + Thread* self, + ShadowFrame& shadow_frame, + JValue* result, + uint16_t number_of_inputs, + uint32_t (&arg)[Instruction::kMaxVarArgRegs], + uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_); template <bool is_range> ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame, diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S index 07b12100fd..c2bca91ebf 100644 --- a/runtime/interpreter/mterp/mips64/bincmp.S +++ b/runtime/interpreter/mterp/mips64/bincmp.S @@ -6,7 +6,6 @@ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S index 27ce580642..44e77a41d8 100644 --- a/runtime/interpreter/mterp/mips64/op_packed_switch.S +++ b/runtime/interpreter/mterp/mips64/op_packed_switch.S @@ -10,7 +10,6 @@ */ /* op vAA, +BBBBBBBB */ .extern $func - .extern MterpProfileBranch lh a0, 2(rPC) # a0 <- bbbb (lo) lh a1, 4(rPC) # a1 <- BBBB (hi) srl a3, rINST, 8 # a3 <- AA diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index 369c2614a7..75ab91acba 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ 
-768,38 +768,32 @@ extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method, return MterpSetUpHotnessCountdown(method, shadow_frame); } -// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates. -extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset) - REQUIRES_SHARED(Locks::mutator_lock_) { - ArtMethod* method = shadow_frame->GetMethod(); - JValue* result = shadow_frame->GetResultRegister(); - uint32_t dex_pc = shadow_frame->GetDexPC(); - jit::Jit* jit = Runtime::Current()->GetJit(); - if ((jit != nullptr) && (offset <= 0)) { - jit->AddSamples(self, method, 1, /*with_backedges*/ true); - } - int16_t countdown_value = MterpSetUpHotnessCountdown(method, shadow_frame); - if (countdown_value == jit::kJitCheckForOSR) { - return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result); - } else { - return false; - } -} - extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self, ShadowFrame* shadow_frame, int32_t offset) REQUIRES_SHARED(Locks::mutator_lock_) { - ArtMethod* method = shadow_frame->GetMethod(); - JValue* result = shadow_frame->GetResultRegister(); - uint32_t dex_pc = shadow_frame->GetDexPC(); - jit::Jit* jit = Runtime::Current()->GetJit(); - if (offset <= 0) { - // Keep updating hotness in case a compilation request was dropped. Eventually it will retry. - jit->AddSamples(self, method, 1, /*with_backedges*/ true); + int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1; + bool did_osr = false; + /* + * To reduce the cost of polling the compiler to determine whether the requested OSR + * compilation has completed, only check every Nth time. NOTE: the "osr_countdown <= 0" + * condition is satisfied either by the decrement below or the initial setting of + * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1. 
+ */ + if (osr_countdown <= 0) { + ArtMethod* method = shadow_frame->GetMethod(); + JValue* result = shadow_frame->GetResultRegister(); + uint32_t dex_pc = shadow_frame->GetDexPC(); + jit::Jit* jit = Runtime::Current()->GetJit(); + osr_countdown = jit::Jit::kJitRecheckOSRThreshold; + if (offset <= 0) { + // Keep updating hotness in case a compilation request was dropped. Eventually it will retry. + jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true); + } + did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result); } - // Assumes caller has already determined that an OSR check is appropriate. - return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result); + shadow_frame->SetCachedHotnessCountdown(osr_countdown); + return did_osr; } } // namespace interpreter diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S index bf096664df..013bb32e8f 100644 --- a/runtime/interpreter/mterp/out/mterp_mips64.S +++ b/runtime/interpreter/mterp/out/mterp_mips64.S @@ -1174,7 +1174,6 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, +BBBBBBBB */ .extern MterpDoPackedSwitch - .extern MterpProfileBranch lh a0, 2(rPC) # a0 <- bbbb (lo) lh a1, 4(rPC) # a1 <- BBBB (hi) srl a3, rINST, 8 # a3 <- AA @@ -1201,7 +1200,6 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, +BBBBBBBB */ .extern MterpDoSparseSwitch - .extern MterpProfileBranch lh a0, 2(rPC) # a0 <- bbbb (lo) lh a1, 4(rPC) # a1 <- BBBB (hi) srl a3, rINST, 8 # a3 <- AA @@ -1396,7 +1394,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) @@ -1423,7 +1420,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern 
MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) @@ -1450,7 +1446,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) @@ -1477,7 +1472,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) @@ -1504,7 +1498,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) @@ -1531,7 +1524,6 @@ artMterpAsmInstructionStart = .L_op_nop * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le */ /* if-cmp vA, vB, +CCCC */ - .extern MterpProfileBranch ext a2, rINST, 8, 4 # a2 <- A ext a3, rINST, 12, 4 # a3 <- B lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index feb6e0857a..371e2f1e65 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -401,6 +401,25 @@ void UnstartedRuntime::UnstartedClassGetDeclaredConstructor( result->SetL(constructor); } +void UnstartedRuntime::UnstartedClassGetDeclaringClass( + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { + StackHandleScope<1> hs(self); + Handle<mirror::Class> klass(hs.NewHandle( + reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset)))); + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { + 
result->SetL(nullptr); + return; + } + // Return null for anonymous classes. + JValue is_anon_result; + UnstartedClassIsAnonymousClass(self, shadow_frame, &is_anon_result, arg_offset); + if (is_anon_result.GetZ() != 0) { + result->SetL(nullptr); + return; + } + result->SetL(annotations::GetDeclaringClass(klass)); +} + void UnstartedRuntime::UnstartedClassGetEnclosingClass( Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { StackHandleScope<1> hs(self); @@ -420,6 +439,23 @@ void UnstartedRuntime::UnstartedClassGetInnerClassFlags( result->SetI(mirror::Class::GetInnerClassFlags(klass, default_value)); } +void UnstartedRuntime::UnstartedClassIsAnonymousClass( + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { + StackHandleScope<1> hs(self); + Handle<mirror::Class> klass(hs.NewHandle( + reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset)))); + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { + result->SetZ(false); + return; + } + mirror::String* class_name = nullptr; + if (!annotations::GetInnerClass(klass, &class_name)) { + result->SetZ(false); + return; + } + result->SetZ(class_name == nullptr); +} + static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file, const char* entry_name, size_t* size, diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h index b8553b5771..96b35e4e9c 100644 --- a/runtime/interpreter/unstarted_runtime_list.h +++ b/runtime/interpreter/unstarted_runtime_list.h @@ -28,8 +28,10 @@ V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \ V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \ V(ClassGetDeclaredConstructor, "java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructorInternal(java.lang.Class[])") \ + V(ClassGetDeclaringClass, 
"java.lang.Class java.lang.Class.getDeclaringClass()") \ V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \ V(ClassGetInnerClassFlags, "int java.lang.Class.getInnerClassFlags(int)") \ + V(ClassIsAnonymousClass, "boolean java.lang.Class.isAnonymousClass()") \ V(ClassLoaderGetResourceAsStream, "java.io.InputStream java.lang.ClassLoader.getResourceAsStream(java.lang.String)") \ V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \ V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \ diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index b190c81aff..ae55f4c2ef 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -885,5 +885,64 @@ TEST_F(UnstartedRuntimeTest, Pow) { ShadowFrame::DeleteDeoptimizedFrame(tmp); } +TEST_F(UnstartedRuntimeTest, IsAnonymousClass) { + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + + JValue result; + ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0); + + mirror::Class* class_klass = mirror::Class::GetJavaLangClass(); + shadow_frame->SetVRegReference(0, class_klass); + UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0); + EXPECT_EQ(result.GetZ(), 0); + + jobject class_loader = LoadDex("Nested"); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader))); + mirror::Class* c = class_linker_->FindClass(soa.Self(), "LNested$1;", loader); + ASSERT_TRUE(c != nullptr); + shadow_frame->SetVRegReference(0, c); + UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0); + EXPECT_EQ(result.GetZ(), 1); + + ShadowFrame::DeleteDeoptimizedFrame(shadow_frame); +} + +TEST_F(UnstartedRuntimeTest, GetDeclaringClass) { + Thread* self = Thread::Current(); + 
ScopedObjectAccess soa(self); + + JValue result; + ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0); + + jobject class_loader = LoadDex("Nested"); + StackHandleScope<4> hs(self); + Handle<mirror::ClassLoader> loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader))); + + Handle<mirror::Class> nested_klass(hs.NewHandle( + class_linker_->FindClass(soa.Self(), "LNested;", loader))); + Handle<mirror::Class> inner_klass(hs.NewHandle( + class_linker_->FindClass(soa.Self(), "LNested$Inner;", loader))); + Handle<mirror::Class> anon_klass(hs.NewHandle( + class_linker_->FindClass(soa.Self(), "LNested$1;", loader))); + + shadow_frame->SetVRegReference(0, nested_klass.Get()); + UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0); + EXPECT_EQ(result.GetL(), nullptr); + + shadow_frame->SetVRegReference(0, inner_klass.Get()); + UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0); + EXPECT_EQ(result.GetL(), nested_klass.Get()); + + shadow_frame->SetVRegReference(0, anon_klass.Get()); + UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0); + EXPECT_EQ(result.GetL(), nullptr); + + ShadowFrame::DeleteDeoptimizedFrame(shadow_frame); +} + } // namespace interpreter } // namespace art diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 4112142a4f..d566799340 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -54,6 +54,8 @@ class Jit { static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000; static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000; static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500; + // How frequently should the interpreter check to see if OSR compilation is ready. 
+ static constexpr int16_t kJitRecheckOSRThreshold = 100; virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 45611a93f7..f5151b588a 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -1141,8 +1141,17 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* return nullptr; } if (kIsDebugBuild && method != nullptr) { - DCHECK_EQ(it->second, method) - << ArtMethod::PrettyMethod(method) << " " << ArtMethod::PrettyMethod(it->second) << " " + // When we are walking the stack to redefine classes and creating obsolete methods it is + // possible that we might have updated the method_code_map by making this method obsolete in a + // previous frame. Therefore we should just check that the non-obsolete version of this method + // is the one we expect. We change to the non-obsolete versions in the error message since the + // obsolete version of the method might not be fully initialized yet. This situation can only + // occur when we are in the process of allocating and setting up obsolete methods. Otherwise + // method and it->second should be identical. (See runtime/openjdkjvmti/ti_redefine.cc for more + // information.) 
+ DCHECK_EQ(it->second->GetNonObsoleteMethod(), method->GetNonObsoleteMethod()) + << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " " + << ArtMethod::PrettyMethod(it->second->GetNonObsoleteMethod()) << " " << std::hex << pc; } return method_header; diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc index b0ecab40c5..13cc5c99bc 100644 --- a/runtime/memory_region.cc +++ b/runtime/memory_region.cc @@ -29,8 +29,7 @@ void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion& from) const { CHECK_GT(from.size(), 0U); CHECK_GE(this->size(), from.size()); CHECK_LE(offset, this->size() - from.size()); - memmove(reinterpret_cast<void*>(start() + offset), - from.pointer(), from.size()); + memmove(reinterpret_cast<void*>(begin() + offset), from.pointer(), from.size()); } void MemoryRegion::StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) { diff --git a/runtime/memory_region.h b/runtime/memory_region.h index f55dff7a50..7cf5d49d70 100644 --- a/runtime/memory_region.h +++ b/runtime/memory_region.h @@ -35,6 +35,12 @@ namespace art { // of the region. class MemoryRegion FINAL : public ValueObject { public: + struct ContentEquals { + constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const { + return lhs.size() == rhs.size() && memcmp(lhs.begin(), rhs.begin(), lhs.size()) == 0; + } + }; + MemoryRegion() : pointer_(nullptr), size_(0) {} MemoryRegion(void* pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {} @@ -46,8 +52,8 @@ class MemoryRegion FINAL : public ValueObject { return OFFSETOF_MEMBER(MemoryRegion, pointer_); } - uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); } - uint8_t* end() const { return start() + size_; } + uint8_t* begin() const { return reinterpret_cast<uint8_t*>(pointer_); } + uint8_t* end() const { return begin() + size_; } // Load value of type `T` at `offset`. 
The memory address corresponding // to `offset` should be word-aligned (on ARM, this is a requirement). @@ -131,7 +137,7 @@ class MemoryRegion FINAL : public ValueObject { // Do not touch any memory if the range is empty. return 0; } - const uint8_t* address = start() + bit_offset / kBitsPerByte; + const uint8_t* address = begin() + bit_offset / kBitsPerByte; const uint32_t shift = bit_offset & (kBitsPerByte - 1); // Load the value (reading only the strictly needed bytes). const uint32_t load_bit_count = shift + length; @@ -165,11 +171,18 @@ class MemoryRegion FINAL : public ValueObject { void CopyFrom(size_t offset, const MemoryRegion& from) const; + template<class Vector> + void CopyFromVector(size_t offset, Vector& vector) const { + if (!vector.empty()) { + CopyFrom(offset, MemoryRegion(vector.data(), vector.size())); + } + } + // Compute a sub memory region based on an existing one. ALWAYS_INLINE MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const { CHECK_GE(this->size(), size_in); CHECK_LE(offset, this->size() - size_in); - return MemoryRegion(reinterpret_cast<void*>(start() + offset), size_in); + return MemoryRegion(reinterpret_cast<void*>(begin() + offset), size_in); } // Compute an extended memory region based on an existing one. @@ -183,7 +196,7 @@ class MemoryRegion FINAL : public ValueObject { ALWAYS_INLINE T* ComputeInternalPointer(size_t offset) const { CHECK_GE(size(), sizeof(T)); CHECK_LE(offset, size() - sizeof(T)); - return reinterpret_cast<T*>(start() + offset); + return reinterpret_cast<T*>(begin() + offset); } // Locate the bit with the given offset. 
Returns a pointer to the byte diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 85636fb5b1..f08d4daf95 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -951,8 +951,7 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3 return interfaces->Get(idx); } else { dex::TypeIndex type_idx = klass->GetDirectInterfaceTypeIdx(idx); - ObjPtr<Class> interface = ClassLinker::LookupResolvedType( - type_idx, klass->GetDexCache(), klass->GetClassLoader()); + ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx); return interface; } } diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index bef3ad29a3..a59bb7b880 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -40,22 +40,14 @@ inline uint32_t DexCache::ClassSize(PointerSize pointer_size) { return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } -inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) { +inline mirror::String* DexCache::GetResolvedString(dex::StringIndex string_idx) { DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds()); - const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize; - DCHECK_LT(slot_idx, NumStrings()); - return slot_idx; + return StringDexCachePair::Lookup(GetStrings(), string_idx.index_, NumStrings()).Read(); } -inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) { - return GetStrings()[StringSlotIndex(string_idx)].load( - std::memory_order_relaxed).GetObjectForIndex(string_idx.index_); -} - -inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) { - DCHECK(resolved != nullptr); - GetStrings()[StringSlotIndex(string_idx)].store( - StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed); +inline void DexCache::SetResolvedString(dex::StringIndex string_idx, + ObjPtr<mirror::String> resolved) { + 
StringDexCachePair::Assign(GetStrings(), string_idx.index_, resolved.Ptr(), NumStrings()); Runtime* const runtime = Runtime::Current(); if (UNLIKELY(runtime->IsActiveTransaction())) { DCHECK(runtime->IsAotCompiler()); @@ -66,70 +58,50 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<Stri } inline void DexCache::ClearString(dex::StringIndex string_idx) { + const uint32_t slot_idx = string_idx.index_ % NumStrings(); DCHECK(Runtime::Current()->IsAotCompiler()); - uint32_t slot_idx = StringSlotIndex(string_idx); StringDexCacheType* slot = &GetStrings()[slot_idx]; // This is racy but should only be called from the transactional interpreter. if (slot->load(std::memory_order_relaxed).index == string_idx.index_) { - StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx)); + StringDexCachePair cleared( + nullptr, + StringDexCachePair::InvalidIndexForSlot(slot_idx)); slot->store(cleared, std::memory_order_relaxed); } } -inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) { - DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds()); - const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize; - DCHECK_LT(slot_idx, NumResolvedTypes()); - return slot_idx; -} - inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) { // It is theorized that a load acquire is not required since obtaining the resolved class will // always have an address dependency or a lock. - return GetResolvedTypes()[TypeSlotIndex(type_idx)].load( - std::memory_order_relaxed).GetObjectForIndex(type_idx.index_); + DCHECK_LT(type_idx.index_, NumResolvedTypes()); + return GetResolvedTypes()[type_idx.index_].Read(); } inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) { - DCHECK(resolved != nullptr); + DCHECK_LT(type_idx.index_, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB. // TODO default transaction support. // Use a release store for SetResolvedType. 
This is done to prevent other threads from seeing a // class but not necessarily seeing the loaded members like the static fields array. // See b/32075261. - GetResolvedTypes()[TypeSlotIndex(type_idx)].store( - TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release); + reinterpret_cast<Atomic<GcRoot<mirror::Class>>&>(GetResolvedTypes()[type_idx.index_]). + StoreRelease(GcRoot<Class>(resolved)); // TODO: Fine-grained marking, so that we don't need to go through all arrays in full. Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this); } -inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) { - DCHECK(Runtime::Current()->IsAotCompiler()); - uint32_t slot_idx = TypeSlotIndex(type_idx); - TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx]; - // This is racy but should only be called from the single-threaded ImageWriter and tests. - if (slot->load(std::memory_order_relaxed).index == type_idx.index_) { - TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx)); - slot->store(cleared, std::memory_order_relaxed); - } -} - -inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) { +inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) { DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds()); - const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize; - DCHECK_LT(slot_idx, NumResolvedMethodTypes()); - return slot_idx; -} - -inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) { - return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load( - std::memory_order_relaxed).GetObjectForIndex(proto_idx); + return MethodTypeDexCachePair::Lookup( + GetResolvedMethodTypes(), proto_idx, NumResolvedMethodTypes()).Read(); } inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) { - DCHECK(resolved != nullptr); - GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store( - 
MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed); + DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); + DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds()); + + MethodTypeDexCachePair::Assign(GetResolvedMethodTypes(), proto_idx, resolved, + NumResolvedMethodTypes()); // TODO: Fine-grained marking, so that we don't need to go through all arrays in full. Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this); } @@ -226,49 +198,49 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor); // Visit arrays after. if (kVisitNativeRoots) { - VisitDexCachePairs<String, kReadBarrierOption, Visitor>( + VisitDexCachePairs<mirror::String, kReadBarrierOption, Visitor>( GetStrings(), NumStrings(), visitor); - VisitDexCachePairs<Class, kReadBarrierOption, Visitor>( - GetResolvedTypes(), NumResolvedTypes(), visitor); + GcRoot<mirror::Class>* resolved_types = GetResolvedTypes(); + for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) { + visitor.VisitRootIfNonNull(resolved_types[i].AddressWithoutBarrier()); + } - VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>( + VisitDexCachePairs<mirror::MethodType, kReadBarrierOption, Visitor>( GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor); } } template <ReadBarrierOption kReadBarrierOption, typename Visitor> -inline void DexCache::FixupStrings(StringDexCacheType* dest, const Visitor& visitor) { - StringDexCacheType* src = GetStrings(); +inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) { + mirror::StringDexCacheType* src = GetStrings(); for (size_t i = 0, count = NumStrings(); i < count; ++i) { StringDexCachePair source = src[i].load(std::memory_order_relaxed); - String* ptr = source.object.Read<kReadBarrierOption>(); - String* new_source = visitor(ptr); + mirror::String* ptr = source.object.Read<kReadBarrierOption>(); + 
mirror::String* new_source = visitor(ptr); source.object = GcRoot<String>(new_source); dest[i].store(source, std::memory_order_relaxed); } } template <ReadBarrierOption kReadBarrierOption, typename Visitor> -inline void DexCache::FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) { - TypeDexCacheType* src = GetResolvedTypes(); +inline void DexCache::FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor) { + GcRoot<mirror::Class>* src = GetResolvedTypes(); for (size_t i = 0, count = NumResolvedTypes(); i < count; ++i) { - TypeDexCachePair source = src[i].load(std::memory_order_relaxed); - Class* ptr = source.object.Read<kReadBarrierOption>(); - Class* new_source = visitor(ptr); - source.object = GcRoot<Class>(new_source); - dest[i].store(source, std::memory_order_relaxed); + mirror::Class* source = src[i].Read<kReadBarrierOption>(); + mirror::Class* new_source = visitor(source); + dest[i] = GcRoot<mirror::Class>(new_source); } } template <ReadBarrierOption kReadBarrierOption, typename Visitor> -inline void DexCache::FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, +inline void DexCache::FixupResolvedMethodTypes(mirror::MethodTypeDexCacheType* dest, const Visitor& visitor) { - MethodTypeDexCacheType* src = GetResolvedMethodTypes(); + mirror::MethodTypeDexCacheType* src = GetResolvedMethodTypes(); for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) { MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed); - MethodType* ptr = source.object.Read<kReadBarrierOption>(); - MethodType* new_source = visitor(ptr); + mirror::MethodType* ptr = source.object.Read<kReadBarrierOption>(); + mirror::MethodType* new_source = visitor(ptr); source.object = GcRoot<MethodType>(new_source); dest[i].store(source, std::memory_order_relaxed); } diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc index 3103a92c83..741cf3bb47 100644 --- a/runtime/mirror/dex_cache.cc +++ b/runtime/mirror/dex_cache.cc @@ 
-58,8 +58,8 @@ void DexCache::InitializeDexCache(Thread* self, mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr : reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset()); - mirror::TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr : - reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset()); + GcRoot<mirror::Class>* types = (dex_file->NumTypeIds() == 0u) ? nullptr : + reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset()); ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr : reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset()); ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr : @@ -69,10 +69,6 @@ void DexCache::InitializeDexCache(Thread* self, if (dex_file->NumStringIds() < num_strings) { num_strings = dex_file->NumStringIds(); } - size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize; - if (dex_file->NumTypeIds() < num_types) { - num_types = dex_file->NumTypeIds(); - } // Note that we allocate the method type dex caches regardless of this flag, // and we make sure here that they're not used by the runtime. 
This is in the @@ -108,9 +104,8 @@ void DexCache::InitializeDexCache(Thread* self, CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u); CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull()); } - for (size_t i = 0; i < num_types; ++i) { - CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u); - CHECK(types[i].load(std::memory_order_relaxed).object.IsNull()); + for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) { + CHECK(types[i].IsNull()); } for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) { CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr); @@ -126,9 +121,6 @@ void DexCache::InitializeDexCache(Thread* self, if (strings != nullptr) { mirror::StringDexCachePair::Initialize(strings); } - if (types != nullptr) { - mirror::TypeDexCachePair::Initialize(types); - } if (method_types != nullptr) { mirror::MethodTypeDexCachePair::Initialize(method_types); } @@ -137,7 +129,7 @@ void DexCache::InitializeDexCache(Thread* self, strings, num_strings, types, - num_types, + dex_file->NumTypeIds(), methods, dex_file->NumMethodIds(), fields, @@ -151,7 +143,7 @@ void DexCache::Init(const DexFile* dex_file, ObjPtr<String> location, StringDexCacheType* strings, uint32_t num_strings, - TypeDexCacheType* resolved_types, + GcRoot<Class>* resolved_types, uint32_t num_resolved_types, ArtMethod** resolved_methods, uint32_t num_resolved_methods, diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index e68b0c7219..6f88cc5df4 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -18,14 +18,14 @@ #define ART_RUNTIME_MIRROR_DEX_CACHE_H_ #include "array.h" -#include "base/bit_utils.h" +#include "art_field.h" +#include "class.h" #include "dex_file_types.h" #include "object.h" #include "object_array.h" namespace art { -class ArtField; class ArtMethod; struct DexCacheOffsets; class DexFile; @@ -36,7 +36,6 @@ class Thread; namespace mirror { -class Class; class MethodType; class String; @@ 
-61,7 +60,7 @@ template <typename T> struct PACKED(8) DexCachePair { // it's always non-null if the id branch succeeds (except for the 0th id). // Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail // the lookup id == stored id branch. - DexCachePair(ObjPtr<T> object, uint32_t index) + DexCachePair(T* object, uint32_t index) : object(object), index(index) {} DexCachePair() = default; @@ -75,28 +74,39 @@ template <typename T> struct PACKED(8) DexCachePair { dex_cache[0].store(first_elem, std::memory_order_relaxed); } + static GcRoot<T> Lookup(std::atomic<DexCachePair<T>>* dex_cache, + uint32_t idx, + uint32_t cache_size) { + DCHECK_NE(cache_size, 0u); + DexCachePair<T> element = dex_cache[idx % cache_size].load(std::memory_order_relaxed); + if (idx != element.index) { + return GcRoot<T>(nullptr); + } + + DCHECK(!element.object.IsNull()); + return element.object; + } + + static void Assign(std::atomic<DexCachePair<T>>* dex_cache, + uint32_t idx, + T* object, + uint32_t cache_size) { + DCHECK_LT(idx % cache_size, cache_size); + dex_cache[idx % cache_size].store( + DexCachePair<T>(object, idx), std::memory_order_relaxed); + } + static uint32_t InvalidIndexForSlot(uint32_t slot) { // Since the cache size is a power of two, 0 will always map to slot 0. // Use 1 for slot 0 and 0 for all other slots. return (slot == 0) ? 
1u : 0u; } - - T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) { - if (idx != index) { - return nullptr; - } - DCHECK(!object.IsNull()); - return object.Read(); - } }; -using TypeDexCachePair = DexCachePair<Class>; -using TypeDexCacheType = std::atomic<TypeDexCachePair>; - -using StringDexCachePair = DexCachePair<String>; +using StringDexCachePair = DexCachePair<mirror::String>; using StringDexCacheType = std::atomic<StringDexCachePair>; -using MethodTypeDexCachePair = DexCachePair<MethodType>; +using MethodTypeDexCachePair = DexCachePair<mirror::MethodType>; using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>; // C++ mirror of java.lang.DexCache. @@ -105,11 +115,6 @@ class MANAGED DexCache FINAL : public Object { // Size of java.lang.DexCache.class. static uint32_t ClassSize(PointerSize pointer_size); - // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold. - static constexpr size_t kDexCacheTypeCacheSize = 1024; - static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize), - "Type dex cache size is not a power of 2."); - // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold. 
static constexpr size_t kDexCacheStringCacheSize = 1024; static_assert(IsPowerOfTwo(kDexCacheStringCacheSize), @@ -121,10 +126,6 @@ class MANAGED DexCache FINAL : public Object { static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize), "MethodType dex cache size is not a power of 2."); - static constexpr size_t StaticTypeSize() { - return kDexCacheTypeCacheSize; - } - static constexpr size_t StaticStringSize() { return kDexCacheStringCacheSize; } @@ -155,7 +156,7 @@ class MANAGED DexCache FINAL : public Object { REQUIRES_SHARED(Locks::mutator_lock_); template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor> - void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) + void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_); template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor> @@ -210,7 +211,7 @@ class MANAGED DexCache FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_); } - String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE + mirror::String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_); void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE @@ -225,8 +226,6 @@ class MANAGED DexCache FINAL : public Object { void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) REQUIRES_SHARED(Locks::mutator_lock_); - void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_); - ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_); @@ -255,11 +254,11 @@ class MANAGED DexCache FINAL : public Object { SetFieldPtr<false>(StringsOffset(), strings); } - TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) { - return 
GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset()); + GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) { + return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset()); } - void SetResolvedTypes(TypeDexCacheType* resolved_types) + void SetResolvedTypes(GcRoot<Class>* resolved_types) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) { SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types); @@ -324,7 +323,7 @@ class MANAGED DexCache FINAL : public Object { SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file); } - void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_); + void SetLocation(ObjPtr<mirror::String> location) REQUIRES_SHARED(Locks::mutator_lock_); // NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField** // provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(), @@ -341,7 +340,7 @@ class MANAGED DexCache FINAL : public Object { ObjPtr<String> location, StringDexCacheType* strings, uint32_t num_strings, - TypeDexCacheType* resolved_types, + GcRoot<Class>* resolved_types, uint32_t num_resolved_types, ArtMethod** resolved_methods, uint32_t num_resolved_methods, @@ -352,16 +351,12 @@ class MANAGED DexCache FINAL : public Object { PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_); - uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_); - uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_); - uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_); - // Visit instance fields of the dex cache as well as its associated arrays. 
template <bool kVisitNativeRoots, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor> - void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) + void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); HeapReference<Object> dex_; @@ -371,7 +366,7 @@ class MANAGED DexCache FINAL : public Object { uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with // num_resolved_method_types_ elements. uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements. - uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements. + uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements. uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_ // elements. diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc index 5693f67646..8f978e122c 100644 --- a/runtime/mirror/dex_cache_test.cc +++ b/runtime/mirror/dex_cache_test.cc @@ -51,8 +51,7 @@ TEST_F(DexCacheTest, Open) { EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings() || java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings()); - EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes() - || java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes()); + EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes()); EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods()); EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields()); EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes() diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc index 0b667fec45..f1c350f23c 100644 --- a/runtime/native/java_lang_DexCache.cc +++ 
b/runtime/native/java_lang_DexCache.cc @@ -53,7 +53,7 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) { static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) { ScopedFastNativeObjectAccess soa(env); ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds()); + CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes()); return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index))); } @@ -69,11 +69,8 @@ static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint typ jobject type) { ScopedFastNativeObjectAccess soa(env); ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds()); - ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type); - if (t != nullptr) { - dex_cache->SetResolvedType(dex::TypeIndex(type_index), t); - } + CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes()); + dex_cache->SetResolvedType(dex::TypeIndex(type_index), soa.Decode<mirror::Class>(type)); } static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index, @@ -81,10 +78,7 @@ static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint s ScopedFastNativeObjectAccess soa(env); ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds()); - ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string); - if (s != nullptr) { - dex_cache->SetResolvedString(dex::StringIndex(string_index), s); - } + dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string)); } static JNINativeMethod gMethods[] = { diff --git a/runtime/oat.h b/runtime/oat.h index 
62f010ba97..532c9681c3 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '1', '0', '6', '\0' }; // hash-based DexCache types + static constexpr uint8_t kOatVersion[] = { '1', '0', '9', '\0' }; // Register mask change. static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index d47f1b5611..31eb1ccdc8 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -193,7 +193,7 @@ bool OatFileBase::LoadVdex(const std::string& vdex_filename, bool writable, bool low_4gb, std::string* error_msg) { - vdex_.reset(VdexFile::Open(vdex_filename, writable, low_4gb, error_msg)); + vdex_ = VdexFile::Open(vdex_filename, writable, low_4gb, error_msg); if (vdex_.get() == nullptr) { *error_msg = StringPrintf("Failed to load vdex file '%s' %s", vdex_filename.c_str(), diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index b19ace5464..77cdd28d3a 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -25,6 +25,7 @@ #include "base/logging.h" #include "compiler_filter.h" #include "class_linker.h" +#include "exec_utils.h" #include "gc/heap.h" #include "gc/space/image_space.h" #include "image.h" @@ -33,6 +34,7 @@ #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "utils.h" +#include "vdex_file.h" namespace art { @@ -216,28 +218,38 @@ std::string OatFileAssistant::GetStatusDump() { bool oat_file_exists = false; bool odex_file_exists = false; if (oat_.Status() != kOatCannotOpen) { - // If we can open the file, neither Filename nor GetFile should return null. + // If we can open the file, Filename should not return null. 
CHECK(oat_.Filename() != nullptr); - CHECK(oat_.GetFile() != nullptr); oat_file_exists = true; - status << *oat_.Filename() << " [compilation_filter="; - status << CompilerFilter::NameOfFilter(oat_.GetFile()->GetCompilerFilter()); - status << ", status=" << oat_.Status(); + status << *oat_.Filename() << "[status=" << oat_.Status() << ", "; + const OatFile* file = oat_.GetFile(); + if (file == nullptr) { + // If the file is null even though the status is not kOatCannotOpen, it + // means we must have a vdex file with no corresponding oat file. In + // this case we cannot determine the compilation filter. Indicate that + // we have only the vdex file instead. + status << "vdex-only"; + } else { + status << "compilation_filter=" << CompilerFilter::NameOfFilter(file->GetCompilerFilter()); + } } if (odex_.Status() != kOatCannotOpen) { - // If we can open the file, neither Filename nor GetFile should return null. + // If we can open the file, Filename should not return null. CHECK(odex_.Filename() != nullptr); - CHECK(odex_.GetFile() != nullptr); odex_file_exists = true; if (oat_file_exists) { status << "] "; } - status << *odex_.Filename() << " [compilation_filter="; - status << CompilerFilter::NameOfFilter(odex_.GetFile()->GetCompilerFilter()); - status << ", status=" << odex_.Status(); + status << *odex_.Filename() << "[status=" << odex_.Status() << ", "; + const OatFile* file = odex_.GetFile(); + if (file == nullptr) { + status << "vdex-only"; + } else { + status << "compilation_filter=" << CompilerFilter::NameOfFilter(file->GetCompilerFilter()); + } } if (!oat_file_exists && !odex_file_exists) { @@ -303,24 +315,60 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileStatus() { return oat_.Status(); } -OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& file) { - // Verify the ART_USE_READ_BARRIER state. 
- const bool is_cc = file.GetOatHeader().IsConcurrentCopying(); - constexpr bool kRuntimeIsCC = kUseReadBarrier; - if (is_cc != kRuntimeIsCC) { - return kOatCannotOpen; +bool OatFileAssistant::DexChecksumUpToDate(const VdexFile& file, std::string* error_msg) { + if (file.GetHeader().GetNumberOfDexFiles() <= 0) { + VLOG(oat) << "Vdex does not contain any dex files"; + return false; } - // Verify the dex checksum. + // TODO: Use GetRequiredDexChecksum to get secondary checksums as well, not + // just the primary. Because otherwise we may fail to see a secondary + // checksum failure in the case when the original (multidex) files are + // stripped but we have a newer odex file. + const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum(); + if (dex_checksum_pointer != nullptr) { + uint32_t actual_checksum = file.GetLocationChecksum(0); + if (*dex_checksum_pointer != actual_checksum) { + VLOG(oat) << "Dex checksum does not match for primary dex: " << dex_location_ + << ". Expected: " << *dex_checksum_pointer + << ", Actual: " << actual_checksum; + return false; + } + } + + // Verify the dex checksums for any secondary multidex files + for (uint32_t i = 1; i < file.GetHeader().GetNumberOfDexFiles(); i++) { + std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location_.c_str()); + uint32_t expected_secondary_checksum = 0; + if (DexFile::GetChecksum(secondary_dex_location.c_str(), + &expected_secondary_checksum, + error_msg)) { + uint32_t actual_secondary_checksum = file.GetLocationChecksum(i); + if (expected_secondary_checksum != actual_secondary_checksum) { + VLOG(oat) << "Dex checksum does not match for secondary dex: " + << secondary_dex_location + << ". Expected: " << expected_secondary_checksum + << ", Actual: " << actual_secondary_checksum; + return false; + } + } else { + // If we can't get the checksum for the secondary location, we assume + // the dex checksum is up to date for this and all other secondary dex + // files. 
+ break; + } + } + return true; +} + +bool OatFileAssistant::DexChecksumUpToDate(const OatFile& file, std::string* error_msg) { // Note: GetOatDexFile will return null if the dex checksum doesn't match // what we provide, which verifies the primary dex checksum for us. - std::string error_msg; const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum(); const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile( - dex_location_.c_str(), dex_checksum_pointer, &error_msg); + dex_location_.c_str(), dex_checksum_pointer, error_msg); if (oat_dex_file == nullptr) { - LOG(ERROR) << error_msg; - return kOatDexOutOfDate; + return false; } // Verify the dex checksums for any secondary multidex files @@ -335,7 +383,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& uint32_t expected_secondary_checksum = 0; if (DexFile::GetChecksum(secondary_dex_location.c_str(), - &expected_secondary_checksum, &error_msg)) { + &expected_secondary_checksum, error_msg)) { uint32_t actual_secondary_checksum = secondary_oat_dex_file->GetDexFileLocationChecksum(); if (expected_secondary_checksum != actual_secondary_checksum) { @@ -343,7 +391,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& << secondary_dex_location << ". Expected: " << expected_secondary_checksum << ", Actual: " << actual_secondary_checksum; - return kOatDexOutOfDate; + return false; } } else { // If we can't get the checksum for the secondary location, we assume @@ -352,6 +400,35 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& break; } } + return true; +} + +OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& file) { + // Verify the ART_USE_READ_BARRIER state. + // TODO: Don't fully reject files due to read barrier state. If they contain + // compiled code and are otherwise okay, we should return something like + // kOatRelocationOutOfDate. 
If they don't contain compiled code, the read + // barrier state doesn't matter. + const bool is_cc = file.GetOatHeader().IsConcurrentCopying(); + constexpr bool kRuntimeIsCC = kUseReadBarrier; + if (is_cc != kRuntimeIsCC) { + return kOatCannotOpen; + } + + // Verify the dex checksum. + std::string error_msg; + if (kIsVdexEnabled) { + VdexFile* vdex = file.GetVdexFile(); + if (!DexChecksumUpToDate(*vdex, &error_msg)) { + LOG(ERROR) << error_msg; + return kOatDexOutOfDate; + } + } else { + if (!DexChecksumUpToDate(file, &error_msg)) { + LOG(ERROR) << error_msg; + return kOatDexOutOfDate; + } + } CompilerFilter::Filter current_compiler_filter = file.GetCompilerFilter(); @@ -777,7 +854,27 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() { status_attempted_ = true; const OatFile* file = GetFile(); if (file == nullptr) { - status_ = kOatCannotOpen; + // Check to see if there is a vdex file we can make use of. + std::string error_msg; + std::string vdex_filename = ReplaceFileExtension(filename_, "vdex"); + std::unique_ptr<VdexFile> vdex = VdexFile::Open(vdex_filename, + /*writeable*/false, + /*low_4gb*/false, + &error_msg); + if (vdex == nullptr) { + status_ = kOatCannotOpen; + VLOG(oat) << "unable to open vdex file " << vdex_filename << ": " << error_msg; + } else { + if (oat_file_assistant_->DexChecksumUpToDate(*vdex, &error_msg)) { + // The vdex file does not contain enough information to determine + // whether it is up to date with respect to the boot image, so we + // assume it is out of date. 
+ VLOG(oat) << error_msg; + status_ = kOatBootImageOutOfDate; + } else { + status_ = kOatDexOutOfDate; + } + } } else { status_ = oat_file_assistant_->GivenOatFileStatus(*file); VLOG(oat) << file->GetLocation() << " is " << status_ diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index 588a698be7..6d47ad2228 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -379,6 +379,16 @@ class OatFileAssistant { // Return info for the best oat file. OatFileInfo& GetBestInfo(); + // Returns true if the dex checksums in the given vdex file are up to date + // with respect to the dex location. If the dex checksums are not up to + // date, error_msg is updated with a message describing the problem. + bool DexChecksumUpToDate(const VdexFile& file, std::string* error_msg); + + // Returns true if the dex checksums in the given oat file are up to date + // with respect to the dex location. If the dex checksums are not up to + // date, error_msg is updated with a message describing the problem. + bool DexChecksumUpToDate(const OatFile& file, std::string* error_msg); + // Return the status for a given opened oat file with respect to the dex // location. OatStatus GivenOatFileStatus(const OatFile& file); diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index 577200847a..f777340cfd 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -111,27 +111,84 @@ TEST_F(OatFileAssistantTest, OatUpToDate) { EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); } -// Case: We have a DEX file and ODEX file for a different dex location. -// Expect: The status is kDex2OatNeeded. 
-TEST_F(OatFileAssistantTest, OatForDifferentDex) { - // Generate an odex file for OatForDifferentDex_A.jar - std::string dex_location_a = GetScratchDir() + "/OatForDifferentDex_A.jar"; - std::string odex_location = GetOdexDir() + "/OatForDifferentDex.odex"; - Copy(GetDexSrc1(), dex_location_a); - GenerateOdexForTest(dex_location_a, odex_location, CompilerFilter::kSpeed); - - // Try to use that odex file for OatForDifferentDex.jar - std::string dex_location = GetScratchDir() + "/OatForDifferentDex.jar"; +// Case: We have a DEX file and up-to-date (ODEX) VDEX file for it, but no +// ODEX file. +TEST_F(OatFileAssistantTest, VdexUpToDateNoOdex) { + // This test case is only meaningful if vdex is enabled. + if (!kIsVdexEnabled) { + return; + } + + std::string dex_location = GetScratchDir() + "/VdexUpToDateNoOdex.jar"; + std::string oat_location = GetOdexDir() + "/VdexUpToDateNoOdex.oat"; + Copy(GetDexSrc1(), dex_location); - OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false); + // Generating and deleting the oat file should have the side effect of + // creating an up-to-date vdex file. + GenerateOdexForTest(dex_location, oat_location, CompilerFilter::kSpeed); + ASSERT_EQ(0, unlink(oat_location.c_str())); + + OatFileAssistant oat_file_assistant(dex_location.c_str(), + oat_location.c_str(), + kRuntimeISA, + false); + + // Even though the vdex file is up to date, because we don't have the oat + // file, we can't know that the vdex depends on the boot image and is up to + // date with respect to the boot image. Instead we must assume the vdex file + // depends on the boot image and is out of date with respect to the boot + // image. + EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); + + // Make sure we don't crash in this case when we dump the status. We don't + // care what the actual dumped value is. 
+ oat_file_assistant.GetStatusDump(); +} + +// Case: We have a DEX file and empty VDEX and ODEX files. +TEST_F(OatFileAssistantTest, EmptyVdexOdex) { + std::string dex_location = GetScratchDir() + "/EmptyVdexOdex.jar"; + std::string odex_location = GetOdexDir() + "/EmptyVdexOdex.oat"; + std::string vdex_location = GetOdexDir() + "/EmptyVdexOdex.vdex"; + + Copy(GetDexSrc1(), dex_location); + ScratchFile vdex_file(vdex_location.c_str()); + ScratchFile odex_file(odex_location.c_str()); + OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false); EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); +} - EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatDexOutOfDate, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus()); +// Case: We have a DEX file and up-to-date (OAT) VDEX file for it, but no OAT +// file. +TEST_F(OatFileAssistantTest, VdexUpToDateNoOat) { + // This test case is only meaningful if vdex is enabled. + if (!kIsVdexEnabled) { + return; + } + + std::string dex_location = GetScratchDir() + "/VdexUpToDateNoOat.jar"; + std::string oat_location; + std::string error_msg; + ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename( + dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg; + + Copy(GetDexSrc1(), dex_location); + GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed); + ASSERT_EQ(0, unlink(oat_location.c_str())); + + OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false); + + // Even though the vdex file is up to date, because we don't have the oat + // file, we can't know that the vdex depends on the boot image and is up to + // date with respect to the boot image. Instead we must assume the vdex file + // depends on the boot image and is out of date with respect to the boot + // image. 
+ EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); } // Case: We have a DEX file and speed-profile OAT file for it. @@ -254,6 +311,56 @@ TEST_F(OatFileAssistantTest, OatDexOutOfDate) { EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); } +// Case: We have a DEX file and an (ODEX) VDEX file out of date with respect +// to the dex checksum, but no ODEX file. +TEST_F(OatFileAssistantTest, VdexDexOutOfDate) { + // This test case is only meaningful if vdex is enabled. + if (!kIsVdexEnabled) { + return; + } + + std::string dex_location = GetScratchDir() + "/VdexDexOutOfDate.jar"; + std::string oat_location = GetOdexDir() + "/VdexDexOutOfDate.oat"; + + Copy(GetDexSrc1(), dex_location); + GenerateOdexForTest(dex_location, oat_location, CompilerFilter::kSpeed); + ASSERT_EQ(0, unlink(oat_location.c_str())); + Copy(GetDexSrc2(), dex_location); + + OatFileAssistant oat_file_assistant(dex_location.c_str(), + oat_location.c_str(), + kRuntimeISA, + false); + + EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); +} + +// Case: We have a MultiDEX (ODEX) VDEX file where the secondary dex file is +// out of date and there is no corresponding ODEX file. +TEST_F(OatFileAssistantTest, VdexMultiDexSecondaryOutOfDate) { + // This test case is only meaningful if vdex is enabled. 
+ if (!kIsVdexEnabled) { + return; + } + + std::string dex_location = GetScratchDir() + "/VdexMultiDexSecondaryOutOfDate.jar"; + std::string oat_location = GetOdexDir() + "/VdexMultiDexSecondaryOutOfDate.oat"; + + Copy(GetMultiDexSrc1(), dex_location); + GenerateOdexForTest(dex_location, oat_location, CompilerFilter::kSpeed); + ASSERT_EQ(0, unlink(oat_location.c_str())); + Copy(GetMultiDexSrc2(), dex_location); + + OatFileAssistant oat_file_assistant(dex_location.c_str(), + oat_location.c_str(), + kRuntimeISA, + false); + + EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); +} + // Case: We have a DEX file and an OAT file out of date with respect to the // boot image. TEST_F(OatFileAssistantTest, OatImageOutOfDate) { @@ -945,6 +1052,4 @@ TEST_F(OatFileAssistantTest, DexOptStatusValues) { // - Dex is stripped, don't have odex. // - Oat file corrupted after status check, before reload unexecutable // because it's unrelocated and no dex2oat -// * Test unrelocated specific target compilation type can be relocated to -// make it up to date. 
} // namespace art diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc index b68fc60c6c..afec0bfac0 100644 --- a/runtime/openjdkjvmti/ti_class_loader.cc +++ b/runtime/openjdkjvmti/ti_class_loader.cc @@ -61,14 +61,20 @@ namespace openjdkjvmti { bool ClassLoaderHelper::AddToClassLoader(art::Thread* self, art::Handle<art::mirror::ClassLoader> loader, const art::DexFile* dex_file) { - art::StackHandleScope<2> hs(self); - art::Handle<art::mirror::Object> java_dex_file_obj(hs.NewHandle(FindSourceDexFileObject(self, - loader))); + art::ScopedObjectAccessUnchecked soa(self); + art::StackHandleScope<3> hs(self); + if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) { + art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file); + return true; + } + art::Handle<art::mirror::Object> java_dex_file_obj( + hs.NewHandle(FindSourceDexFileObject(self, loader))); if (java_dex_file_obj.IsNull()) { return false; } + art::Handle<art::mirror::LongArray> old_cookie(hs.NewHandle(GetDexFileCookie(java_dex_file_obj))); art::Handle<art::mirror::LongArray> cookie(hs.NewHandle( - AllocateNewDexFileCookie(self, java_dex_file_obj, dex_file))); + AllocateNewDexFileCookie(self, old_cookie, dex_file))); if (cookie.IsNull()) { return false; } @@ -94,12 +100,8 @@ void ClassLoaderHelper::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_ } } -// TODO Really wishing I had that mirror of java.lang.DexFile now. -art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie( - art::Thread* self, - art::Handle<art::mirror::Object> java_dex_file_obj, - const art::DexFile* dex_file) { - art::StackHandleScope<2> hs(self); +art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::GetDexFileCookie( + art::Handle<art::mirror::Object> java_dex_file_obj) { // mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until // the object is finalized. 
Since they always point to the same array if mCookie is not null we // just use the mInternalCookie field. We will update one or both of these fields later. @@ -108,9 +110,15 @@ art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie( "mInternalCookie", "Ljava/lang/Object;"); // TODO Add check that mCookie is either null or same as mInternalCookie CHECK(internal_cookie_field != nullptr); - art::Handle<art::mirror::LongArray> cookie( - hs.NewHandle(internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray())); - // TODO Maybe make these non-fatal. + return internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray(); +} + +// TODO Really wishing I had that mirror of java.lang.DexFile now. +art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie( + art::Thread* self, + art::Handle<art::mirror::LongArray> cookie, + const art::DexFile* dex_file) { + art::StackHandleScope<1> hs(self); CHECK(cookie.Get() != nullptr); CHECK_GE(cookie->GetLength(), 1); art::Handle<art::mirror::LongArray> new_cookie( @@ -123,8 +131,9 @@ art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie( // TODO Should I clear this field? // TODO This is a really crappy thing here with the first element being different. 
new_cookie->SetWithoutChecks<false>(0, cookie->GetWithoutChecks(0)); + // This must match the casts in runtime/native/dalvik_system_DexFile.cc:ConvertDexFilesToJavaArray new_cookie->SetWithoutChecks<false>( - 1, static_cast<int64_t>(reinterpret_cast<intptr_t>(dex_file))); + 1, static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file))); new_cookie->Memcpy(2, cookie.Get(), 1, cookie->GetLength() - 1); return new_cookie.Get(); } diff --git a/runtime/openjdkjvmti/ti_class_loader.h b/runtime/openjdkjvmti/ti_class_loader.h index 17ed0eb196..1ac49886cb 100644 --- a/runtime/openjdkjvmti/ti_class_loader.h +++ b/runtime/openjdkjvmti/ti_class_loader.h @@ -82,9 +82,12 @@ class ClassLoaderHelper { art::Thread* self, art::Handle<art::mirror::ClassLoader> loader) REQUIRES_SHARED(art::Locks::mutator_lock_); + static art::ObjPtr<art::mirror::LongArray> GetDexFileCookie( + art::Handle<art::mirror::Object> java_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_); + static art::ObjPtr<art::mirror::LongArray> AllocateNewDexFileCookie( art::Thread* self, - art::Handle<art::mirror::Object> java_dex_file, + art::Handle<art::mirror::LongArray> old_dex_file_cookie, const art::DexFile* new_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_); static void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc index da4757f50f..b7257f8994 100644 --- a/runtime/openjdkjvmti/ti_redefine.cc +++ b/runtime/openjdkjvmti/ti_redefine.cc @@ -121,6 +121,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { new_obsolete_method->CopyFrom(old_method, ptr_size); DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass()); new_obsolete_method->SetIsObsolete(); + new_obsolete_method->SetDontCompile(); obsolete_maps_->insert({old_method, new_obsolete_method}); // Update JIT Data structures to point to the new method. 
art::jit::Jit* jit = art::Runtime::Current()->GetJit(); @@ -445,7 +446,8 @@ void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::C art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking"); art::mirror::ClassExt* ext = art_klass->GetExtData(); CHECK(ext->GetObsoleteMethods() != nullptr); - CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator()); + art::ClassLinker* linker = driver_->runtime_->GetClassLinker(); + CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader())); // Add all the declared methods to the map for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) { ctx.obsolete_methods.insert(&m); @@ -700,35 +702,85 @@ class RedefinitionDataHolder { DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder); }; +// Looks through the previously allocated cookies to see if we need to update them with another new +// dexfile. This is so that even if multiple classes with the same classloader are redefined at +// once they are all added to the classloader. +bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie( + int32_t klass_index, + art::Handle<art::mirror::ClassLoader> source_class_loader, + art::Handle<art::mirror::Object> dex_file_obj, + /*out*/RedefinitionDataHolder* holder) { + art::StackHandleScope<2> hs(driver_->self_); + art::MutableHandle<art::mirror::LongArray> old_cookie( + hs.NewHandle<art::mirror::LongArray>(nullptr)); + bool has_older_cookie = false; + // See if we already have a cookie that a previous redefinition got from the same classloader. + for (int32_t i = 0; i < klass_index; i++) { + if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) { + // Since every instance of this classloader should have the same cookie associated with it we + // can stop looking here. + has_older_cookie = true; + old_cookie.Assign(holder->GetNewDexFileCookie(i)); + break; + } + } + if (old_cookie.IsNull()) { + // No older cookie. 
Get it directly from the dex_file_obj + // We should not have seen this classloader elsewhere. + CHECK(!has_older_cookie); + old_cookie.Assign(ClassLoaderHelper::GetDexFileCookie(dex_file_obj)); + } + // Use the old cookie to generate the new one with the new DexFile* added in. + art::Handle<art::mirror::LongArray> + new_cookie(hs.NewHandle(ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_, + old_cookie, + dex_file_.get()))); + // Make sure the allocation worked. + if (new_cookie.IsNull()) { + return false; + } + + // Save the cookie. + holder->SetNewDexFileCookie(klass_index, new_cookie.Get()); + // If there are other copies of this same classloader we need to make sure that we all have the + // same cookie. + if (has_older_cookie) { + for (int32_t i = 0; i < klass_index; i++) { + // We will let the GC take care of the cookie we allocated for this one. + if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) { + holder->SetNewDexFileCookie(i, new_cookie.Get()); + } + } + } + + return true; +} + bool Redefiner::ClassRedefinition::FinishRemainingAllocations( int32_t klass_index, /*out*/RedefinitionDataHolder* holder) { + art::ScopedObjectAccessUnchecked soa(driver_->self_); art::StackHandleScope<2> hs(driver_->self_); holder->SetMirrorClass(klass_index, GetMirrorClass()); // This shouldn't allocate art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader())); - holder->SetSourceClassLoader(klass_index, loader.Get()); - if (loader.Get() == nullptr) { - // TODO Better error msg. - RecordFailure(ERR(INTERNAL), "Unable to find class loader!"); - return false; - } - art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle( - ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader))); - holder->SetJavaDexFile(klass_index, dex_file_obj.Get()); - if (dex_file_obj.Get() == nullptr) { - // TODO Better error msg. 
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!"); - return false; - } - holder->SetNewDexFileCookie(klass_index, - ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_, - dex_file_obj, - dex_file_.get()).Ptr()); - if (holder->GetNewDexFileCookie(klass_index) == nullptr) { - driver_->self_->AssertPendingOOMException(); - driver_->self_->ClearException(); - RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader"); - return false; + // The bootclasspath is handled specially so it doesn't have a j.l.DexFile. + if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) { + holder->SetSourceClassLoader(klass_index, loader.Get()); + art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle( + ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader))); + holder->SetJavaDexFile(klass_index, dex_file_obj.Get()); + if (dex_file_obj.Get() == nullptr) { + // TODO Better error msg. + RecordFailure(ERR(INTERNAL), "Unable to find dex file!"); + return false; + } + // Allocate the new dex file cookie. + if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) { + driver_->self_->AssertPendingOOMException(); + driver_->self_->ClearException(); + RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader"); + return false; + } } holder->SetNewDexCache(klass_index, CreateNewDexCache(loader)); if (holder->GetNewDexCache(klass_index) == nullptr) { @@ -815,6 +867,13 @@ jvmtiError Redefiner::Run() { // cleaned up by the GC eventually. return result_; } + int32_t counter = 0; + for (Redefiner::ClassRedefinition& redef : redefinitions_) { + if (holder.GetSourceClassLoader(counter) == nullptr) { + runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile()); + } + counter++; + } // Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done // allocating so no deadlocks. 
art::gc::Heap* heap = runtime_->GetHeap(); @@ -833,16 +892,19 @@ jvmtiError Redefiner::Run() { // TODO We need to update all debugger MethodIDs so they note the method they point to is // obsolete or implement some other well defined semantics. // TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods. - int32_t cnt = 0; + counter = 0; for (Redefiner::ClassRedefinition& redef : redefinitions_) { art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition"); - art::mirror::Class* klass = holder.GetMirrorClass(cnt); - ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(cnt), - holder.GetNewDexFileCookie(cnt)); + if (holder.GetSourceClassLoader(counter) != nullptr) { + ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter), + holder.GetNewDexFileCookie(counter)); + } + art::mirror::Class* klass = holder.GetMirrorClass(counter); // TODO Rewrite so we don't do a stack walk for each and every class. redef.FindAndAllocateObsoleteMethods(klass); - redef.UpdateClass(klass, holder.GetNewDexCache(cnt), holder.GetOriginalDexFileBytes(cnt)); - cnt++; + redef.UpdateClass(klass, holder.GetNewDexCache(counter), + holder.GetOriginalDexFileBytes(counter)); + counter++; } // TODO Verify the new Class. // TODO Shrink the obsolete method maps if possible? 
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h index fc7a3b3dec..5aa7dde55c 100644 --- a/runtime/openjdkjvmti/ti_redefine.h +++ b/runtime/openjdkjvmti/ti_redefine.h @@ -127,6 +127,10 @@ class Redefiner { art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_); art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_); + const art::DexFile& GetDexFile() { + return *dex_file_; + } + art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader) REQUIRES_SHARED(art::Locks::mutator_lock_); @@ -141,6 +145,13 @@ class Redefiner { bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder) REQUIRES_SHARED(art::Locks::mutator_lock_); + bool AllocateAndRememberNewDexFileCookie( + int32_t klass_index, + art::Handle<art::mirror::ClassLoader> source_class_loader, + art::Handle<art::mirror::Object> dex_file_obj, + /*out*/RedefinitionDataHolder* holder) + REQUIRES_SHARED(art::Locks::mutator_lock_); + void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) REQUIRES(art::Locks::mutator_lock_); diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 4e76951189..bf995095de 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -407,7 +407,8 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { CodeInfoEncoding encoding = code_info.ExtractEncoding(); StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); const size_t number_of_vregs = m->GetCodeItem()->registers_size_; - uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding); + uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map); + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map); DexRegisterMap vreg_map = IsInInlinedFrame() ? 
code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1, code_info.GetInlineInfoOf(stack_map, encoding), @@ -440,8 +441,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset; value = *reinterpret_cast<const uint32_t*>(addr); uint32_t bit = (offset >> 2); - if (code_info.GetNumberOfStackMaskBits(encoding) > bit && - stack_map.GetStackMaskBit(encoding.stack_map_encoding, bit)) { + if (bit < encoding.stack_mask_size_in_bits && stack_mask.LoadBit(bit)) { is_reference = true; } break; diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc index 70aff37961..36901293bb 100644 --- a/runtime/runtime_common.cc +++ b/runtime/runtime_common.cc @@ -136,12 +136,14 @@ struct UContext { void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const; void DumpX86Flags(std::ostream& os, uint32_t flags) const; + // Print some of the information from the status register (CPSR on ARMv7, PSTATE on ARMv8). + template <typename RegisterType> + void DumpArmStatusRegister(std::ostream& os, RegisterType status_register) const; mcontext_t& context; }; void UContext::Dump(std::ostream& os) const { - // TODO: support non-x86 hosts. 
#if defined(__APPLE__) && defined(__i386__) DumpRegister32(os, "eax", context->__ss.__eax); DumpRegister32(os, "ebx", context->__ss.__ebx); @@ -229,7 +231,53 @@ void UContext::Dump(std::ostream& os) const { DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF); DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF); os << '\n'; +#elif defined(__linux__) && defined(__arm__) + DumpRegister32(os, "r0", context.arm_r0); + DumpRegister32(os, "r1", context.arm_r1); + DumpRegister32(os, "r2", context.arm_r2); + DumpRegister32(os, "r3", context.arm_r3); + os << '\n'; + + DumpRegister32(os, "r4", context.arm_r4); + DumpRegister32(os, "r5", context.arm_r5); + DumpRegister32(os, "r6", context.arm_r6); + DumpRegister32(os, "r7", context.arm_r7); + os << '\n'; + + DumpRegister32(os, "r8", context.arm_r8); + DumpRegister32(os, "r9", context.arm_r9); + DumpRegister32(os, "r10", context.arm_r10); + DumpRegister32(os, "fp", context.arm_fp); + os << '\n'; + + DumpRegister32(os, "ip", context.arm_ip); + DumpRegister32(os, "sp", context.arm_sp); + DumpRegister32(os, "lr", context.arm_lr); + DumpRegister32(os, "pc", context.arm_pc); + os << '\n'; + + DumpRegister32(os, "cpsr", context.arm_cpsr); + DumpArmStatusRegister(os, context.arm_cpsr); + os << '\n'; +#elif defined(__linux__) && defined(__aarch64__) + for (size_t i = 0; i <= 30; ++i) { + std::string reg_name = "x" + std::to_string(i); + DumpRegister64(os, reg_name.c_str(), context.regs[i]); + if (i % 4 == 3) { + os << '\n'; + } + } + os << '\n'; + + DumpRegister64(os, "sp", context.sp); + DumpRegister64(os, "pc", context.pc); + os << '\n'; + + DumpRegister64(os, "pstate", context.pstate); + DumpArmStatusRegister(os, context.pstate); + os << '\n'; #else + // TODO: Add support for MIPS32 and MIPS64. 
os << "Unknown architecture/word size/OS in ucontext dump"; #endif } @@ -274,6 +322,30 @@ void UContext::DumpX86Flags(std::ostream& os, uint32_t flags) const { os << " ]"; } +template <typename RegisterType> +void UContext::DumpArmStatusRegister(std::ostream& os, RegisterType status_register) const { + // Condition flags. + constexpr RegisterType kFlagV = 1U << 28; + constexpr RegisterType kFlagC = 1U << 29; + constexpr RegisterType kFlagZ = 1U << 30; + constexpr RegisterType kFlagN = 1U << 31; + + os << " ["; + if ((status_register & kFlagN) != 0) { + os << " N"; + } + if ((status_register & kFlagZ) != 0) { + os << " Z"; + } + if ((status_register & kFlagC) != 0) { + os << " C"; + } + if ((status_register & kFlagV) != 0) { + os << " V"; + } + os << " ]"; +} + int GetTimeoutSignal() { #if defined(__APPLE__) // Mac does not support realtime signals. diff --git a/runtime/stack.cc b/runtime/stack.cc index f9efc0b88f..5ad00a4e55 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -625,7 +625,7 @@ void StackVisitor::SetMethod(ArtMethod* method) { } else { DCHECK(cur_quick_frame_ != nullptr); CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod!"; - *cur_quick_frame_ = method; + *cur_quick_frame_ = method; } } diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc index e093293e75..4e7c3f4f9f 100644 --- a/runtime/stack_map.cc +++ b/runtime/stack_map.cc @@ -97,8 +97,9 @@ void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const { << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_) << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_) << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_) - << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_bit_offset_) - << ", stack_mask_bit_offset=" << static_cast<uint32_t>(stack_mask_bit_offset_) + << ", register_mask_bit_offset=" << 
static_cast<uint32_t>(register_mask_index_bit_offset_) + << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_) + << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_) << ")\n"; } @@ -198,16 +199,17 @@ void StackMap::Dump(VariableIndentationOutputStream* vios, << "StackMap" << header_suffix << std::hex << " [native_pc=0x" << code_offset + pc_offset << "]" - << " [entry_size=0x" << encoding.stack_map_size_in_bits << " bits]" + << " [entry_size=0x" << encoding.stack_map_encoding.BitSize() << " bits]" << " (dex_pc=0x" << GetDexPc(stack_map_encoding) << ", native_pc_offset=0x" << pc_offset << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding) << ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding) - << ", register_mask=0x" << GetRegisterMask(stack_map_encoding) + << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this) << std::dec << ", stack_mask=0b"; - for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(encoding); i < e; ++i) { - vios->Stream() << GetStackMaskBit(stack_map_encoding, e - i - 1); + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this); + for (size_t i = 0, e = encoding.stack_mask_size_in_bits; i < e; ++i) { + vios->Stream() << stack_mask.LoadBit(e - i - 1); } vios->Stream() << ")\n"; if (HasDexRegisterMap(stack_map_encoding)) { diff --git a/runtime/stack_map.h b/runtime/stack_map.h index 679218d5be..062404dbf2 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -694,35 +694,35 @@ class StackMapEncoding { size_t dex_pc_max, size_t dex_register_map_size, size_t inline_info_size, - size_t register_mask_max, - size_t stack_mask_bit_size) { - size_t bit_offset = 0; - DCHECK_EQ(kNativePcBitOffset, bit_offset); - bit_offset += MinimumBitsToStore(native_pc_max); + size_t number_of_register_masks, + size_t number_of_stack_masks) { + total_bit_size_ = 0; + DCHECK_EQ(kNativePcBitOffset, total_bit_size_); + total_bit_size_ 
+= MinimumBitsToStore(native_pc_max); - dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset); - bit_offset += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max); + dex_pc_bit_offset_ = total_bit_size_; + total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max); // We also need +1 for kNoDexRegisterMap, but since the size is strictly // greater than any offset we might try to encode, we already implicitly have it. - dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset); - bit_offset += MinimumBitsToStore(dex_register_map_size); + dex_register_map_bit_offset_ = total_bit_size_; + total_bit_size_ += MinimumBitsToStore(dex_register_map_size); // We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly // greater than the offset we might try to encode, we already implicitly have it. // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits). - inline_info_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset); + inline_info_bit_offset_ = total_bit_size_; if (inline_info_size != 0) { - bit_offset += MinimumBitsToStore(dex_register_map_size + inline_info_size); + total_bit_size_ += MinimumBitsToStore(dex_register_map_size + inline_info_size); } - register_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset); - bit_offset += MinimumBitsToStore(register_mask_max); + register_mask_index_bit_offset_ = total_bit_size_; + total_bit_size_ += MinimumBitsToStore(number_of_register_masks); - stack_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset); - bit_offset += stack_mask_bit_size; + stack_mask_index_bit_offset_ = total_bit_size_; + total_bit_size_ += MinimumBitsToStore(number_of_stack_masks); - return bit_offset; + return total_bit_size_; } ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const { @@ -735,18 +735,18 @@ class StackMapEncoding { return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */); } ALWAYS_INLINE FieldEncoding 
GetInlineInfoEncoding() const { - return FieldEncoding(inline_info_bit_offset_, register_mask_bit_offset_, -1 /* min_value */); + return FieldEncoding(inline_info_bit_offset_, + register_mask_index_bit_offset_, + -1 /* min_value */); } - ALWAYS_INLINE FieldEncoding GetRegisterMaskEncoding() const { - return FieldEncoding(register_mask_bit_offset_, stack_mask_bit_offset_); + ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const { + return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_); } - ALWAYS_INLINE size_t GetStackMaskBitOffset() const { - // The end offset is not encoded. It is implicitly the end of stack map entry. - return stack_mask_bit_offset_; + ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const { + return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_); } - ALWAYS_INLINE size_t GetNumberOfStackMaskBits(size_t stack_map_bits) const { - // Note that the stack mask bits are last. - return stack_map_bits - GetStackMaskBitOffset(); + ALWAYS_INLINE size_t BitSize() const { + return total_bit_size_; } void Dump(VariableIndentationOutputStream* vios) const; @@ -756,8 +756,9 @@ class StackMapEncoding { uint8_t dex_pc_bit_offset_; uint8_t dex_register_map_bit_offset_; uint8_t inline_info_bit_offset_; - uint8_t register_mask_bit_offset_; - uint8_t stack_mask_bit_offset_; + uint8_t register_mask_index_bit_offset_; + uint8_t stack_mask_index_bit_offset_; + uint8_t total_bit_size_; }; /** @@ -770,8 +771,8 @@ class StackMapEncoding { * * The information is of the form: * - * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask, - * stack_mask]. + * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask_index, + * stack_mask_index]. 
*/ class StackMap { public: @@ -816,20 +817,20 @@ class StackMap { encoding.GetInlineInfoEncoding().Store(region_, offset); } - ALWAYS_INLINE uint32_t GetRegisterMask(const StackMapEncoding& encoding) const { - return encoding.GetRegisterMaskEncoding().Load(region_); + ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const { + return encoding.GetRegisterMaskIndexEncoding().Load(region_); } - ALWAYS_INLINE void SetRegisterMask(const StackMapEncoding& encoding, uint32_t mask) { - encoding.GetRegisterMaskEncoding().Store(region_, mask); + ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) { + encoding.GetRegisterMaskIndexEncoding().Store(region_, mask); } - ALWAYS_INLINE bool GetStackMaskBit(const StackMapEncoding& encoding, size_t index) const { - return region_.LoadBit(encoding.GetStackMaskBitOffset() + index); + ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const { + return encoding.GetStackMaskIndexEncoding().Load(region_); } - ALWAYS_INLINE void SetStackMaskBit(const StackMapEncoding& encoding, size_t index, bool value) { - region_.StoreBit(encoding.GetStackMaskBitOffset() + index, value); + ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) { + encoding.GetStackMaskIndexEncoding().Store(region_, mask); } ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const { @@ -1031,7 +1032,10 @@ class InlineInfo { struct CodeInfoEncoding { uint32_t non_header_size; uint32_t number_of_stack_maps; - uint32_t stack_map_size_in_bits; + uint32_t number_of_stack_masks; + uint32_t number_of_register_masks; + uint32_t stack_mask_size_in_bits; + uint32_t register_mask_size_in_bits; uint32_t number_of_location_catalog_entries; StackMapEncoding stack_map_encoding; InlineInfoEncoding inline_info_encoding; @@ -1043,7 +1047,10 @@ struct CodeInfoEncoding { const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data); non_header_size = 
DecodeUnsignedLeb128(&ptr); number_of_stack_maps = DecodeUnsignedLeb128(&ptr); - stack_map_size_in_bits = DecodeUnsignedLeb128(&ptr); + number_of_stack_masks = DecodeUnsignedLeb128(&ptr); + number_of_register_masks = DecodeUnsignedLeb128(&ptr); + stack_mask_size_in_bits = DecodeUnsignedLeb128(&ptr); + register_mask_size_in_bits = DecodeUnsignedLeb128(&ptr); number_of_location_catalog_entries = DecodeUnsignedLeb128(&ptr); static_assert(alignof(StackMapEncoding) == 1, "StackMapEncoding should not require alignment"); @@ -1064,7 +1071,10 @@ struct CodeInfoEncoding { void Compress(Vector* dest) const { EncodeUnsignedLeb128(dest, non_header_size); EncodeUnsignedLeb128(dest, number_of_stack_maps); - EncodeUnsignedLeb128(dest, stack_map_size_in_bits); + EncodeUnsignedLeb128(dest, number_of_stack_masks); + EncodeUnsignedLeb128(dest, number_of_register_masks); + EncodeUnsignedLeb128(dest, stack_mask_size_in_bits); + EncodeUnsignedLeb128(dest, register_mask_size_in_bits); EncodeUnsignedLeb128(dest, number_of_location_catalog_entries); const uint8_t* stack_map_ptr = reinterpret_cast<const uint8_t*>(&stack_map_encoding); dest->insert(dest->end(), stack_map_ptr, stack_map_ptr + sizeof(StackMapEncoding)); @@ -1098,7 +1108,7 @@ class CodeInfo { } CodeInfoEncoding ExtractEncoding() const { - CodeInfoEncoding encoding(region_.start()); + CodeInfoEncoding encoding(region_.begin()); AssertValidStackMap(encoding); return encoding; } @@ -1114,14 +1124,42 @@ class CodeInfo { } ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const { - return encoding.stack_map_encoding.GetNumberOfStackMaskBits(encoding.stack_map_size_in_bits); + return encoding.stack_mask_size_in_bits; } ALWAYS_INLINE StackMap GetStackMapAt(size_t i, const CodeInfoEncoding& encoding) const { - const size_t map_size = encoding.stack_map_size_in_bits; + const size_t map_size = encoding.stack_map_encoding.BitSize(); return StackMap(BitMemoryRegion(GetStackMaps(encoding), i * map_size, 
map_size)); } + BitMemoryRegion GetStackMask(const CodeInfoEncoding& encoding, size_t stack_mask_index) const { + // All stack mask data is stored before register map data (which is at the very end). + const size_t entry_size = GetNumberOfStackMaskBits(encoding); + const size_t register_mask_bits = + encoding.register_mask_size_in_bits * encoding.number_of_register_masks; + return BitMemoryRegion(region_, + region_.size_in_bits() - register_mask_bits - + entry_size * (stack_mask_index + 1), + entry_size); + } + + BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding, + const StackMap& stack_map) const { + return GetStackMask(encoding, stack_map.GetStackMaskIndex(encoding.stack_map_encoding)); + } + + BitMemoryRegion GetRegisterMask(const CodeInfoEncoding& encoding, size_t index) const { + const size_t entry_size = encoding.register_mask_size_in_bits; + return BitMemoryRegion(region_, + region_.size_in_bits() - entry_size * (index + 1), + entry_size); + } + + uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const { + size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map_encoding); + return GetRegisterMask(encoding, index).LoadBits(0u, encoding.register_mask_size_in_bits); + } + uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const { return encoding.number_of_location_catalog_entries; } @@ -1135,10 +1173,14 @@ class CodeInfo { return encoding.number_of_stack_maps; } + // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned. + ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const { + return encoding.stack_map_encoding.BitSize() * GetNumberOfStackMaps(encoding); + } + // Get the size of all the stack maps of this CodeInfo object, in bytes. 
size_t GetStackMapsSize(const CodeInfoEncoding& encoding) const { - return RoundUp(encoding.stack_map_size_in_bits * GetNumberOfStackMaps(encoding), kBitsPerByte) / - kBitsPerByte; + return RoundUp(GetStackMapsSizeInBits(encoding), kBitsPerByte) / kBitsPerByte; } uint32_t GetDexRegisterLocationCatalogOffset(const CodeInfoEncoding& encoding) const { @@ -1288,7 +1330,7 @@ class CodeInfo { << encoding.non_header_size << "\n" << encoding.number_of_location_catalog_entries << "\n" << encoding.number_of_stack_maps << "\n" - << encoding.stack_map_size_in_bits; + << encoding.stack_map_encoding.BitSize(); } } diff --git a/runtime/thread.cc b/runtime/thread.cc index 3c7a71aba9..d843de5e7f 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1047,9 +1047,10 @@ void Thread::ShortDump(std::ostream& os) const { << "]"; } -void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map) const { +void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map, + bool force_dump_stack) const { DumpState(os); - DumpStack(os, dump_native_stack, backtrace_map); + DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack); } mirror::String* Thread::GetThreadName() const { @@ -1750,7 +1751,8 @@ void Thread::DumpJavaStack(std::ostream& os) const { void Thread::DumpStack(std::ostream& os, bool dump_native_stack, - BacktraceMap* backtrace_map) const { + BacktraceMap* backtrace_map, + bool force_dump_stack) const { // TODO: we call this code when dying but may not have suspended the thread ourself. The // IsSuspended check is therefore racy with the use for dumping (normally we inhibit // the race with the thread_suspend_count_lock_). @@ -1761,11 +1763,11 @@ void Thread::DumpStack(std::ostream& os, // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. 
safe_to_dump = (safe_to_dump || dump_for_abort); } - if (safe_to_dump) { + if (safe_to_dump || force_dump_stack) { // If we're currently in native code, dump that stack before dumping the managed stack. - if (dump_native_stack && (dump_for_abort || ShouldShowNativeStack(this))) { + if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { DumpKernelStack(os, GetTid(), " kernel: ", false); - ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort); + ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack)); DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); } DumpJavaStack(os); @@ -2188,12 +2190,18 @@ void Thread::SetClassLoaderOverride(jobject class_loader_override) { tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override); } -class CountStackDepthVisitor : public StackVisitor { +using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>; + +// Counts the stack trace depth and also fetches the first max_saved_frames frames. +class FetchStackTraceVisitor : public StackVisitor { public: - explicit CountStackDepthVisitor(Thread* thread) + explicit FetchStackTraceVisitor(Thread* thread, + ArtMethodDexPcPair* saved_frames = nullptr, + size_t max_saved_frames = 0) REQUIRES_SHARED(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), - depth_(0), skip_depth_(0), skipping_(true) {} + saved_frames_(saved_frames), + max_saved_frames_(max_saved_frames) {} bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. @@ -2206,6 +2214,10 @@ class CountStackDepthVisitor : public StackVisitor { } if (!skipping_) { if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). + if (depth_ < max_saved_frames_) { + saved_frames_[depth_].first = m; + saved_frames_[depth_].second = m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc(); + } ++depth_; } } else { @@ -2214,20 +2226,22 @@ class CountStackDepthVisitor : public StackVisitor { return true; } - int GetDepth() const { + uint32_t GetDepth() const { return depth_; } - int GetSkipDepth() const { + uint32_t GetSkipDepth() const { return skip_depth_; } private: - uint32_t depth_; - uint32_t skip_depth_; - bool skipping_; + uint32_t depth_ = 0; + uint32_t skip_depth_ = 0; + bool skipping_ = true; + ArtMethodDexPcPair* saved_frames_; + const size_t max_saved_frames_; - DISALLOW_COPY_AND_ASSIGN(CountStackDepthVisitor); + DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); }; template<bool kTransactionActive> @@ -2237,8 +2251,6 @@ class BuildInternalStackTraceVisitor : public StackVisitor { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), skip_depth_(skip_depth), - count_(0), - trace_(nullptr), pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { @@ -2290,17 +2302,21 @@ class BuildInternalStackTraceVisitor : public StackVisitor { if (m->IsRuntimeMethod()) { return true; // Ignore runtime frames (in particular callee save). } + AddFrame(m, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc()); + return true; + } + + void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); - trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_); + trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( trace_methods_and_pcs->GetLength() / 2 + count_, - m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc(), + dex_pc, pointer_size_); // Save the declaring class of the method to ensure that the declaring classes of the methods // do not get unloaded while the stack trace is live. - trace_->Set(count_ + 1, m->GetDeclaringClass()); + trace_->Set(count_ + 1, method->GetDeclaringClass()); ++count_; - return true; } ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { @@ -2316,12 +2332,12 @@ class BuildInternalStackTraceVisitor : public StackVisitor { // How many more frames to skip. int32_t skip_depth_; // Current position down stack trace. - uint32_t count_; + uint32_t count_ = 0; // An object array where the first element is a pointer array that contains the ArtMethod // pointers on the stack and dex PCs. The rest of the elements are the declaring // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of // the i'th frame. - mirror::ObjectArray<mirror::Object>* trace_; + mirror::ObjectArray<mirror::Object>* trace_ = nullptr; // For cross compilation. const PointerSize pointer_size_; @@ -2330,11 +2346,15 @@ class BuildInternalStackTraceVisitor : public StackVisitor { template<bool kTransactionActive> jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { - // Compute depth of stack - CountStackDepthVisitor count_visitor(const_cast<Thread*>(this)); + // Compute depth of stack, save frames if possible to avoid needing to recompute many. 
+ constexpr size_t kMaxSavedFrames = 256; + std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); + FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this), + &saved_frames[0], + kMaxSavedFrames); count_visitor.WalkStack(); - int32_t depth = count_visitor.GetDepth(); - int32_t skip_depth = count_visitor.GetSkipDepth(); + const uint32_t depth = count_visitor.GetDepth(); + const uint32_t skip_depth = count_visitor.GetSkipDepth(); // Build internal stack trace. BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), @@ -2343,7 +2363,16 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable if (!build_trace_visitor.Init(depth)) { return nullptr; // Allocation failed. } - build_trace_visitor.WalkStack(); + // If we saved all of the frames we don't even need to do the actual stack walk. This is faster + // than doing the stack walk twice. + if (depth < kMaxSavedFrames) { + for (size_t i = 0; i < depth; ++i) { + build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); + } + } else { + build_trace_visitor.WalkStack(); + } + mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); if (kIsDebugBuild) { ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); @@ -2362,9 +2391,10 @@ template jobject Thread::CreateInternalStackTrace<true>( const ScopedObjectAccessAlreadyRunnable& soa) const; bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { - CountStackDepthVisitor count_visitor(const_cast<Thread*>(this)); + // Only count the depth since we do not pass a stack frame array as an argument. 
+ FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this)); count_visitor.WalkStack(); - return count_visitor.GetDepth() == exception->GetStackDepth(); + return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth()); } jobjectArray Thread::InternalStackTraceToStackTraceElementArray( @@ -3038,9 +3068,10 @@ class ReferenceMapVisitor : public StackVisitor { T vreg_info(m, code_info, encoding, map, visitor_); // Visit stack entries that hold pointers. - size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding); + const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding); + BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map); for (size_t i = 0; i < number_of_bits; ++i) { - if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) { + if (stack_mask.LoadBit(i)) { auto* ref_addr = vreg_base + i; mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { @@ -3048,12 +3079,12 @@ class ReferenceMapVisitor : public StackVisitor { vreg_info.VisitStack(&new_ref, i, this); if (ref != new_ref) { ref_addr->Assign(new_ref); - } + } } } } // Visit callee-save registers that hold pointers. - uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding); + uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map); for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) { if (register_mask & (1 << i)) { mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i)); diff --git a/runtime/thread.h b/runtime/thread.h index b609e723e9..b59eac68e9 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -196,7 +196,8 @@ class Thread { // Dumps the detailed thread state and the thread stack (used for SIGQUIT). 
void Dump(std::ostream& os, bool dump_native_stack = true, - BacktraceMap* backtrace_map = nullptr) const + BacktraceMap* backtrace_map = nullptr, + bool force_dump_stack = false) const REQUIRES(!Locks::thread_suspend_count_lock_) REQUIRES_SHARED(Locks::mutator_lock_); @@ -1204,7 +1205,8 @@ class Thread { void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_); void DumpStack(std::ostream& os, bool dump_native_stack = true, - BacktraceMap* backtrace_map = nullptr) const + BacktraceMap* backtrace_map = nullptr, + bool force_dump_stack = false) const REQUIRES(!Locks::thread_suspend_count_lock_) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/utils.cc b/runtime/utils.cc index 80a427b1e7..6a20eaf9e0 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -929,74 +929,6 @@ std::string GetSystemImageFilename(const char* location, const InstructionSet is return filename; } -int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) { - const std::string command_line(android::base::Join(arg_vector, ' ')); - CHECK_GE(arg_vector.size(), 1U) << command_line; - - // Convert the args to char pointers. - const char* program = arg_vector[0].c_str(); - std::vector<char*> args; - for (size_t i = 0; i < arg_vector.size(); ++i) { - const std::string& arg = arg_vector[i]; - char* arg_str = const_cast<char*>(arg.c_str()); - CHECK(arg_str != nullptr) << i; - args.push_back(arg_str); - } - args.push_back(nullptr); - - // fork and exec - pid_t pid = fork(); - if (pid == 0) { - // no allocation allowed between fork and exec - - // change process groups, so we don't get reaped by ProcessManager - setpgid(0, 0); - - // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc. - // Use the snapshot of the environment from the time the runtime was created. - char** envp = (Runtime::Current() == nullptr) ? 
nullptr : Runtime::Current()->GetEnvSnapshot(); - if (envp == nullptr) { - execv(program, &args[0]); - } else { - execve(program, &args[0], envp); - } - PLOG(ERROR) << "Failed to execve(" << command_line << ")"; - // _exit to avoid atexit handlers in child. - _exit(1); - } else { - if (pid == -1) { - *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s", - command_line.c_str(), strerror(errno)); - return -1; - } - - // wait for subprocess to finish - int status = -1; - pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0)); - if (got_pid != pid) { - *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: " - "wanted %d, got %d: %s", - command_line.c_str(), pid, got_pid, strerror(errno)); - return -1; - } - if (WIFEXITED(status)) { - return WEXITSTATUS(status); - } - return -1; - } -} - -bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) { - int status = ExecAndReturnCode(arg_vector, error_msg); - if (status != 0) { - const std::string command_line(android::base::Join(arg_vector, ' ')); - *error_msg = StringPrintf("Failed execv(%s) because non-0 exit status", - command_line.c_str()); - return false; - } - return true; -} - bool FileExists(const std::string& filename) { struct stat buffer; return stat(filename.c_str(), &buffer) == 0; diff --git a/runtime/utils.h b/runtime/utils.h index 5f53608f65..67438b5881 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -175,13 +175,6 @@ bool GetDalvikCacheFilename(const char* file_location, const char* cache_locatio // Returns the system location for an image std::string GetSystemImageFilename(const char* location, InstructionSet isa); -// Wrapper on fork/execv to run a command in a subprocess. -// Both of these spawn child processes using the environment as it was set when the single instance -// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it -// will use the current environment settings. 
-bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg); -int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg); - // Returns true if the file exists. bool FileExists(const std::string& filename); bool FileExistsAndNotEmpty(const std::string& filename); diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h index 2812c21004..bd1b044dae 100644 --- a/runtime/utils/dex_cache_arrays_layout-inl.h +++ b/runtime/utils/dex_cache_arrays_layout-inl.h @@ -48,11 +48,9 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons : DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) { } -constexpr size_t DexCacheArraysLayout::Alignment() { - // mirror::Type/String/MethodTypeDexCacheType alignment is 8, - // i.e. higher than or equal to the pointer alignment. - static_assert(alignof(mirror::TypeDexCacheType) == 8, - "Expecting alignof(ClassDexCacheType) == 8"); +inline constexpr size_t DexCacheArraysLayout::Alignment() { + // GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment. 
+ static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4"); static_assert(alignof(mirror::StringDexCacheType) == 8, "Expecting alignof(StringDexCacheType) == 8"); static_assert(alignof(mirror::MethodTypeDexCacheType) == 8, @@ -62,22 +60,17 @@ constexpr size_t DexCacheArraysLayout::Alignment() { } template <typename T> -constexpr PointerSize GcRootAsPointerSize() { +static constexpr PointerSize GcRootAsPointerSize() { static_assert(sizeof(GcRoot<T>) == 4U, "Unexpected GcRoot size"); return PointerSize::k32; } inline size_t DexCacheArraysLayout::TypeOffset(dex::TypeIndex type_idx) const { - return types_offset_ + ElementOffset(PointerSize::k64, - type_idx.index_ % mirror::DexCache::kDexCacheTypeCacheSize); + return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx.index_); } inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const { - size_t cache_size = mirror::DexCache::kDexCacheTypeCacheSize; - if (num_elements < cache_size) { - cache_size = num_elements; - } - return ArraySize(PointerSize::k64, cache_size); + return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements); } inline size_t DexCacheArraysLayout::TypesAlignment() const { diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc index 82d92fc2fc..02f1e1bbfe 100644 --- a/runtime/utils_test.cc +++ b/runtime/utils_test.cc @@ -21,6 +21,7 @@ #include "base/enums.h" #include "class_linker-inl.h" #include "common_runtime_test.h" +#include "exec_utils.h" #include "mirror/array.h" #include "mirror/array-inl.h" #include "mirror/object-inl.h" diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc index dabf8c8e93..2481c8ba46 100644 --- a/runtime/vdex_file.cc +++ b/runtime/vdex_file.cc @@ -49,10 +49,10 @@ VdexFile::Header::Header(uint32_t number_of_dex_files, DCHECK(IsVersionValid()); } -VdexFile* VdexFile::Open(const std::string& vdex_filename, - bool writable, - bool low_4gb, - std::string* error_msg) { +std::unique_ptr<VdexFile> 
VdexFile::Open(const std::string& vdex_filename, + bool writable, + bool low_4gb, + std::string* error_msg) { if (!OS::FileExists(vdex_filename.c_str())) { *error_msg = "File " + vdex_filename + " does not exist."; return nullptr; @@ -79,12 +79,12 @@ VdexFile* VdexFile::Open(const std::string& vdex_filename, return Open(vdex_file->Fd(), vdex_length, vdex_filename, writable, low_4gb, error_msg); } -VdexFile* VdexFile::Open(int file_fd, - size_t vdex_length, - const std::string& vdex_filename, - bool writable, - bool low_4gb, - std::string* error_msg) { +std::unique_ptr<VdexFile> VdexFile::Open(int file_fd, + size_t vdex_length, + const std::string& vdex_filename, + bool writable, + bool low_4gb, + std::string* error_msg) { std::unique_ptr<MemMap> mmap(MemMap::MapFile(vdex_length, writable ? PROT_READ | PROT_WRITE : PROT_READ, MAP_SHARED, @@ -98,8 +98,14 @@ VdexFile* VdexFile::Open(int file_fd, return nullptr; } + std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release())); + if (!vdex->IsValid()) { + *error_msg = "Vdex file is not valid"; + return nullptr; + } + *error_msg = "Success"; - return new VdexFile(mmap.release()); + return vdex; } const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor) const { diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h index 330b955c2a..7daf2f8d7b 100644 --- a/runtime/vdex_file.h +++ b/runtime/vdex_file.h @@ -73,17 +73,19 @@ class VdexFile { typedef uint32_t VdexChecksum; - static VdexFile* Open(const std::string& vdex_filename, - bool writable, - bool low_4gb, - std::string* error_msg); - - static VdexFile* Open(int file_fd, - size_t vdex_length, - const std::string& vdex_filename, - bool writable, - bool low_4gb, - std::string* error_msg); + // Returns nullptr if the vdex file cannot be opened or is not valid. + static std::unique_ptr<VdexFile> Open(const std::string& vdex_filename, + bool writable, + bool low_4gb, + std::string* error_msg); + + // Returns nullptr if the vdex file cannot be opened or is not valid. 
+ static std::unique_ptr<VdexFile> Open(int file_fd, + size_t vdex_length, + const std::string& vdex_filename, + bool writable, + bool low_4gb, + std::string* error_msg); const uint8_t* Begin() const { return mmap_->Begin(); } const uint8_t* End() const { return mmap_->End(); } diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc new file mode 100644 index 0000000000..909e117ccc --- /dev/null +++ b/runtime/vdex_file_test.cc @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "vdex_file.h" + +#include <string> + +#include <gtest/gtest.h> + +#include "common_runtime_test.h" + +namespace art { + +class VdexFileTest : public CommonRuntimeTest { +}; + +TEST_F(VdexFileTest, OpenEmptyVdex) { + // Verify we fail to open an empty vdex file. 
+ ScratchFile tmp; + std::string error_msg; + std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(), + 0, + tmp.GetFilename(), + /*writable*/false, + /*low_4gb*/false, + &error_msg); + EXPECT_TRUE(vdex == nullptr); + + vdex = VdexFile::Open(tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, &error_msg); + EXPECT_TRUE(vdex == nullptr); +} + +} // namespace art diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index ba429d8c3e..5f55f3fd29 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -415,12 +415,12 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self, result.kind = kSoftFailure; if (method != nullptr && !CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) { - method->AddAccessFlags(kAccCompileDontBother); + method->SetDontCompile(); } } if (method != nullptr) { if (verifier.HasInstructionThatWillThrow()) { - method->AddAccessFlags(kAccCompileDontBother); + method->SetDontCompile(); if (Runtime::Current()->IsAotCompiler() && (callbacks != nullptr) && !callbacks->IsBootImage()) { // When compiling apps, make HasInstructionThatWillThrow a soft error to trigger @@ -2399,8 +2399,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { const RegType& res_type = ResolveClassAndCheckAccess(type_idx); if (res_type.IsConflict()) { // If this is a primitive type, fail HARD. 
- ObjPtr<mirror::Class> klass = - ClassLinker::LookupResolvedType(type_idx, dex_cache_.Get(), class_loader_.Get()); + mirror::Class* klass = dex_cache_->GetResolvedType(type_idx); if (klass != nullptr && klass->IsPrimitive()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type " << dex_file_->StringByTypeIdx(type_idx) << " in instanceof in " @@ -3685,10 +3684,9 @@ inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) { } const RegType& MethodVerifier::ResolveClassAndCheckAccess(dex::TypeIndex class_idx) { - mirror::Class* klass = - ClassLinker::LookupResolvedType(class_idx, dex_cache_.Get(), class_loader_.Get()).Ptr(); + mirror::Class* klass = dex_cache_->GetResolvedType(class_idx); const RegType* result = nullptr; - if (klass != nullptr && !klass->IsErroneous()) { + if (klass != nullptr) { bool precise = klass->CannotBeAssignedFromOtherTypes(); if (precise && !IsInstantiableOrPrimitive(klass)) { const char* descriptor = dex_file_->StringByTypeIdx(class_idx); diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java index fad8a9f100..072f0e68ee 100644 --- a/test/082-inline-execute/src/Main.java +++ b/test/082-inline-execute/src/Main.java @@ -535,6 +535,8 @@ public class Main { Assert.assertEquals(Math.min(0.0f, Float.MAX_VALUE), 0.0f); Assert.assertEquals(Math.min(Float.MIN_VALUE, 0.0f), 0.0f); Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE); + // Should not have flush-to-zero behavior. 
+ Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE); } public static void test_Math_max_F() { @@ -548,8 +550,10 @@ public class Main { Assert.assertEquals(Math.max(1.0f, 0.0f), 1.0f); Assert.assertEquals(Math.max(0.0f, 1.0f), 1.0f); Assert.assertEquals(Math.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE); - Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE); Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE); + // Should not have flush-to-zero behavior. + Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE); + Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE); } public static void test_Math_min_D() { @@ -565,6 +569,8 @@ public class Main { Assert.assertEquals(Math.min(0.0d, Double.MAX_VALUE), 0.0d); Assert.assertEquals(Math.min(Double.MIN_VALUE, 0.0d), 0.0d); Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE); + // Should not have flush-to-zero behavior. + Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE); } public static void test_Math_max_D() { @@ -580,6 +586,9 @@ public class Main { Assert.assertEquals(Math.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE); Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE); Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE); + // Should not have flush-to-zero behavior. 
+ Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE); + Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE); } public static void test_Math_sqrt() { diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java index 7cc0b8b652..7509d9b4f3 100644 --- a/test/623-checker-loop-regressions/src/Main.java +++ b/test/623-checker-loop-regressions/src/Main.java @@ -154,8 +154,8 @@ public class Main { /// CHECK-NOT: Phi // /// CHECK-START: int Main.polynomialInt() instruction_simplifier$after_bce (after) - /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none - /// CHECK-DAG: Return [<<Int>>] loop:none + /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none + /// CHECK-DAG: Return [<<Int>>] loop:none static int polynomialInt() { int x = 0; for (int i = 0; i < 10; i++) { @@ -164,6 +164,81 @@ public class Main { return x; } + // Regression test for b/34779592 (found with fuzz testing): overflow for last value + // of division truncates to zero, for multiplication it simply truncates. 
+ // + /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (before) + /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none + /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none + // + /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (after) + /// CHECK-NOT: Phi + // + /// CHECK-START: int Main.geoIntDivLastValue(int) instruction_simplifier$after_bce (after) + /// CHECK-DAG: <<Int:i\d+>> IntConstant 0 loop:none + /// CHECK-DAG: Return [<<Int>>] loop:none + static int geoIntDivLastValue(int x) { + for (int i = 0; i < 2; i++) { + x /= 1081788608; + } + return x; + } + + /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (before) + /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none + /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none + // + /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (after) + /// CHECK-NOT: Phi + // + /// CHECK-START: int Main.geoIntMulLastValue(int) instruction_simplifier$after_bce (after) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none + /// CHECK-DAG: <<Int:i\d+>> IntConstant -194211840 loop:none + /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Par>>,<<Int>>] loop:none + /// CHECK-DAG: Return [<<Mul>>] loop:none + static int geoIntMulLastValue(int x) { + for (int i = 0; i < 2; i++) { + x *= 1081788608; + } + return x; + } + + /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (before) + /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none + /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none + // + /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (after) + /// CHECK-NOT: Phi + // + /// CHECK-START: long Main.geoLongDivLastValue(long) instruction_simplifier$after_bce (after) + /// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none + /// CHECK-DAG: Return [<<Long>>] loop:none + static long geoLongDivLastValue(long x) { + for (int i = 0; i < 10; i++) { + x /= 1081788608; + } + return x; + } + + /// CHECK-START: long 
Main.geoLongMulLastValue(long) loop_optimization (before) + /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none + /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none + // + /// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (after) + /// CHECK-NOT: Phi + // + /// CHECK-START: long Main.geoLongMulLastValue(long) instruction_simplifier$after_bce (after) + /// CHECK-DAG: <<Par:j\d+>> ParameterValue loop:none + /// CHECK-DAG: <<Long:j\d+>> LongConstant -8070450532247928832 loop:none + /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Par>>,<<Long>>] loop:none + /// CHECK-DAG: Return [<<Mul>>] loop:none + static long geoLongMulLastValue(long x) { + for (int i = 0; i < 10; i++) { + x *= 1081788608; + } + return x; + } + public static void main(String[] args) { expectEquals(10, earlyExitFirst(-1)); for (int i = 0; i <= 10; i++) { @@ -185,6 +260,42 @@ public class Main { expectEquals(-45, polynomialIntFromLong()); expectEquals(-45, polynomialInt()); + expectEquals(0, geoIntDivLastValue(0)); + expectEquals(0, geoIntDivLastValue(1)); + expectEquals(0, geoIntDivLastValue(2)); + expectEquals(0, geoIntDivLastValue(1081788608)); + expectEquals(0, geoIntDivLastValue(-1081788608)); + expectEquals(0, geoIntDivLastValue(2147483647)); + expectEquals(0, geoIntDivLastValue(-2147483648)); + + expectEquals( 0, geoIntMulLastValue(0)); + expectEquals( -194211840, geoIntMulLastValue(1)); + expectEquals( -388423680, geoIntMulLastValue(2)); + expectEquals(-1041498112, geoIntMulLastValue(1081788608)); + expectEquals( 1041498112, geoIntMulLastValue(-1081788608)); + expectEquals( 194211840, geoIntMulLastValue(2147483647)); + expectEquals( 0, geoIntMulLastValue(-2147483648)); + + expectEquals(0L, geoLongDivLastValue(0L)); + expectEquals(0L, geoLongDivLastValue(1L)); + expectEquals(0L, geoLongDivLastValue(2L)); + expectEquals(0L, geoLongDivLastValue(1081788608L)); + expectEquals(0L, geoLongDivLastValue(-1081788608L)); + expectEquals(0L, geoLongDivLastValue(2147483647L)); + expectEquals(0L, 
geoLongDivLastValue(-2147483648L)); + expectEquals(0L, geoLongDivLastValue(9223372036854775807L)); + expectEquals(0L, geoLongDivLastValue(-9223372036854775808L)); + + expectEquals( 0L, geoLongMulLastValue(0L)); + expectEquals(-8070450532247928832L, geoLongMulLastValue(1L)); + expectEquals( 2305843009213693952L, geoLongMulLastValue(2L)); + expectEquals( 0L, geoLongMulLastValue(1081788608L)); + expectEquals( 0L, geoLongMulLastValue(-1081788608L)); + expectEquals( 8070450532247928832L, geoLongMulLastValue(2147483647L)); + expectEquals( 0L, geoLongMulLastValue(-2147483648L)); + expectEquals( 8070450532247928832L, geoLongMulLastValue(9223372036854775807L)); + expectEquals( 0L, geoLongMulLastValue(-9223372036854775808L)); + System.out.println("passed"); } @@ -193,4 +304,10 @@ public class Main { throw new Error("Expected: " + expected + ", found: " + result); } } + + private static void expectEquals(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } } diff --git a/test/626-checker-arm64-scratch-register/src/Main.java b/test/626-checker-arm64-scratch-register/src/Main.java index aa211be33c..6dd4374116 100644 --- a/test/626-checker-arm64-scratch-register/src/Main.java +++ b/test/626-checker-arm64-scratch-register/src/Main.java @@ -95,8 +95,8 @@ public class Main { /// CHECK: str s1, [sp, #28] /// CHECK: ldr s1, [sp, #32] /// CHECK: str s31, [sp, #32] - /// CHECK: ldr w16, [sp, #20] - /// CHECK: str w16, [sp, #40] + /// CHECK: ldr s31, [sp, #20] + /// CHECK: str s31, [sp, #40] /// CHECK: str s12, [sp, #20] /// CHECK: fmov d12, d11 /// CHECK: fmov d11, d10 diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc index 6d4b645db6..b035896166 100644 --- a/test/626-const-class-linking/clear_dex_cache_types.cc +++ b/test/626-const-class-linking/clear_dex_cache_types.cc @@ -24,8 +24,7 @@ extern "C" JNIEXPORT void JNICALL 
Java_Main_nativeClearResolvedTypes(JNIEnv*, jc ScopedObjectAccess soa(Thread::Current()); mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache(); for (size_t i = 0, num_types = dex_cache->NumResolvedTypes(); i != num_types; ++i) { - mirror::TypeDexCachePair cleared(nullptr, mirror::TypeDexCachePair::InvalidIndexForSlot(i)); - dex_cache->GetResolvedTypes()[i].store(cleared, std::memory_order_relaxed); + dex_cache->SetResolvedType(dex::TypeIndex(i), ObjPtr<mirror::Class>(nullptr)); } } diff --git a/test/706-checker-scheduler/expected.txt b/test/706-checker-scheduler/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/706-checker-scheduler/expected.txt diff --git a/test/706-checker-scheduler/info.txt b/test/706-checker-scheduler/info.txt new file mode 100644 index 0000000000..b4ad9b4378 --- /dev/null +++ b/test/706-checker-scheduler/info.txt @@ -0,0 +1 @@ +Tests for HInstruction scheduler. diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java new file mode 100644 index 0000000000..1721e4294e --- /dev/null +++ b/test/706-checker-scheduler/src/Main.java @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + + static int static_variable = 0; + + /// CHECK-START-ARM64: int Main.arrayAccess() scheduler (before) + /// CHECK: <<Const1:i\d+>> IntConstant 1 + /// CHECK: <<i0:i\d+>> Phi + /// CHECK: <<res0:i\d+>> Phi + /// CHECK: <<Array:i\d+>> IntermediateAddress + /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>] + /// CHECK: <<res1:i\d+>> Add [<<res0>>,<<ArrayGet1>>] + /// CHECK: <<i1:i\d+>> Add [<<i0>>,<<Const1>>] + /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<Array>>,<<i1>>] + /// CHECK: Add [<<res1>>,<<ArrayGet2>>] + + /// CHECK-START-ARM64: int Main.arrayAccess() scheduler (after) + /// CHECK: <<Const1:i\d+>> IntConstant 1 + /// CHECK: <<i0:i\d+>> Phi + /// CHECK: <<res0:i\d+>> Phi + /// CHECK: <<Array:i\d+>> IntermediateAddress + /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>] + /// CHECK: <<i1:i\d+>> Add [<<i0>>,<<Const1>>] + /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<Array>>,<<i1>>] + /// CHECK: <<res1:i\d+>> Add [<<res0>>,<<ArrayGet1>>] + /// CHECK: Add [<<res1>>,<<ArrayGet2>>] + + public static int arrayAccess() { + int res = 0; + int [] array = new int[10]; + for (int i = 0; i < 9; i++) { + res += array[i]; + res += array[i + 1]; + } + return res; + } + + /// CHECK-START-ARM64: int Main.intDiv(int) scheduler (before) + /// CHECK: Sub + /// CHECK: DivZeroCheck + /// CHECK: Div + /// CHECK: StaticFieldSet + + /// CHECK-START-ARM64: int Main.intDiv(int) scheduler (after) + /// CHECK: Sub + /// CHECK-NOT: StaticFieldSet + /// CHECK: DivZeroCheck + /// CHECK-NOT: Sub + /// CHECK: Div + public static int intDiv(int arg) { + int res = 0; + int tmp = arg; + for (int i = 1; i < arg; i++) { + tmp -= i; + res = res / i; // div-zero check barrier. 
+ static_variable++; + } + res += tmp; + return res; + } + + public static void main(String[] args) { + if ((arrayAccess() + intDiv(10)) != -35) { + System.out.println("FAIL"); + } + } +} diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc index 59801ff648..8f96ee63ef 100644 --- a/test/908-gc-start-finish/gc_callbacks.cc +++ b/test/908-gc-start-finish/gc_callbacks.cc @@ -38,43 +38,32 @@ static void JNICALL GarbageCollectionStart(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) { } extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback( - JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) { + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) { jvmtiEventCallbacks callbacks; memset(&callbacks, 0, sizeof(jvmtiEventCallbacks)); callbacks.GarbageCollectionFinish = GarbageCollectionFinish; callbacks.GarbageCollectionStart = GarbageCollectionStart; jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks)); - if (ret != JVMTI_ERROR_NONE) { - char* err; - jvmti_env->GetErrorName(ret, &err); - printf("Error setting callbacks: %s\n", err); - jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); - } + JvmtiErrorToException(env, ret); } -extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env ATTRIBUTE_UNUSED, +extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jboolean enable) { jvmtiError ret = jvmti_env->SetEventNotificationMode( enable ? JVMTI_ENABLE : JVMTI_DISABLE, JVMTI_EVENT_GARBAGE_COLLECTION_START, nullptr); - if (ret != JVMTI_ERROR_NONE) { - char* err; - jvmti_env->GetErrorName(ret, &err); - printf("Error enabling/disabling gc callbacks: %s\n", err); - jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + if (JvmtiErrorToException(env, ret)) { + return; } ret = jvmti_env->SetEventNotificationMode( enable ? 
JVMTI_ENABLE : JVMTI_DISABLE, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, nullptr); - if (ret != JVMTI_ERROR_NONE) { - char* err; - jvmti_env->GetErrorName(ret, &err); - printf("Error enabling/disabling gc callbacks: %s\n", err); - jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + if (JvmtiErrorToException(env, ret)) { + return; } } diff --git a/test/934-load-transform/src/Main.java b/test/934-load-transform/src/Main.java index 3bd913bfe0..de312b03da 100644 --- a/test/934-load-transform/src/Main.java +++ b/test/934-load-transform/src/Main.java @@ -66,6 +66,9 @@ class Main { } public static void main(String[] args) { + // Don't pop transformations. Make sure that even if 2 threads race to define the class both + // will get the same result. + setPopRetransformations(false); addCommonTransformationResult("Transform", CLASS_BYTES, DEX_BYTES); enableCommonRetransformation(true); try { @@ -83,6 +86,7 @@ class Main { } } + private static native void setPopRetransformations(boolean should_pop); // Transforms the class private static native void enableCommonRetransformation(boolean enable); private static native void addCommonTransformationResult(String target_name, diff --git a/test/935-non-retransformable/src-ex/TestMain.java b/test/935-non-retransformable/src-ex/TestMain.java index aebcdee851..d412fba37a 100644 --- a/test/935-non-retransformable/src-ex/TestMain.java +++ b/test/935-non-retransformable/src-ex/TestMain.java @@ -17,19 +17,14 @@ import java.lang.reflect.Method; public class TestMain { - public static void runTest() { + public static void runTest() throws Exception { Transform t = new Transform(); - try { - // Call functions with reflection. Since the sayGoodbye function does not exist in the - // LTransform; when we compile this for the first time we need to use reflection. 
- Method hi = Transform.class.getMethod("sayHi"); - Method bye = Transform.class.getMethod("sayGoodbye"); - hi.invoke(t); - t.sayHi(); - bye.invoke(t); - } catch (Exception e) { - System.out.println("Unexpected error occured! " + e.toString()); - e.printStackTrace(); - } + // Call functions with reflection. Since the sayGoodbye function does not exist in the + // LTransform; when we compile this for the first time we need to use reflection. + Method hi = Transform.class.getMethod("sayHi"); + Method bye = Transform.class.getMethod("sayGoodbye"); + hi.invoke(t); + t.sayHi(); + bye.invoke(t); } } diff --git a/test/935-non-retransformable/src/Main.java b/test/935-non-retransformable/src/Main.java index 0d103ab86d..82ba197b7e 100644 --- a/test/935-non-retransformable/src/Main.java +++ b/test/935-non-retransformable/src/Main.java @@ -74,6 +74,7 @@ class Main { } public static void main(String[] args) { + setPopRetransformations(false); addCommonTransformationResult("Transform", CLASS_BYTES, DEX_BYTES); enableCommonRetransformation(true); try { @@ -86,6 +87,8 @@ class Main { Method run_test = klass.getMethod("runTest"); run_test.invoke(null); + // Remove the original transformation. It has been used by now. + popTransformationFor("Transform"); // Make sure we don't get called for transformation again. 
addCommonTransformationResult("Transform", new byte[0], new byte[0]); doCommonClassRetransformation(new_loader.loadClass("Transform")); @@ -102,4 +105,6 @@ class Main { private static native void addCommonTransformationResult(String target_name, byte[] class_bytes, byte[] dex_bytes); + private static native void setPopRetransformations(boolean should_pop); + private static native void popTransformationFor(String target_name); } diff --git a/test/938-load-transform-bcp/build b/test/938-load-transform-bcp/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/938-load-transform-bcp/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-build "$@" --experimental agents diff --git a/test/938-load-transform-bcp/expected.txt b/test/938-load-transform-bcp/expected.txt new file mode 100644 index 0000000000..16c3f8f726 --- /dev/null +++ b/test/938-load-transform-bcp/expected.txt @@ -0,0 +1,2 @@ +ol.foo() -> 'This is foo for val=123' +ol.toString() -> 'This is toString() for val=123' diff --git a/test/938-load-transform-bcp/info.txt b/test/938-load-transform-bcp/info.txt new file mode 100644 index 0000000000..875a5f6ec1 --- /dev/null +++ b/test/938-load-transform-bcp/info.txt @@ -0,0 +1 @@ +Tests basic functions in the jvmti plugin. 
diff --git a/test/938-load-transform-bcp/run b/test/938-load-transform-bcp/run new file mode 100755 index 0000000000..adb1a1c507 --- /dev/null +++ b/test/938-load-transform-bcp/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --jvmti --no-app-image diff --git a/test/938-load-transform-bcp/src-ex/TestMain.java b/test/938-load-transform-bcp/src-ex/TestMain.java new file mode 100644 index 0000000000..3757a0f778 --- /dev/null +++ b/test/938-load-transform-bcp/src-ex/TestMain.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; +import java.util.OptionalLong; +public class TestMain { + public static void runTest() { + // This should be our redefined OptionalLong. 
+ OptionalLong ol = OptionalLong.of(123); + try { + // OptionalLong is a class that is unlikely to be used by the time this test starts. + Method foo = OptionalLong.class.getMethod("foo"); + System.out.println("ol.foo() -> '" + (String)foo.invoke(ol) + "'"); + System.out.println("ol.toString() -> '" + ol.toString() + "'"); + } catch (Exception e) { + System.out.println( + "Exception occured (did something load OptionalLong before this test method!: " + + e.toString()); + e.printStackTrace(); + } + } +} diff --git a/test/938-load-transform-bcp/src/Main.java b/test/938-load-transform-bcp/src/Main.java new file mode 100644 index 0000000000..548489939e --- /dev/null +++ b/test/938-load-transform-bcp/src/Main.java @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.*; +import java.util.Base64; + +class Main { + public static String TEST_NAME = "938-load-transform-bcp"; + + /** + * base64 encoded class/dex file for + * + * // Yes this version of OptionalLong is not compatible with the real one but since it isn't used + * // for anything in the runtime initialization it should be fine. 
+ * + * package java.util; + * public final class OptionalLong { + * private long val; + * + * private OptionalLong(long abc) { + * this.val = abc; + * } + * + * public static OptionalLong of(long abc) { + * return new OptionalLong(abc); + * } + * + * public String foo() { + * return "This is foo for val=" + val; + * } + * + * public String toString() { + * return "This is toString() for val=" + val; + * } + * } + */ + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAKQoADAAaCQADABsHABwKAAMAHQcAHgoABQAaCAAfCgAFACAKAAUAIQoABQAiCAAj" + + "BwAkAQADdmFsAQABSgEABjxpbml0PgEABChKKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAC" + + "b2YBABsoSilMamF2YS91dGlsL09wdGlvbmFsTG9uZzsBAANmb28BABQoKUxqYXZhL2xhbmcvU3Ry" + + "aW5nOwEACHRvU3RyaW5nAQAKU291cmNlRmlsZQEAEU9wdGlvbmFsTG9uZy5qYXZhDAAPACUMAA0A" + + "DgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcMAA8AEAEAF2phdmEvbGFuZy9TdHJpbmdCdWlsZGVy" + + "AQAUVGhpcyBpcyBmb28gZm9yIHZhbD0MACYAJwwAJgAoDAAXABYBABtUaGlzIGlzIHRvU3RyaW5n" + + "KCkgZm9yIHZhbD0BABBqYXZhL2xhbmcvT2JqZWN0AQADKClWAQAGYXBwZW5kAQAtKExqYXZhL2xh" + + "bmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7AQAcKEopTGphdmEvbGFuZy9TdHJp" + + "bmdCdWlsZGVyOwAxAAMADAAAAAEAAgANAA4AAAAEAAIADwAQAAEAEQAAACoAAwADAAAACiq3AAEq" + + "H7UAArEAAAABABIAAAAOAAMAAAAFAAQABgAJAAcACQATABQAAQARAAAAIQAEAAIAAAAJuwADWR63" + + "AASwAAAAAQASAAAABgABAAAACgABABUAFgABABEAAAAvAAMAAQAAABe7AAVZtwAGEge2AAgqtAAC" + + "tgAJtgAKsAAAAAEAEgAAAAYAAQAAAA4AAQAXABYAAQARAAAALwADAAEAAAAXuwAFWbcABhILtgAI" + + "KrQAArYACbYACrAAAAABABIAAAAGAAEAAAASAAEAGAAAAAIAGQ=="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQAOe/TYJCvVthTToFA3tveMDhwTo7uDf0IcBAAAcAAAAHhWNBIAAAAAAAAAAHwDAAAU" + + "AAAAcAAAAAYAAADAAAAABgAAANgAAAABAAAAIAEAAAkAAAAoAQAAAQAAAHABAACMAgAAkAEAAFYC" + + "AABeAgAAYQIAAGQCAABoAgAAbAIAAIACAACUAgAArwIAAMkCAADcAgAA8gIAAA8DAAASAwAAFgMA" + + "AB4DAAAyAwAANwMAADsDAABFAwAAAQAAAAUAAAAGAAAABwAAAAgAAAAMAAAAAgAAAAIAAAAAAAAA" + + 
"AwAAAAMAAABIAgAABAAAAAMAAABQAgAAAwAAAAQAAABIAgAADAAAAAUAAAAAAAAADQAAAAUAAABI" + + "AgAABAAAABMAAAABAAQAAAAAAAMABAAAAAAAAwABAA4AAAADAAIADgAAAAMAAAASAAAABAAFAAAA" + + "AAAEAAAAEAAAAAQAAwARAAAABAAAABIAAAAEAAAAEQAAAAEAAAAAAAAACQAAAAAAAABiAwAAAAAA" + + "AAQAAwABAAAASgMAAAYAAABwEAAAAQBaEgAADgAEAAIAAwAAAFIDAAAGAAAAIgAEAHAwBQAgAxEA" + + "BQABAAMAAABYAwAAFwAAACIAAwBwEAEAAAAbAQoAAABuIAMAEAAMAFNCAABuMAIAIAMMAG4QBAAA" + + "AAwAEQAAAAUAAQADAAAAXQMAABcAAAAiAAMAcBABAAAAGwELAAAAbiADABAADABTQgAAbjACACAD" + + "DABuEAQAAAAMABEAAAABAAAAAAAAAAEAAAACAAY8aW5pdD4AAUoAAUwAAkxKAAJMTAASTGphdmEv" + + "bGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRl" + + "cjsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwART3B0aW9uYWxMb25nLmphdmEAFFRoaXMgaXMg" + + "Zm9vIGZvciB2YWw9ABtUaGlzIGlzIHRvU3RyaW5nKCkgZm9yIHZhbD0AAVYAAlZKAAZhcHBlbmQA" + + "EmVtaXR0ZXI6IGphY2stNC4yMgADZm9vAAJvZgAIdG9TdHJpbmcAA3ZhbAAFAQAHDjwtAAoBAAcO" + + "AA4ABw4AEgAHDgAAAQICAAIFgoAEkAMCCawDBgHIAwIBiAQAAA0AAAAAAAAAAQAAAAAAAAABAAAA" + + "FAAAAHAAAAACAAAABgAAAMAAAAADAAAABgAAANgAAAAEAAAAAQAAACABAAAFAAAACQAAACgBAAAG" + + "AAAAAQAAAHABAAABIAAABAAAAJABAAABEAAAAgAAAEgCAAACIAAAFAAAAFYCAAADIAAABAAAAEoD" + + "AAAAIAAAAQAAAGIDAAAAEAAAAQAAAHwDAAA="); + + public static ClassLoader getClassLoaderFor(String location) throws Exception { + try { + Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader"); + Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class); + return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar", + Main.class.getClassLoader()); + } catch (ClassNotFoundException e) { + // Running on RI. Use URLClassLoader. 
+ return new java.net.URLClassLoader( + new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") }); + } + } + + public static void main(String[] args) { + setPopRetransformations(false); + addCommonTransformationResult("java/util/OptionalLong", CLASS_BYTES, DEX_BYTES); + enableCommonRetransformation(true); + try { + /* this is the "alternate" DEX/Jar file */ + ClassLoader new_loader = getClassLoaderFor(System.getenv("DEX_LOCATION")); + Class<?> klass = (Class<?>)new_loader.loadClass("TestMain"); + if (klass == null) { + throw new AssertionError("loadClass failed"); + } + Method run_test = klass.getMethod("runTest"); + run_test.invoke(null); + } catch (Exception e) { + System.out.println(e.toString()); + e.printStackTrace(); + } + } + + private static native void setPopRetransformations(boolean should_pop); + // Transforms the class + private static native void enableCommonRetransformation(boolean enable); + private static native void addCommonTransformationResult(String target_name, + byte[] class_bytes, + byte[] dex_bytes); +} diff --git a/test/939-hello-transformation-bcp/build b/test/939-hello-transformation-bcp/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/939-hello-transformation-bcp/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/939-hello-transformation-bcp/expected.txt b/test/939-hello-transformation-bcp/expected.txt new file mode 100644 index 0000000000..90fd25828d --- /dev/null +++ b/test/939-hello-transformation-bcp/expected.txt @@ -0,0 +1,3 @@ +ol.toString() -> 'OptionalLong[-559038737]' +Redefining OptionalLong! +ol.toString() -> 'Redefined OptionalLong!' diff --git a/test/939-hello-transformation-bcp/info.txt b/test/939-hello-transformation-bcp/info.txt new file mode 100644 index 0000000000..d230a382bd --- /dev/null +++ b/test/939-hello-transformation-bcp/info.txt @@ -0,0 +1,6 @@ +Tests basic functions in the jvmti plugin. + +Note this function is reliant on the definition of java.util.OptionalLong not +changing. If this classes definition changes we will need to update this class +so that the CLASS_BYTES and DEX_BYTES fields contain dex/class bytes for an +OptionalLong with all the same methods and fields. diff --git a/test/939-hello-transformation-bcp/run b/test/939-hello-transformation-bcp/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/939-hello-transformation-bcp/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/939-hello-transformation-bcp/src/Main.java b/test/939-hello-transformation-bcp/src/Main.java new file mode 100644 index 0000000000..bdf7f592ef --- /dev/null +++ b/test/939-hello-transformation-bcp/src/Main.java @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; +import java.util.OptionalLong; +public class Main { + + /** + * This is the base64 encoded class/dex. + * + * package java.util; + * import java.util.function.LongConsumer; + * import java.util.function.LongSupplier; + * import java.util.function.Supplier; + * public final class OptionalLong { + * // Make sure we have a <clinit> function since the real implementation of OptionalLong does. 
+ * static { EMPTY = null; } + * private static final OptionalLong EMPTY; + * private final boolean isPresent; + * private final long value; + * private OptionalLong() { isPresent = false; value = 0; } + * private OptionalLong(long l) { this(); } + * public static OptionalLong empty() { return null; } + * public static OptionalLong of(long value) { return null; } + * public long getAsLong() { return 0; } + * public boolean isPresent() { return false; } + * public void ifPresent(LongConsumer c) { } + * public long orElse(long l) { return 0; } + * public long orElseGet(LongSupplier s) { return 0; } + * public<X extends Throwable> long orElseThrow(Supplier<X> s) throws X { return 0; } + * public boolean equals(Object o) { return false; } + * public int hashCode() { return 0; } + * public String toString() { return "Redefined OptionalLong!"; } + * } + */ + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAOAoACAAwCQAHADEJAAcAMgoABwAwCAAzCQAHADQHADUHADYBAAVFTVBUWQEAGExq" + + "YXZhL3V0aWwvT3B0aW9uYWxMb25nOwEACWlzUHJlc2VudAEAAVoBAAV2YWx1ZQEAAUoBAAY8aW5p" + + "dD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAEKEopVgEABWVtcHR5AQAaKClMamF2" + + "YS91dGlsL09wdGlvbmFsTG9uZzsBAAJvZgEAGyhKKUxqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwEA" + + "CWdldEFzTG9uZwEAAygpSgEAAygpWgEACWlmUHJlc2VudAEAJChMamF2YS91dGlsL2Z1bmN0aW9u" + + "L0xvbmdDb25zdW1lcjspVgEABm9yRWxzZQEABChKKUoBAAlvckVsc2VHZXQBACQoTGphdmEvdXRp" + + "bC9mdW5jdGlvbi9Mb25nU3VwcGxpZXI7KUoBAAtvckVsc2VUaHJvdwEAIChMamF2YS91dGlsL2Z1" + + "bmN0aW9uL1N1cHBsaWVyOylKAQAKRXhjZXB0aW9ucwcANwEACVNpZ25hdHVyZQEAQjxYOkxqYXZh" + + "L2xhbmcvVGhyb3dhYmxlOz4oTGphdmEvdXRpbC9mdW5jdGlvbi9TdXBwbGllcjxUWDs+OylKXlRY" + + "OwEABmVxdWFscwEAFShMamF2YS9sYW5nL09iamVjdDspWgEACGhhc2hDb2RlAQADKClJAQAIdG9T" + + "dHJpbmcBABQoKUxqYXZhL2xhbmcvU3RyaW5nOwEACDxjbGluaXQ+AQAKU291cmNlRmlsZQEAEU9w" + + "dGlvbmFsTG9uZy5qYXZhDAAPABAMAAsADAwADQAOAQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEM" + + 
"AAkACgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcBABBqYXZhL2xhbmcvT2JqZWN0AQATamF2YS9s" + + "YW5nL1Rocm93YWJsZQAxAAcACAAAAAMAGgAJAAoAAAASAAsADAAAABIADQAOAAAADgACAA8AEAAB" + + "ABEAAAAnAAMAAQAAAA8qtwABKgO1AAIqCbUAA7EAAAABABIAAAAGAAEAAAALAAIADwATAAEAEQAA" + + "AB0AAQADAAAABSq3AASxAAAAAQASAAAABgABAAAADAAJABQAFQABABEAAAAaAAEAAAAAAAIBsAAA" + + "AAEAEgAAAAYAAQAAAA0ACQAWABcAAQARAAAAGgABAAIAAAACAbAAAAABABIAAAAGAAEAAAAOAAEA" + + "GAAZAAEAEQAAABoAAgABAAAAAgmtAAAAAQASAAAABgABAAAADwABAAsAGgABABEAAAAaAAEAAQAA" + + "AAIDrAAAAAEAEgAAAAYAAQAAABAAAQAbABwAAQARAAAAGQAAAAIAAAABsQAAAAEAEgAAAAYAAQAA" + + "ABEAAQAdAB4AAQARAAAAGgACAAMAAAACCa0AAAABABIAAAAGAAEAAAASAAEAHwAgAAEAEQAAABoA" + + "AgACAAAAAgmtAAAAAQASAAAABgABAAAAEwABACEAIgADABEAAAAaAAIAAgAAAAIJrQAAAAEAEgAA" + + "AAYAAQAAABQAIwAAAAQAAQAkACUAAAACACYAAQAnACgAAQARAAAAGgABAAIAAAACA6wAAAABABIA" + + "AAAGAAEAAAAVAAEAKQAqAAEAEQAAABoAAQABAAAAAgOsAAAAAQASAAAABgABAAAAFgABACsALAAB" + + "ABEAAAAbAAEAAQAAAAMSBbAAAAABABIAAAAGAAEAAAAXAAgALQAQAAEAEQAAAB0AAQAAAAAABQGz" + + "AAaxAAAAAQASAAAABgABAAAABwABAC4AAAACAC8="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQCvAoivSJqk6GdYOgJmvrM/b2/flxhw99q8BwAAcAAAAHhWNBIAAAAAAAAAAPgGAAAq" + + "AAAAcAAAAA0AAAAYAQAADQAAAEwBAAADAAAA6AEAAA8AAAAAAgAAAQAAAHgCAAAkBQAAmAIAACoE" + + "AAA4BAAAPQQAAEcEAABPBAAAUwQAAFoEAABdBAAAYAQAAGQEAABoBAAAawQAAG8EAACOBAAAqgQA" + + "AL4EAADSBAAA6QQAAAMFAAAmBQAASQUAAGcFAACGBQAAmQUAALIFAAC1BQAAuQUAAL0FAADABQAA" + + "xAUAANgFAADfBQAA5wUAAPIFAAD8BQAABwYAABIGAAAWBgAAHgYAACkGAAA2BgAAQAYAAAYAAAAH" + + "AAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABMAAAAVAAAAGAAAABsAAAAGAAAAAAAAAAAA" + + "AAAHAAAAAQAAAAAAAAAIAAAAAQAAAAQEAAAJAAAAAQAAAAwEAAAJAAAAAQAAABQEAAAKAAAABQAA" + + "AAAAAAAKAAAABwAAAAAAAAALAAAABwAAAAQEAAAYAAAACwAAAAAAAAAZAAAACwAAAAQEAAAaAAAA" + + "CwAAABwEAAAbAAAADAAAAAAAAAAcAAAADAAAACQEAAAHAAcABQAAAAcADAAjAAAABwABACkAAAAE" + + "AAgAAwAAAAcACAACAAAABwAIAAMAAAAHAAkAAwAAAAcABgAeAAAABwAMAB8AAAAHAAEAIAAAAAcA" + + 
"AAAhAAAABwAKACIAAAAHAAsAIwAAAAcABwAkAAAABwACACUAAAAHAAMAJgAAAAcABAAnAAAABwAF" + + "ACgAAAAHAAAAEQAAAAQAAAAAAAAAFgAAAOwDAACtBgAAAAAAAAIAAACVBgAApQYAAAEAAAAAAAAA" + + "RwYAAAQAAAASAGkAAAAOAAMAAQABAAAATQYAAAsAAABwEAAAAgASAFwgAQAWAAAAWiACAA4AAAAD" + + "AAMAAQAAAFIGAAAEAAAAcBACAAAADgABAAAAAAAAAFgGAAACAAAAEgARAAMAAgAAAAAAXQYAAAIA" + + "AAASABEAAwACAAAAAABjBgAAAgAAABIADwADAAEAAAAAAGkGAAADAAAAFgAAABAAAAACAAEAAAAA" + + "AG4GAAACAAAAEgAPAAIAAgAAAAAAcwYAAAEAAAAOAAAAAgABAAAAAAB5BgAAAgAAABIADwAFAAMA" + + "AAAAAH4GAAADAAAAFgAAABAAAAAEAAIAAAAAAIQGAAADAAAAFgAAABAAAAAEAAIAAAAAAIoGAAAD" + + "AAAAFgAAABAAAAACAAEAAAAAAJAGAAAEAAAAGwAXAAAAEQAAAAAAAAAAAAEAAAAAAAAADQAAAJgC" + + "AAABAAAAAQAAAAEAAAAJAAAAAQAAAAoAAAABAAAACAAAAAEAAAAEAAw8VFg7PjspSl5UWDsAAzxY" + + "OgAIPGNsaW5pdD4ABjxpbml0PgACPigABUVNUFRZAAFJAAFKAAJKSgACSkwAAUwAAkxKAB1MZGFs" + + "dmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAaTGRhbHZpay9hbm5vdGF0aW9uL1Rocm93czsAEkxq" + + "YXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVMamF2YS9sYW5nL1Rocm93YWJs" + + "ZTsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwAhTGphdmEvdXRpbC9mdW5jdGlvbi9Mb25nQ29u" + + "c3VtZXI7ACFMamF2YS91dGlsL2Z1bmN0aW9uL0xvbmdTdXBwbGllcjsAHExqYXZhL3V0aWwvZnVu" + + "Y3Rpb24vU3VwcGxpZXIAHUxqYXZhL3V0aWwvZnVuY3Rpb24vU3VwcGxpZXI7ABFPcHRpb25hbExv" + + "bmcuamF2YQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEAAVYAAlZKAAJWTAABWgACWkwAEmVtaXR0" + + "ZXI6IGphY2stNC4yMgAFZW1wdHkABmVxdWFscwAJZ2V0QXNMb25nAAhoYXNoQ29kZQAJaWZQcmVz" + + "ZW50AAlpc1ByZXNlbnQAAm9mAAZvckVsc2UACW9yRWxzZUdldAALb3JFbHNlVGhyb3cACHRvU3Ry" + + "aW5nAAV2YWx1ZQAHAAcOOQALAAcOAAwBAAcOAA0ABw4ADgEABw4AFQEABw4ADwAHDgAWAAcOABEB" + + "AAcOABAABw4AEgEABw4AEwEABw4AFAEABw4AFwAHDgACAgEpHAUXARcQFwQXFBcAAgMBKRwBGAYB" + + "AgUJABoBEgESAYiABKQFAYKABLwFAYKABOQFAQn8BQYJkAYFAaQGAQG4BgEB0AYBAeQGAQH4BgIB" + + "jAcBAaQHAQG8BwEB1AcAAAAQAAAAAAAAAAEAAAAAAAAAAQAAACoAAABwAAAAAgAAAA0AAAAYAQAA" + + "AwAAAA0AAABMAQAABAAAAAMAAADoAQAABQAAAA8AAAAAAgAABgAAAAEAAAB4AgAAAxAAAAEAAACY" + + "AgAAASAAAA4AAACkAgAABiAAAAEAAADsAwAAARAAAAUAAAAEBAAAAiAAACoAAAAqBAAAAyAAAA4A" + + 
"AABHBgAABCAAAAIAAACVBgAAACAAAAEAAACtBgAAABAAAAEAAAD4BgAA"); + + public static void main(String[] args) { + // OptionalLong is a class that is unlikely to be used by the time this test starts and is not + // likely to be changed in any meaningful way in the future. + OptionalLong ol = OptionalLong.of(0xDEADBEEF); + System.out.println("ol.toString() -> '" + ol.toString() + "'"); + System.out.println("Redefining OptionalLong!"); + doCommonClassRedefinition(OptionalLong.class, CLASS_BYTES, DEX_BYTES); + System.out.println("ol.toString() -> '" + ol.toString() + "'"); + } + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] class_file, + byte[] dex_file); +} diff --git a/test/940-recursive-obsolete/build b/test/940-recursive-obsolete/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/940-recursive-obsolete/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/940-recursive-obsolete/expected.txt b/test/940-recursive-obsolete/expected.txt new file mode 100644 index 0000000000..18ffc25d8a --- /dev/null +++ b/test/940-recursive-obsolete/expected.txt @@ -0,0 +1,21 @@ +hello2 +hello1 +Not doing anything here +hello0 +goodbye0 +goodbye1 +goodbye2 +hello2 +hello1 +transforming calling function +Hello0 - transformed +Goodbye0 - transformed +goodbye1 +goodbye2 +Hello2 - transformed +Hello1 - transformed +Not doing anything here +Hello0 - transformed +Goodbye0 - transformed +Goodbye1 - transformed +Goodbye2 - transformed diff --git a/test/940-recursive-obsolete/info.txt b/test/940-recursive-obsolete/info.txt new file mode 100644 index 0000000000..c8b892cedd --- /dev/null +++ b/test/940-recursive-obsolete/info.txt @@ -0,0 +1 @@ +Tests basic obsolete method support diff --git a/test/940-recursive-obsolete/run b/test/940-recursive-obsolete/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/940-recursive-obsolete/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/940-recursive-obsolete/src/Main.java b/test/940-recursive-obsolete/src/Main.java new file mode 100644 index 0000000000..3766906a89 --- /dev/null +++ b/test/940-recursive-obsolete/src/Main.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +public class Main { + + // class Transform { + // public void sayHi(int recur, Runnable r) { + // System.out.println("Hello" + recur + " - transformed"); + // if (recur == 1) { + // r.run(); + // sayHi(recur - 1, r); + // } else if (recur != 0) { + // sayHi(recur - 1, r); + // } + // System.out.println("Goodbye" + recur + " - transformed"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQANwoADwAZCQAaABsHABwKAAMAGQgAHQoAAwAeCgADAB8IACAKAAMAIQoAIgAjCwAk" + + "ACUKAA4AJggAJwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUB" + + "AAVzYXlIaQEAGChJTGphdmEvbGFuZy9SdW5uYWJsZTspVgEADVN0YWNrTWFwVGFibGUBAApTb3Vy" + + "Y2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMABAAEQcAKgwAKwAsAQAXamF2YS9sYW5nL1N0cmluZ0J1" + + "aWxkZXIBAAVIZWxsbwwALQAuDAAtAC8BAA4gLSB0cmFuc2Zvcm1lZAwAMAAxBwAyDAAzADQHADUM" + + "ADYAEQwAFAAVAQAHR29vZGJ5ZQEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZh" + + "L2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQAGYXBwZW5kAQAtKExq" + + 
"YXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7AQAcKEkpTGphdmEvbGFu" + + "Zy9TdHJpbmdCdWlsZGVyOwEACHRvU3RyaW5nAQAUKClMamF2YS9sYW5nL1N0cmluZzsBABNqYXZh" + + "L2lvL1ByaW50U3RyZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEAEmphdmEv" + + "bGFuZy9SdW5uYWJsZQEAA3J1bgAgAA4ADwAAAAAAAgAAABAAEQABABIAAAAdAAEAAQAAAAUqtwAB" + + "sQAAAAEAEwAAAAYAAQAAAAEAAQAUABUAAQASAAAAnQADAAMAAABfsgACuwADWbcABBIFtgAGG7YA" + + "BxIItgAGtgAJtgAKGwSgABQsuQALAQAqGwRkLLYADKcADxuZAAsqGwRkLLYADLIAArsAA1m3AAQS" + + "DbYABhu2AAcSCLYABrYACbYACrEAAAACABMAAAAiAAgAAAADAB4ABAAjAAUAKQAGADQABwA4AAgA" + + "QAAKAF4ACwAWAAAABAACNAsAAQAXAAAAAgAY"); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQA3pkIgnymz2/eri+mp2dyZo3jolQmaRPKEBAAAcAAAAHhWNBIAAAAAAAAAAOQDAAAa" + + "AAAAcAAAAAkAAADYAAAABgAAAPwAAAABAAAARAEAAAkAAABMAQAAAQAAAJQBAADQAgAAtAEAAJwC" + + "AACsAgAAtAIAAL0CAADEAgAAxwIAAMoCAADOAgAA0gIAAN8CAAD2AgAACgMAACADAAA0AwAATwMA" + + "AGMDAABzAwAAdgMAAHsDAAB/AwAAhwMAAJsDAACgAwAAqQMAAK4DAAC1AwAABAAAAAgAAAAJAAAA" + + "CgAAAAsAAAAMAAAADQAAAA4AAAAQAAAABQAAAAUAAAAAAAAABgAAAAYAAACEAgAABwAAAAYAAACM" + + "AgAAEAAAAAgAAAAAAAAAEQAAAAgAAACUAgAAEgAAAAgAAACMAgAABwACABUAAAABAAMAAQAAAAEA" + + "BAAYAAAAAgAFABYAAAADAAMAAQAAAAQAAwAXAAAABgADAAEAAAAGAAEAEwAAAAYAAgATAAAABgAA" + + "ABkAAAABAAAAAAAAAAMAAAAAAAAADwAAAAAAAADWAwAAAAAAAAEAAQABAAAAvwMAAAQAAABwEAMA" + + "AAAOAAYAAwADAAAAxAMAAFQAAABiAAAAIgEGAHAQBQABABsCAwAAAG4gBwAhAAwBbiAGAEEADAEb" + + "AgAAAABuIAcAIQAMAW4QCAABAAwBbiACABAAEhAzBCsAchAEAAUA2AAE/24wAQADBWIAAAAiAQYA" + + "cBAFAAEAGwICAAAAbiAHACEADAFuIAYAQQAMARsCAAAAAG4gBwAhAAwBbhAIAAEADAFuIAIAEAAO" + + "ADgE3//YAAT/bjABAAMFKNgBAAAAAAAAAAEAAAAFAAAAAgAAAAAABAAOIC0gdHJhbnNmb3JtZWQA" + + "Bjxpbml0PgAHR29vZGJ5ZQAFSGVsbG8AAUkAAUwAAkxJAAJMTAALTFRyYW5zZm9ybTsAFUxqYXZh" + + "L2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxl" + + "OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9s" + + 
"YW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAANWSUwAAlZMAAZhcHBlbmQAEmVtaXR0ZXI6" + + "IGphY2stNC4yNAADb3V0AAdwcmludGxuAANydW4ABXNheUhpAAh0b1N0cmluZwABAAcOAAMCAAAH" + + "DgEgDzw8XQEgDxktAAAAAQEAgIAEtAMBAcwDDQAAAAAAAAABAAAAAAAAAAEAAAAaAAAAcAAAAAIA" + + "AAAJAAAA2AAAAAMAAAAGAAAA/AAAAAQAAAABAAAARAEAAAUAAAAJAAAATAEAAAYAAAABAAAAlAEA" + + "AAEgAAACAAAAtAEAAAEQAAADAAAAhAIAAAIgAAAaAAAAnAIAAAMgAAACAAAAvwMAAAAgAAABAAAA" + + "1gMAAAAQAAABAAAA5AMAAA=="); + + public static void main(String[] args) { + doTest(new Transform()); + } + + public static void doTest(Transform t) { + t.sayHi(2, () -> { System.out.println("Not doing anything here"); }); + t.sayHi(2, () -> { + System.out.println("transforming calling function"); + doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + }); + t.sayHi(2, () -> { System.out.println("Not doing anything here"); }); + } + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); +} diff --git a/test/940-recursive-obsolete/src/Transform.java b/test/940-recursive-obsolete/src/Transform.java new file mode 100644 index 0000000000..97522cddf6 --- /dev/null +++ b/test/940-recursive-obsolete/src/Transform.java @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class Transform { + public void sayHi(int recur, Runnable r) { + System.out.println("hello" + recur); + if (recur == 1) { + r.run(); + sayHi(recur - 1, r); + } else if (recur != 0) { + sayHi(recur - 1, r); + } + System.out.println("goodbye" + recur); + } +} diff --git a/test/941-recurive-obsolete-jit/build b/test/941-recurive-obsolete-jit/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/941-recurive-obsolete-jit/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/941-recurive-obsolete-jit/expected.txt b/test/941-recurive-obsolete-jit/expected.txt new file mode 100644 index 0000000000..086f7b03dc --- /dev/null +++ b/test/941-recurive-obsolete-jit/expected.txt @@ -0,0 +1,22 @@ +hello2 +hello1 +Not doing anything here +hello0 +goodbye0 +goodbye1 +goodbye2 +hello2 +hello1 +transforming calling function +Hello0 - transformed +Goodbye0 - transformed +goodbye1 +goodbye2 +Hello2 - transformed +Hello1 - transformed +Not doing anything here +Hello0 - transformed +Goodbye0 - transformed +Goodbye1 - transformed +Goodbye2 - transformed + diff --git a/test/941-recurive-obsolete-jit/info.txt b/test/941-recurive-obsolete-jit/info.txt new file mode 100644 index 0000000000..c8b892cedd --- /dev/null +++ b/test/941-recurive-obsolete-jit/info.txt @@ -0,0 +1 @@ +Tests basic obsolete method support diff --git a/test/941-recurive-obsolete-jit/run b/test/941-recurive-obsolete-jit/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/941-recurive-obsolete-jit/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/941-recurive-obsolete-jit/src/Main.java b/test/941-recurive-obsolete-jit/src/Main.java new file mode 100644 index 0000000000..f6d6416b55 --- /dev/null +++ b/test/941-recurive-obsolete-jit/src/Main.java @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; +import java.util.function.Consumer; +import java.lang.reflect.Method; + +public class Main { + + // import java.util.function.Consumer; + // class Transform { + // public void sayHi(int recur, Consumer<String> reporter, Runnable r) { + // reporter.accept("Hello" + recur + " - transformed"); + // if (recur == 1) { + // r.run(); + // sayHi(recur - 1, reporter, r); + // } else if (recur != 0) { + // sayHi(recur - 1, reporter, r); + // } + // reporter.accept("Goodbye" + recur + " - transformed"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAMwoADgAaBwAbCgACABoIABwKAAIAHQoAAgAeCAAfCgACACALACEAIgsAIwAkCgAN" + + "ACUIACYHACcHACgBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5" + + "SGkBADUoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7TGphdmEvbGFuZy9SdW5uYWJsZTsp" + + "VgEADVN0YWNrTWFwVGFibGUBAAlTaWduYXR1cmUBAEkoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29u" + + "c3VtZXI8TGphdmEvbGFuZy9TdHJpbmc7PjtMamF2YS9sYW5nL1J1bm5hYmxlOylWAQAKU291cmNl" + + 
"RmlsZQEADlRyYW5zZm9ybS5qYXZhDAAPABABABdqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcgEABUhl" + + "bGxvDAApACoMACkAKwEADiAtIHRyYW5zZm9ybWVkDAAsAC0HAC4MAC8AMAcAMQwAMgAQDAATABQB" + + "AAdHb29kYnllAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAEABmFwcGVuZAEALShMamF2" + + "YS9sYW5nL1N0cmluZzspTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEAHChJKUxqYXZhL2xhbmcv" + + "U3RyaW5nQnVpbGRlcjsBAAh0b1N0cmluZwEAFCgpTGphdmEvbGFuZy9TdHJpbmc7AQAbamF2YS91" + + "dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0AQAVKExqYXZhL2xhbmcvT2JqZWN0OylWAQAS" + + "amF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADQAOAAAAAAACAAAADwAQAAEAEQAAAB0AAQABAAAA" + + "BSq3AAGxAAAAAQASAAAABgABAAAAAgABABMAFAACABEAAACfAAQABAAAAGEsuwACWbcAAxIEtgAF" + + "G7YABhIHtgAFtgAIuQAJAgAbBKAAFS25AAoBACobBGQsLbYAC6cAEBuZAAwqGwRkLC22AAssuwAC" + + "WbcAAxIMtgAFG7YABhIHtgAFtgAIuQAJAgCxAAAAAgASAAAAIgAIAAAABAAeAAUAIwAGACkABwA1" + + "AAgAOQAJAEIACwBgAAwAFQAAAAQAAjUMABYAAAACABcAAQAYAAAAAgAZ"); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQA7uevryhDgvad3G3EACTdspZGfNKv2i3kkBQAAcAAAAHhWNBIAAAAAAAAAAGwEAAAf" + + "AAAAcAAAAAkAAADsAAAABgAAABABAAAAAAAAAAAAAAkAAABYAQAAAQAAAKABAABkAwAAwAEAAMoC" + + "AADaAgAA3gIAAOICAADlAgAA7QIAAPECAAD6AgAAAQMAAAQDAAAHAwAACwMAAA8DAAAcAwAAOwMA" + + "AE8DAABlAwAAeQMAAJQDAACyAwAA0QMAAOEDAADkAwAA6gMAAO4DAAD2AwAA/gMAABIEAAAXBAAA" + + "HgQAACgEAAAIAAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEwAAABUAAAAJAAAABQAAAAAAAAAK" + + "AAAABgAAAKgCAAALAAAABgAAALACAAAVAAAACAAAAAAAAAAWAAAACAAAALgCAAAXAAAACAAAAMQC" + + "AAABAAMABAAAAAEABAAcAAAAAwADAAQAAAAEAAMAGwAAAAYAAwAEAAAABgABABkAAAAGAAIAGQAA" + + "AAYAAAAdAAAABwAFABgAAAABAAAAAAAAAAMAAAAAAAAAFAAAAJACAABbBAAAAAAAAAEAAABHBAAA" + + "AQABAAEAAAAvBAAABAAAAHAQAgAAAA4ABgAEAAQAAAA0BAAAUAAAACIABgBwEAQAAAAbAQcAAABu" + + "IAYAEAAMAG4gBQAwAAwAGwEAAAAAbiAGABAADABuEAcAAAAMAHIgCAAEABIQMwMpAHIQAwAFANgA" + + "A/9uQAEAAlQiAAYAcBAEAAAAGwEGAAAAbiAGABAADABuIAUAMAAMABsBAAAAAG4gBgAQAAwAbhAH" + + "AAAADAByIAgABAAOADgD4f/YAAP/bkABAAJUKNoAAAAAAAAAAAEAAAAAAAAAAQAAAMABAAABAAAA" + + 
"AAAAAAEAAAAFAAAAAwAAAAAABwAEAAAAAQAAAAMADiAtIHRyYW5zZm9ybWVkAAIoSQACKVYAATwA" + + "Bjxpbml0PgACPjsAB0dvb2RieWUABUhlbGxvAAFJAAFMAAJMSQACTEwAC0xUcmFuc2Zvcm07AB1M" + + "ZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9s" + + "YW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxk" + + "ZXI7ABxMamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAB1MamF2YS91dGlsL2Z1bmN0aW9uL0Nv" + + "bnN1bWVyOwAOVHJhbnNmb3JtLmphdmEAAVYABFZJTEwAAlZMAAZhY2NlcHQABmFwcGVuZAASZW1p" + + "dHRlcjogamFjay00LjI0AANydW4ABXNheUhpAAh0b1N0cmluZwAFdmFsdWUAAgAHDgAEAwAAAAcO" + + "AR4PPDxdAR4PGS0AAgIBHhwHFwEXEhcDFxAXBRcPFwIAAAEBAICABMgDAQHgAwAAAA8AAAAAAAAA" + + "AQAAAAAAAAABAAAAHwAAAHAAAAACAAAACQAAAOwAAAADAAAABgAAABABAAAFAAAACQAAAFgBAAAG" + + "AAAAAQAAAKABAAADEAAAAQAAAMABAAABIAAAAgAAAMgBAAAGIAAAAQAAAJACAAABEAAABAAAAKgC" + + "AAACIAAAHwAAAMoCAAADIAAAAgAAAC8EAAAEIAAAAQAAAEcEAAAAIAAAAQAAAFsEAAAAEAAAAQAA" + + "AGwEAAA="); + + // A class that we can use to keep track of the output of this test. + private static class TestWatcher implements Consumer<String> { + private StringBuilder sb; + public TestWatcher() { + sb = new StringBuilder(); + } + + @Override + public void accept(String s) { + sb.append(s); + sb.append('\n'); + } + + public String getOutput() { + return sb.toString(); + } + + public void clear() { + sb = new StringBuilder(); + } + } + + public static void main(String[] args) { + doTest(new Transform()); + } + + private static boolean retry = false; + + public static void doTest(Transform t) { + final TestWatcher reporter = new TestWatcher(); + Method say_hi_method; + // Figure out if we can even JIT at all. + final boolean has_jit = hasJit(); + try { + say_hi_method = Transform.class.getDeclaredMethod( + "sayHi", int.class, Consumer.class, Runnable.class); + } catch (Exception e) { + System.out.println("Unable to find methods!"); + e.printStackTrace(); + return; + } + // Makes sure the stack is the way we want it for the test and does the redefinition. 
It will + // set the retry boolean to true if we need to go around again due to jit code being GCd. + Runnable do_redefinition = () -> { + if (has_jit && Main.isInterpretedFunction(say_hi_method, true)) { + // Try again. We are not running the right jitted methods/cannot redefine them now. + retry = true; + } else { + // Actually do the redefinition. The stack looks good. + retry = false; + reporter.accept("transforming calling function"); + doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + } + }; + do { + // Run ensureJitCompiled here since it might get GCd + ensureJitCompiled(Transform.class, "sayHi"); + // Clear output. + reporter.clear(); + t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); }); + t.sayHi(2, reporter, do_redefinition); + t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); }); + } while(retry); + System.out.println(reporter.getOutput()); + } + + private static native boolean hasJit(); + + private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable); + + private static native void ensureJitCompiled(Class c, String name); + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); +} diff --git a/test/941-recurive-obsolete-jit/src/Transform.java b/test/941-recurive-obsolete-jit/src/Transform.java new file mode 100644 index 0000000000..e6a913a391 --- /dev/null +++ b/test/941-recurive-obsolete-jit/src/Transform.java @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.function.Consumer; +class Transform { + public void sayHi(int recur, Consumer<String> c, Runnable r) { + c.accept("hello" + recur); + if (recur == 1) { + r.run(); + sayHi(recur - 1, c, r); + } else if (recur != 0) { + sayHi(recur - 1, c, r); + } + c.accept("goodbye" + recur); + } +} diff --git a/test/942-private-recursive/build b/test/942-private-recursive/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/942-private-recursive/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/942-private-recursive/expected.txt b/test/942-private-recursive/expected.txt new file mode 100644 index 0000000000..18ffc25d8a --- /dev/null +++ b/test/942-private-recursive/expected.txt @@ -0,0 +1,21 @@ +hello2 +hello1 +Not doing anything here +hello0 +goodbye0 +goodbye1 +goodbye2 +hello2 +hello1 +transforming calling function +Hello0 - transformed +Goodbye0 - transformed +goodbye1 +goodbye2 +Hello2 - transformed +Hello1 - transformed +Not doing anything here +Hello0 - transformed +Goodbye0 - transformed +Goodbye1 - transformed +Goodbye2 - transformed diff --git a/test/942-private-recursive/info.txt b/test/942-private-recursive/info.txt new file mode 100644 index 0000000000..c8b892cedd --- /dev/null +++ b/test/942-private-recursive/info.txt @@ -0,0 +1 @@ +Tests basic obsolete method support diff --git a/test/942-private-recursive/run b/test/942-private-recursive/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/942-private-recursive/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/942-private-recursive/src/Main.java b/test/942-private-recursive/src/Main.java new file mode 100644 index 0000000000..8cbab7bac3 --- /dev/null +++ b/test/942-private-recursive/src/Main.java @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +public class Main { + + // class Transform { + // public void sayHi(int recur, Runnable r) { + // privateSayHi(recur, r); + // } + // private void privateSayHi(int recur, Runnable r) { + // System.out.println("Hello" + recur + " - transformed"); + // if (recur == 1) { + // r.run(); + // privateSayHi(recur - 1, r); + // } else if (recur != 0) { + // privateSayHi(recur - 1, r); + // } + // System.out.println("Goodbye" + recur + " - transformed"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAOAoADwAaCgAOABsJABwAHQcAHgoABAAaCAAfCgAEACAKAAQAIQgAIgoABAAjCgAk" + + "ACULACYAJwgAKAcAKQcAKgEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUB" + + "AAVzYXlIaQEAGChJTGphdmEvbGFuZy9SdW5uYWJsZTspVgEADHByaXZhdGVTYXlIaQEADVN0YWNr" + + "TWFwVGFibGUBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMABAAEQwAFgAVBwArDAAsAC0B" + + "ABdqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcgEABUhlbGxvDAAuAC8MAC4AMAEADiAtIHRyYW5zZm9y" + + "bWVkDAAxADIHADMMADQANQcANgwANwARAQAHR29vZGJ5ZQEACVRyYW5zZm9ybQEAEGphdmEvbGFu" + + 
"Zy9PYmplY3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07" + + "AQAGYXBwZW5kAQAtKExqYXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7" + + "AQAcKEkpTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEACHRvU3RyaW5nAQAUKClMamF2YS9sYW5n" + + "L1N0cmluZzsBABNqYXZhL2lvL1ByaW50U3RyZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0" + + "cmluZzspVgEAEmphdmEvbGFuZy9SdW5uYWJsZQEAA3J1bgAgAA4ADwAAAAAAAwAAABAAEQABABIA" + + "AAAdAAEAAQAAAAUqtwABsQAAAAEAEwAAAAYAAQAAAAEAAQAUABUAAQASAAAAIwADAAMAAAAHKhss" + + "twACsQAAAAEAEwAAAAoAAgAAAAMABgAEAAIAFgAVAAEAEgAAAJ0AAwADAAAAX7IAA7sABFm3AAUS" + + "BrYABxu2AAgSCbYAB7YACrYACxsEoAAULLkADAEAKhsEZCy3AAKnAA8bmQALKhsEZCy3AAKyAAO7" + + "AARZtwAFEg22AAcbtgAIEgm2AAe2AAq2AAuxAAAAAgATAAAAIgAIAAAABgAeAAcAIwAIACkACQA0" + + "AAoAOAALAEAADQBeAA4AFwAAAAQAAjQLAAEAGAAAAAIAGQ=="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQBQqwVIiZvIuS8j1HDurKbXZEV62Mnug5PEBAAAcAAAAHhWNBIAAAAAAAAAACQEAAAb" + + "AAAAcAAAAAkAAADcAAAABgAAAAABAAABAAAASAEAAAoAAABQAQAAAQAAAKABAAAEAwAAwAEAAMAC" + + "AADQAgAA2AIAAOECAADoAgAA6wIAAO4CAADyAgAA9gIAAAMDAAAaAwAALgMAAEQDAABYAwAAcwMA" + + "AIcDAACXAwAAmgMAAJ8DAACjAwAAqwMAAL8DAADEAwAAzQMAANsDAADgAwAA5wMAAAQAAAAIAAAA" + + "CQAAAAoAAAALAAAADAAAAA0AAAAOAAAAEAAAAAUAAAAFAAAAAAAAAAYAAAAGAAAAqAIAAAcAAAAG" + + "AAAAsAIAABAAAAAIAAAAAAAAABEAAAAIAAAAuAIAABIAAAAIAAAAsAIAAAcAAgAVAAAAAQADAAEA" + + "AAABAAQAFwAAAAEABAAZAAAAAgAFABYAAAADAAMAAQAAAAQAAwAYAAAABgADAAEAAAAGAAEAEwAA" + + "AAYAAgATAAAABgAAABoAAAABAAAAAAAAAAMAAAAAAAAADwAAAAAAAAAQBAAAAAAAAAEAAQABAAAA" + + "8QMAAAQAAABwEAQAAAAOAAYAAwADAAAA9gMAAFQAAABiAAAAIgEGAHAQBgABABsCAwAAAG4gCAAh" + + "AAwBbiAHAEEADAEbAgAAAABuIAgAIQAMAW4QCQABAAwBbiADABAAEhAzBCsAchAFAAUA2AAE/3Aw" + + "AQADBWIAAAAiAQYAcBAGAAEAGwICAAAAbiAIACEADAFuIAcAQQAMARsCAAAAAG4gCAAhAAwBbhAJ" + + "AAEADAFuIAMAEAAOADgE3//YAAT/cDABAAMFKNgDAAMAAwAAAAgEAAAEAAAAcDABABACDgABAAAA" + + "AAAAAAEAAAAFAAAAAgAAAAAABAAOIC0gdHJhbnNmb3JtZWQABjxpbml0PgAHR29vZGJ5ZQAFSGVs" + + 
"bG8AAUkAAUwAAkxJAAJMTAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGph" + + "dmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7" + + "ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9y" + + "bS5qYXZhAAFWAANWSUwAAlZMAAZhcHBlbmQAEmVtaXR0ZXI6IGphY2stNC4yNAADb3V0AAdwcmlu" + + "dGxuAAxwcml2YXRlU2F5SGkAA3J1bgAFc2F5SGkACHRvU3RyaW5nAAEABw4ABgIAAAcOASAPPDxd" + + "ASAPGS0AAwIAAAcOPAAAAAIBAICABMADAQLYAwIBkAUAAA0AAAAAAAAAAQAAAAAAAAABAAAAGwAA" + + "AHAAAAACAAAACQAAANwAAAADAAAABgAAAAABAAAEAAAAAQAAAEgBAAAFAAAACgAAAFABAAAGAAAA" + + "AQAAAKABAAABIAAAAwAAAMABAAABEAAAAwAAAKgCAAACIAAAGwAAAMACAAADIAAAAwAAAPEDAAAA" + + "IAAAAQAAABAEAAAAEAAAAQAAACQEAAA="); + + public static void main(String[] args) { + doTest(new Transform()); + } + + public static void doTest(Transform t) { + t.sayHi(2, () -> { System.out.println("Not doing anything here"); }); + t.sayHi(2, () -> { + System.out.println("transforming calling function"); + doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + }); + t.sayHi(2, () -> { System.out.println("Not doing anything here"); }); + } + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); +} diff --git a/test/942-private-recursive/src/Transform.java b/test/942-private-recursive/src/Transform.java new file mode 100644 index 0000000000..dd5452cac8 --- /dev/null +++ b/test/942-private-recursive/src/Transform.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Transform { + public void sayHi(int recur, Runnable r) { + privateSayHi(recur, r); + } + + private void privateSayHi(int recur, Runnable r) { + System.out.println("hello" + recur); + if (recur == 1) { + r.run(); + privateSayHi(recur - 1, r); + } else if (recur != 0) { + privateSayHi(recur - 1, r); + } + System.out.println("goodbye" + recur); + } +} diff --git a/test/943-private-recursive-jit/build b/test/943-private-recursive-jit/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/943-private-recursive-jit/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/943-private-recursive-jit/expected.txt b/test/943-private-recursive-jit/expected.txt new file mode 100644 index 0000000000..447f4a2245 --- /dev/null +++ b/test/943-private-recursive-jit/expected.txt @@ -0,0 +1,22 @@ +hello2 +hello1 +Not doing anything here +hello0 +goodbye0 +goodbye1 +goodbye2 +hello2 +hello1 +transforming calling function +hello0 - transformed +goodbye0 - transformed +goodbye1 +goodbye2 +hello2 - transformed +hello1 - transformed +Not doing anything here +hello0 - transformed +goodbye0 - transformed +goodbye1 - transformed +goodbye2 - transformed + diff --git a/test/943-private-recursive-jit/info.txt b/test/943-private-recursive-jit/info.txt new file mode 100644 index 0000000000..c8b892cedd --- /dev/null +++ b/test/943-private-recursive-jit/info.txt @@ -0,0 +1 @@ +Tests basic obsolete method support diff --git a/test/943-private-recursive-jit/run b/test/943-private-recursive-jit/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/943-private-recursive-jit/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/943-private-recursive-jit/src/Main.java b/test/943-private-recursive-jit/src/Main.java new file mode 100644 index 0000000000..8fa534d997 --- /dev/null +++ b/test/943-private-recursive-jit/src/Main.java @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; +import java.util.function.Consumer; +import java.lang.reflect.Method; + +public class Main { + static final boolean ALWAYS_PRINT = false; + + // import java.util.function.Consumer; + // class Transform { + // public void sayHi(int recur, Consumer<String> reporter, Runnable r) { + // privateSayHi(recur, reporter, r); + // } + // private void privateSayHi(int recur, Consumer<String> reporter, Runnable r) { + // reporter.accpet("hello" + recur + " - transformed"); + // if (recur == 1) { + // r.run(); + // privateSayHi(recur - 1, reporter, r); + // } else if (recur != 0) { + // privateSayHi(recur - 1, reporter, r); + // } + // reporter.accept("goodbye" + recur + " - transformed"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQANAoADgAbCgANABwHAB0KAAMAGwgAHgoAAwAfCgADACAIACEKAAMAIgsAIwAkCwAl" + + "ACYIACcHACgHACkBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5" + + "SGkBADUoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7TGphdmEvbGFuZy9SdW5uYWJsZTsp" + + 
"VgEACVNpZ25hdHVyZQEASShJTGphdmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjxMamF2YS9sYW5n" + + "L1N0cmluZzs+O0xqYXZhL2xhbmcvUnVubmFibGU7KVYBAAxwcml2YXRlU2F5SGkBAA1TdGFja01h" + + "cFRhYmxlAQAKU291cmNlRmlsZQEADlRyYW5zZm9ybS5qYXZhDAAPABAMABcAFAEAF2phdmEvbGFu" + + "Zy9TdHJpbmdCdWlsZGVyAQAFaGVsbG8MACoAKwwAKgAsAQAOIC0gdHJhbnNmb3JtZWQMAC0ALgcA" + + "LwwAMAAxBwAyDAAzABABAAdnb29kYnllAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAEA" + + "BmFwcGVuZAEALShMamF2YS9sYW5nL1N0cmluZzspTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEA" + + "HChJKUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcjsBAAh0b1N0cmluZwEAFCgpTGphdmEvbGFuZy9T" + + "dHJpbmc7AQAbamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0AQAVKExqYXZhL2xh" + + "bmcvT2JqZWN0OylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADQAOAAAAAAADAAAADwAQ" + + "AAEAEQAAAB0AAQABAAAABSq3AAGxAAAAAQASAAAABgABAAAAAgABABMAFAACABEAAAAkAAQABAAA" + + "AAgqGywttwACsQAAAAEAEgAAAAoAAgAAAAQABwAFABUAAAACABYAAgAXABQAAgARAAAAnwAEAAQA" + + "AABhLLsAA1m3AAQSBbYABhu2AAcSCLYABrYACbkACgIAGwSgABUtuQALAQAqGwRkLC23AAKnABAb" + + "mQAMKhsEZCwttwACLLsAA1m3AAQSDLYABhu2AAcSCLYABrYACbkACgIAsQAAAAIAEgAAACIACAAA" + + "AAcAHgAIACMACQApAAoANQALADkADABCAA4AYAAPABgAAAAEAAI1DAAVAAAAAgAWAAEAGQAAAAIA" + + "Gg=="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQCevtlr8B0kh/duuDYqXkGz/w9lMmtCCuRoBQAAcAAAAHhWNBIAAAAAAAAAALAEAAAg" + + "AAAAcAAAAAkAAADwAAAABgAAABQBAAAAAAAAAAAAAAoAAABcAQAAAQAAAKwBAACcAwAAzAEAAPYC" + + "AAAGAwAACgMAAA4DAAARAwAAGQMAAB0DAAAgAwAAIwMAACcDAAArAwAAOAMAAFcDAABrAwAAgQMA" + + "AJUDAACwAwAAzgMAAO0DAAD9AwAAAAQAAAYEAAAKBAAAEgQAABoEAAAuBAAANwQAAD4EAABMBAAA" + + "UQQAAFgEAABiBAAABgAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABEAAAATAAAABwAAAAUAAAAA" + + "AAAACAAAAAYAAADUAgAACQAAAAYAAADcAgAAEwAAAAgAAAAAAAAAFAAAAAgAAADkAgAAFQAAAAgA" + + "AADwAgAAAQADAAQAAAABAAQAGwAAAAEABAAdAAAAAwADAAQAAAAEAAMAHAAAAAYAAwAEAAAABgAB" + + "ABcAAAAGAAIAFwAAAAYAAAAeAAAABwAFABYAAAABAAAAAAAAAAMAAAAAAAAAEgAAALQCAACeBAAA" + + "AAAAAAEAAACKBAAAAQABAAEAAABpBAAABAAAAHAQAwAAAA4ABgAEAAQAAABuBAAAUAAAACIABgBw" + + 
"EAUAAAAbARoAAABuIAcAEAAMAG4gBgAwAAwAGwEAAAAAbiAHABAADABuEAgAAAAMAHIgCQAEABIQ" + + "MwMpAHIQBAAFANgAA/9wQAEAAlQiAAYAcBAFAAAAGwEZAAAAbiAHABAADABuIAYAMAAMABsBAAAA" + + "AG4gBwAQAAwAbhAIAAAADAByIAkABAAOADgD4f/YAAP/cEABAAJUKNoEAAQABAAAAIEEAAAEAAAA" + + "cEABABAyDgAAAAAAAAAAAAIAAAAAAAAAAQAAAMwBAAACAAAAzAEAAAEAAAAAAAAAAQAAAAUAAAAD" + + "AAAAAAAHAAQAAAABAAAAAwAOIC0gdHJhbnNmb3JtZWQAAihJAAIpVgABPAAGPGluaXQ+AAI+OwAB" + + "SQABTAACTEkAAkxMAAtMVHJhbnNmb3JtOwAdTGRhbHZpay9hbm5vdGF0aW9uL1NpZ25hdHVyZTsA" + + "EkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xhbmcvU3Ry" + + "aW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwAcTGphdmEvdXRpbC9mdW5jdGlvbi9Db25z" + + "dW1lcgAdTGphdmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjsADlRyYW5zZm9ybS5qYXZhAAFWAARW" + + "SUxMAAJWTAAGYWNjZXB0AAZhcHBlbmQAEmVtaXR0ZXI6IGphY2stNC4yNAAHZ29vZGJ5ZQAFaGVs" + + "bG8ADHByaXZhdGVTYXlIaQADcnVuAAVzYXlIaQAIdG9TdHJpbmcABXZhbHVlAAIABw4ABwMAAAAH" + + "DgEeDzw8XQEeDxktAAQDAAAABw48AAICAR8cBxcBFxAXAxcOFwUXDRcCAAACAQCAgATUAwEC7AMC" + + "AZwFDwAAAAAAAAABAAAAAAAAAAEAAAAgAAAAcAAAAAIAAAAJAAAA8AAAAAMAAAAGAAAAFAEAAAUA" + + "AAAKAAAAXAEAAAYAAAABAAAArAEAAAMQAAABAAAAzAEAAAEgAAADAAAA1AEAAAYgAAABAAAAtAIA" + + "AAEQAAAEAAAA1AIAAAIgAAAgAAAA9gIAAAMgAAADAAAAaQQAAAQgAAABAAAAigQAAAAgAAABAAAA" + + "ngQAAAAQAAABAAAAsAQAAA=="); + + // A class that we can use to keep track of the output of this test. 
+ private static class TestWatcher implements Consumer<String> { + private StringBuilder sb; + public TestWatcher() { + sb = new StringBuilder(); + } + + @Override + public void accept(String s) { + if (Main.ALWAYS_PRINT) { + System.out.println(s); + } + sb.append(s); + sb.append('\n'); + } + + public String getOutput() { + return sb.toString(); + } + + public void clear() { + sb = new StringBuilder(); + } + } + + public static void main(String[] args) { + doTest(new Transform()); + } + + private static boolean retry = false; + + public static void doTest(Transform t) { + final TestWatcher reporter = new TestWatcher(); + Method say_hi_method; + Method private_say_hi_method; + // Figure out if we can even JIT at all. + final boolean has_jit = hasJit(); + try { + say_hi_method = Transform.class.getDeclaredMethod( + "sayHi", int.class, Consumer.class, Runnable.class); + private_say_hi_method = Transform.class.getDeclaredMethod( + "privateSayHi", int.class, Consumer.class, Runnable.class); + } catch (Exception e) { + System.out.println("Unable to find methods!"); + e.printStackTrace(); + return; + } + // Makes sure the stack is the way we want it for the test and does the redefinition. It will + // set the retry boolean to true if we need to go around again due to jit code being GCd. + Runnable do_redefinition = () -> { + if (has_jit && + (Main.isInterpretedFunction(say_hi_method, true) || + Main.isInterpretedFunction(private_say_hi_method, true))) { + // Try again. We are not running the right jitted methods/cannot redefine them now. + retry = true; + } else { + // Actually do the redefinition. The stack looks good. + retry = false; + reporter.accept("transforming calling function"); + doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + } + }; + do { + // Run ensureJitCompiled here since it might get GCd + ensureJitCompiled(Transform.class, "sayHi"); + ensureJitCompiled(Transform.class, "privateSayHi"); + // Clear output. 
+ reporter.clear(); + t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); }); + t.sayHi(2, reporter, do_redefinition); + t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); }); + } while(retry); + System.out.println(reporter.getOutput()); + } + + private static native boolean hasJit(); + + private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable); + + private static native void ensureJitCompiled(Class c, String name); + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); +} diff --git a/test/943-private-recursive-jit/src/Transform.java b/test/943-private-recursive-jit/src/Transform.java new file mode 100644 index 0000000000..9ec3e42544 --- /dev/null +++ b/test/943-private-recursive-jit/src/Transform.java @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.util.function.Consumer; +class Transform { + public void sayHi(int recur, Consumer<String> reporter, Runnable r) { + privateSayHi(recur, reporter, r); + } + + private void privateSayHi(int recur, Consumer<String> reporter, Runnable r) { + reporter.accept("hello" + recur); + if (recur == 1) { + r.run(); + privateSayHi(recur - 1, reporter, r); + } else if (recur != 0) { + privateSayHi(recur - 1, reporter, r); + } + reporter.accept("goodbye" + recur); + } +} diff --git a/test/944-transform-classloaders/build b/test/944-transform-classloaders/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/944-transform-classloaders/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-build "$@" --experimental agents diff --git a/test/944-transform-classloaders/classloader.cc b/test/944-transform-classloaders/classloader.cc new file mode 100644 index 0000000000..5fbd8e11c9 --- /dev/null +++ b/test/944-transform-classloaders/classloader.cc @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/macros.h" +#include "jni.h" +#include "mirror/class-inl.h" +#include "openjdkjvmti/jvmti.h" +#include "ScopedLocalRef.h" + +#include "ti-agent/common_helper.h" +#include "ti-agent/common_load.h" + +namespace art { +namespace Test944TransformClassloaders { + + +extern "C" JNIEXPORT jlong JNICALL Java_Main_getDexFilePointer(JNIEnv* env, jclass, jclass klass) { + if (Runtime::Current() == nullptr) { + env->ThrowNew(env->FindClass("java/lang/Exception"), + "We do not seem to be running in ART! Unable to get dex file."); + return 0; + } + ScopedObjectAccess soa(env); + // This sequence of casts must be the same as those done in + // runtime/native/dalvik_system_DexFile.cc in order to ensure that we get the same results. + return static_cast<jlong>(reinterpret_cast<uintptr_t>( + &soa.Decode<mirror::Class>(klass)->GetDexFile())); +} + +} // namespace Test944TransformClassloaders +} // namespace art diff --git a/test/944-transform-classloaders/expected.txt b/test/944-transform-classloaders/expected.txt new file mode 100644 index 0000000000..79522479dd --- /dev/null +++ b/test/944-transform-classloaders/expected.txt @@ -0,0 +1,5 @@ +hello +hello2 +Goodbye +Goodbye2 +Passed diff --git a/test/944-transform-classloaders/info.txt b/test/944-transform-classloaders/info.txt new file mode 100644 index 0000000000..9155564d62 --- /dev/null +++ b/test/944-transform-classloaders/info.txt @@ -0,0 +1,7 @@ +Tests that redefined dex files are stored in the appropriate classloader. + +This test cannot run on the RI. 
+ +We use reflection with setAccessible(true) to examine the private internals of +classloaders. Changes to the internal operation or definition of +dalvik.system.BaseDexClassLoader might cause this test to fail. diff --git a/test/944-transform-classloaders/run b/test/944-transform-classloaders/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/944-transform-classloaders/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --jvmti diff --git a/test/944-transform-classloaders/src/CommonClassDefinition.java b/test/944-transform-classloaders/src/CommonClassDefinition.java new file mode 100644 index 0000000000..62602a02e9 --- /dev/null +++ b/test/944-transform-classloaders/src/CommonClassDefinition.java @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class CommonClassDefinition { + public final Class<?> target; + public final byte[] class_file_bytes; + public final byte[] dex_file_bytes; + + CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) { + this.target = target; + this.class_file_bytes = class_file_bytes; + this.dex_file_bytes = dex_file_bytes; + } +} diff --git a/test/944-transform-classloaders/src/Main.java b/test/944-transform-classloaders/src/Main.java new file mode 100644 index 0000000000..4911e00a70 --- /dev/null +++ b/test/944-transform-classloaders/src/Main.java @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.util.Arrays; +import java.util.ArrayList; +import java.util.Base64; +import java.lang.reflect.*; +public class Main { + + /** + * base64 encoded class/dex file for + * class Transform { + * public void sayHi() { + * System.out.println("Goodbye"); + * } + * } + */ + private static CommonClassDefinition TRANSFORM_DEFINITION = new CommonClassDefinition( + Transform.class, + Base64.getDecoder().decode( + "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" + + "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" + + "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" + + "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph" + + "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG" + + "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB" + + "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0="), + Base64.getDecoder().decode( + "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" + + "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" + + "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" + + "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" + + "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" + + "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" + + "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" + + "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" + + "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" + + "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" + + "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" + + 
"AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" + + "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=")); + + /** + * base64 encoded class/dex file for + * class Transform2 { + * public void sayHi() { + * System.out.println("Goodbye2"); + * } + * } + */ + private static CommonClassDefinition TRANSFORM2_DEFINITION = new CommonClassDefinition( + Transform2.class, + Base64.getDecoder().decode( + "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" + + "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm0yLmphdmEM" + + "AAcACAcAFgwAFwAYAQAIR29vZGJ5ZTIHABkMABoAGwEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcv" + + "T2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEA" + + "E2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAA" + + "BQAGAAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAQABAAsA" + + "CAABAAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAADAAgABAABAAwAAAACAA0="), + Base64.getDecoder().decode( + "ZGV4CjAzNQABX6vL8OT7aGLjbzFBEfCM9Aaz+zzGzVnQAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" + + "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" + + "AABqAQAAdAEAAIIBAACZAQAArQEAAMEBAADVAQAA5gEAAOkBAADtAQAAAQIAAAYCAAAPAgAAAgAA" + + "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" + + "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAACECAAAA" + + "AAAAAQABAAEAAAAWAgAABAAAAHAQAwAAAA4AAwABAAIAAAAbAgAACQAAAGIAAAAbAQEAAABuIAIA" + + "EAAOAAAAAQAAAAMABjxpbml0PgAIR29vZGJ5ZTIADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJp" + + "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" + + "bGFuZy9TeXN0ZW07AA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMjQA" + + "A291dAAHcHJpbnRsbgAFc2F5SGkAAQAHDgADAAcOhwAAAAEBAICABKACAQG4AgANAAAAAAAAAAEA" + + "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" + + 
"AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" + + "AyAAAAIAAAAWAgAAACAAAAEAAAAhAgAAABAAAAEAAAAwAgAA")); + + public static void main(String[] args) throws Exception { + doTest(); + System.out.println("Passed"); + } + + private static void checkIsInstance(Class<?> klass, Object o) throws Exception { + if (!klass.isInstance(o)) { + throw new Exception(klass + " is not the class of " + o); + } + } + + private static boolean arrayContains(long[] arr, long value) { + if (arr == null) { + return false; + } + for (int i = 0; i < arr.length; i++) { + if (arr[i] == value) { + return true; + } + } + return false; + } + + /** + * Checks that we can find the dex-file for the given class in its classloader. + * + * Throws if it fails. + */ + private static void checkDexFileInClassLoader(Class<?> klass) throws Exception { + // If all the android BCP classes were availible when compiling this test and access checks + // weren't a thing this function would be written as follows: + // + // long dexFilePtr = getDexFilePointer(klass); + // dalvik.system.BaseDexClassLoader loader = + // (dalvik.system.BaseDexClassLoader)klass.getClassLoader(); + // dalvik.system.DexPathList pathListValue = loader.pathList; + // dalvik.system.DexPathList.Element[] elementArrayValue = pathListValue.dexElements; + // int array_length = elementArrayValue.length; + // for (int i = 0; i < array_length; i++) { + // dalvik.system.DexPathList.Element curElement = elementArrayValue[i]; + // dalvik.system.DexFile curDexFile = curElement.dexFile; + // if (curDexFile == null) { + // continue; + // } + // long[] curCookie = (long[])curDexFile.mCookie; + // long[] curInternalCookie = (long[])curDexFile.mInternalCookie; + // if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) { + // return; + // } + // } + // throw new Exception( + // "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass); + + // Get all the fields and 
classes we need by reflection. + Class<?> baseDexClassLoaderClass = Class.forName("dalvik.system.BaseDexClassLoader"); + Field pathListField = baseDexClassLoaderClass.getDeclaredField("pathList"); + + Class<?> dexPathListClass = Class.forName("dalvik.system.DexPathList"); + Field elementArrayField = dexPathListClass.getDeclaredField("dexElements"); + + Class<?> dexPathListElementClass = Class.forName("dalvik.system.DexPathList$Element"); + Field dexFileField = dexPathListElementClass.getDeclaredField("dexFile"); + + Class<?> dexFileClass = Class.forName("dalvik.system.DexFile"); + Field dexFileCookieField = dexFileClass.getDeclaredField("mCookie"); + Field dexFileInternalCookieField = dexFileClass.getDeclaredField("mInternalCookie"); + + // Make all the fields accessible + AccessibleObject.setAccessible(new AccessibleObject[] { pathListField, + elementArrayField, + dexFileField, + dexFileCookieField, + dexFileInternalCookieField }, true); + + long dexFilePtr = getDexFilePointer(klass); + + ClassLoader loader = klass.getClassLoader(); + checkIsInstance(baseDexClassLoaderClass, loader); + // DexPathList pathListValue = ((BaseDexClassLoader) loader).pathList; + Object pathListValue = pathListField.get(loader); + + checkIsInstance(dexPathListClass, pathListValue); + + // DexPathList.Element[] elementArrayValue = pathListValue.dexElements; + Object elementArrayValue = elementArrayField.get(pathListValue); + if (!elementArrayValue.getClass().isArray() || + elementArrayValue.getClass().getComponentType() != dexPathListElementClass) { + throw new Exception("elementArrayValue is not an " + dexPathListElementClass + " array!"); + } + // int array_length = elementArrayValue.length; + int array_length = Array.getLength(elementArrayValue); + for (int i = 0; i < array_length; i++) { + // DexPathList.Element curElement = elementArrayValue[i]; + Object curElement = Array.get(elementArrayValue, i); + checkIsInstance(dexPathListElementClass, curElement); + + // DexFile curDexFile = 
curElement.dexFile; + Object curDexFile = dexFileField.get(curElement); + if (curDexFile == null) { + continue; + } + checkIsInstance(dexFileClass, curDexFile); + + // long[] curCookie = (long[])curDexFile.mCookie; + long[] curCookie = (long[])dexFileCookieField.get(curDexFile); + // long[] curInternalCookie = (long[])curDexFile.mInternalCookie; + long[] curInternalCookie = (long[])dexFileInternalCookieField.get(curDexFile); + + if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) { + return; + } + } + throw new Exception( + "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass); + } + + private static void doTest() throws Exception { + Transform t = new Transform(); + Transform2 t2 = new Transform2(); + + long initial_t1_dex = getDexFilePointer(Transform.class); + long initial_t2_dex = getDexFilePointer(Transform2.class); + if (initial_t2_dex != initial_t1_dex) { + throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " + + "have different initial dex files!"); + } + checkDexFileInClassLoader(Transform.class); + checkDexFileInClassLoader(Transform2.class); + + // Make sure they are loaded + t.sayHi(); + t2.sayHi(); + // Redefine both of the classes. + doMultiClassRedefinition(TRANSFORM_DEFINITION, TRANSFORM2_DEFINITION); + // Make sure we actually transformed them! 
+ t.sayHi(); + t2.sayHi(); + + long final_t1_dex = getDexFilePointer(Transform.class); + long final_t2_dex = getDexFilePointer(Transform2.class); + if (final_t2_dex == final_t1_dex) { + throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " + + "have the same initial dex files!"); + } else if (final_t1_dex == initial_t1_dex) { + throw new Exception("The class " + Transform.class + " did not get a new dex file!"); + } else if (final_t2_dex == initial_t2_dex) { + throw new Exception("The class " + Transform2.class + " did not get a new dex file!"); + } + // Check to make sure the new dex files are in the class loader. + checkDexFileInClassLoader(Transform.class); + checkDexFileInClassLoader(Transform2.class); + } + + private static void doMultiClassRedefinition(CommonClassDefinition... defs) { + ArrayList<Class<?>> classes = new ArrayList<>(); + ArrayList<byte[]> class_files = new ArrayList<>(); + ArrayList<byte[]> dex_files = new ArrayList<>(); + + for (CommonClassDefinition d : defs) { + classes.add(d.target); + class_files.add(d.class_file_bytes); + dex_files.add(d.dex_file_bytes); + } + doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]), + class_files.toArray(new byte[0][]), + dex_files.toArray(new byte[0][])); + } + + // Gets the 'long' (really a native pointer) that is stored in the ClassLoader representing the + // DexFile a class is loaded from. 
This is converted from the DexFile* in the same way it is done + // in runtime/native/dalvik_system_DexFile.cc + private static native long getDexFilePointer(Class<?> target); + // Transforms the classes + private static native void doCommonMultiClassRedefinition(Class<?>[] targets, + byte[][] classfiles, + byte[][] dexfiles); +} diff --git a/test/944-transform-classloaders/src/Transform.java b/test/944-transform-classloaders/src/Transform.java new file mode 100644 index 0000000000..8e8af355da --- /dev/null +++ b/test/944-transform-classloaders/src/Transform.java @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Transform { + public void sayHi() { + // Use lower 'h' to make sure the string will have a different string id + // than the transformation (the transformation code is the same except + // the actual printed String, which was making the test inacurately passing + // in JIT mode when loading the string from the dex cache, as the string ids + // of the two different strings were the same). + // We know the string ids will be different because lexicographically: + // "Goodbye" < "LTransform;" < "hello". 
+ System.out.println("hello"); + } +} diff --git a/test/944-transform-classloaders/src/Transform2.java b/test/944-transform-classloaders/src/Transform2.java new file mode 100644 index 0000000000..eb22842184 --- /dev/null +++ b/test/944-transform-classloaders/src/Transform2.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Transform2 { + public void sayHi() { + System.out.println("hello2"); + } +} diff --git a/test/Android.bp b/test/Android.bp index 1070645040..d3244a683a 100644 --- a/test/Android.bp +++ b/test/Android.bp @@ -273,6 +273,7 @@ art_cc_defaults { "931-agent-thread/agent_thread.cc", "933-misc-events/misc_events.cc", "936-search-onload/search_onload.cc", + "944-transform-classloaders/classloader.cc", ], shared_libs: [ "libbase", diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 1b4f19509f..742353da46 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -439,13 +439,14 @@ TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \ 629-vdex-speed # This test fails without an image. -# 018, 961, 964 often time out. b/34369284 +# 018, 961, 964, 968 often time out. 
b/34369284 TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS := \ 137-cfi \ 138-duplicate-classes-check \ 018-stack-overflow \ 961-default-iface-resolution-gen \ - 964-default-iface-init + 964-default-iface-init \ + 968-default-partial-compile-gen \ ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \ diff --git a/test/Nested/Nested.java b/test/Nested/Nested.java index 78b273bec0..f493989268 100644 --- a/test/Nested/Nested.java +++ b/test/Nested/Nested.java @@ -17,4 +17,6 @@ class Nested { class Inner { } + Object x = new Object() { + }; } diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index 751aa95f50..186a1513ee 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -364,6 +364,8 @@ fi if [ "$HAVE_IMAGE" = "n" ]; then + # Add 5 minutes to give some time to generate the boot image. + TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300)) DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art" else DALVIKVM_BOOT_OPT="-Ximage:${BOOT_IMAGE}" diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc index ed82bb04cf..ea6359e5e0 100644 --- a/test/ti-agent/common_helper.cc +++ b/test/ti-agent/common_helper.cc @@ -210,6 +210,7 @@ struct CommonTransformationResult { // Map from class name to transformation result. 
std::map<std::string, std::deque<CommonTransformationResult>> gTransformations; +bool gPopTransformations = true; extern "C" JNIEXPORT void JNICALL Java_Main_addCommonTransformationResult(JNIEnv* env, jclass, @@ -266,7 +267,32 @@ void JNICALL CommonClassFileLoadHookRetransformable(jvmtiEnv* jvmti_env, memcpy(new_data, desired_array.data(), desired_array.size()); *new_class_data = new_data; *new_class_data_len = desired_array.size(); + if (gPopTransformations) { + gTransformations[name_str].pop_front(); + } + } +} + +extern "C" JNIEXPORT void Java_Main_setPopRetransformations(JNIEnv*, + jclass, + jboolean enable) { + gPopTransformations = enable; +} + +extern "C" JNIEXPORT void Java_Main_popTransformationFor(JNIEnv* env, + jclass, + jstring class_name) { + const char* name_chrs = env->GetStringUTFChars(class_name, nullptr); + std::string name_str(name_chrs); + env->ReleaseStringUTFChars(class_name, name_chrs); + if (gTransformations.find(name_str) != gTransformations.end() && + gTransformations[name_str].size() > 0) { gTransformations[name_str].pop_front(); + } else { + std::stringstream err; + err << "No transformations found for class " << name_str; + std::string message = err.str(); + env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str()); } } diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc index 621d45a1bc..c5a93568c6 100644 --- a/test/ti-agent/common_load.cc +++ b/test/ti-agent/common_load.cc @@ -115,6 +115,13 @@ static AgentLib agents[] = { { "935-non-retransformable", common_transform::OnLoad, nullptr }, { "936-search-onload", Test936SearchOnload::OnLoad, nullptr }, { "937-hello-retransform-package", common_retransform::OnLoad, nullptr }, + { "938-load-transform-bcp", common_retransform::OnLoad, nullptr }, + { "939-hello-transformation-bcp", common_redefine::OnLoad, nullptr }, + { "940-recursive-obsolete", common_redefine::OnLoad, nullptr }, + { "941-recursive-obsolete-jit", common_redefine::OnLoad, nullptr }, + { 
"942-private-recursive", common_redefine::OnLoad, nullptr }, + { "943-private-recursive-jit", common_redefine::OnLoad, nullptr }, + { "944-transform-classloaders", common_redefine::OnLoad, nullptr }, }; static AgentLib* FindAgent(char* name) { diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 2d26b4858d..963efa49a5 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -52,6 +52,9 @@ while true; do shift elif [[ "$1" == "" ]]; then break + else + echo "Unknown options $@" + exit 1 fi done diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def index 5fa5194d00..82cdbb20f1 100644 --- a/tools/cpp-define-generator/constant_jit.def +++ b/tools/cpp-define-generator/constant_jit.def @@ -25,5 +25,6 @@ DEFINE_JIT_CONSTANT(CHECK_OSR, int16_t, art::jit::kJitCheckForOSR) DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled) +DEFINE_JIT_CONSTANT(CHECK_OSR_THRESHOLD, int16_t, art::jit::Jit::kJitRecheckOSRThreshold) #undef DEFINE_JIT_CONSTANT |