summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.common_test.mk7
-rw-r--r--compiler/compiler.h3
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc7
-rw-r--r--compiler/dex/dex_to_dex_compiler.h7
-rw-r--r--compiler/driver/compiler_driver-inl.h13
-rw-r--r--compiler/driver/compiler_driver.cc123
-rw-r--r--compiler/driver/compiler_driver.h14
-rw-r--r--compiler/driver/compiler_driver_test.cc1
-rw-r--r--compiler/driver/dex_compilation_unit.cc2
-rw-r--r--compiler/driver/dex_compilation_unit.h8
-rw-r--r--compiler/image_writer.cc11
-rw-r--r--compiler/image_writer.h10
-rw-r--r--compiler/jit/jit_compiler.cc2
-rw-r--r--compiler/linker/mips/relative_patcher_mips.cc37
-rw-r--r--compiler/linker/mips/relative_patcher_mips32r6_test.cc7
-rw-r--r--compiler/linker/mips/relative_patcher_mips_test.cc11
-rw-r--r--compiler/oat_writer.cc16
-rw-r--r--compiler/optimizing/builder.h15
-rw-r--r--compiler/optimizing/code_generator_arm64.cc25
-rw-r--r--compiler/optimizing/code_generator_mips.cc102
-rw-r--r--compiler/optimizing/code_generator_mips.h2
-rw-r--r--compiler/optimizing/code_generator_mips64.cc8
-rw-r--r--compiler/optimizing/code_generator_mips64.h9
-rw-r--r--compiler/optimizing/codegen_test.cc33
-rw-r--r--compiler/optimizing/dex_cache_array_fixups_mips.cc7
-rw-r--r--compiler/optimizing/induction_var_range.cc29
-rw-r--r--compiler/optimizing/inliner.cc45
-rw-r--r--compiler/optimizing/instruction_builder.cc49
-rw-r--r--compiler/optimizing/instruction_builder.h11
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc13
-rw-r--r--compiler/optimizing/optimizing_compiler.cc26
-rw-r--r--compiler/optimizing/reference_type_propagation.cc46
-rw-r--r--compiler/optimizing/reference_type_propagation.h3
-rw-r--r--compiler/optimizing/reference_type_propagation_test.cc1
-rw-r--r--compiler/optimizing/ssa_builder.cc6
-rw-r--r--compiler/optimizing/ssa_builder.h3
-rw-r--r--dex2oat/dex2oat.cc7
-rw-r--r--dex2oat/dex2oat_test.cc6
-rw-r--r--oatdump/oatdump.cc12
-rw-r--r--patchoat/patchoat.cc4
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S375
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S351
-rw-r--r--runtime/art_field-inl.h15
-rw-r--r--runtime/art_field.cc4
-rw-r--r--runtime/art_field.h2
-rw-r--r--runtime/art_method-inl.h13
-rw-r--r--runtime/art_method.cc11
-rw-r--r--runtime/art_method.h2
-rw-r--r--runtime/class_linker-inl.h45
-rw-r--r--runtime/class_linker.cc99
-rw-r--r--runtime/class_linker.h8
-rw-r--r--runtime/class_linker_test.cc8
-rw-r--r--runtime/compiler_filter.cc9
-rw-r--r--runtime/compiler_filter.h1
-rw-r--r--runtime/debugger.cc39
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h8
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc22
-rw-r--r--runtime/gc/collector/concurrent_copying.cc22
-rw-r--r--runtime/gc/collector/garbage_collector.cc20
-rw-r--r--runtime/gc/collector/garbage_collector.h4
-rw-r--r--runtime/gc/heap.cc4
-rw-r--r--runtime/gc/space/image_space.cc4
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/instrumentation.cc59
-rw-r--r--runtime/instrumentation.h14
-rw-r--r--runtime/interpreter/interpreter_common.cc22
-rw-r--r--runtime/interpreter/interpreter_common.h10
-rw-r--r--runtime/jit/jit_code_cache.cc2
-rw-r--r--runtime/mirror/class.cc3
-rw-r--r--runtime/mirror/dex_cache-inl.h110
-rw-r--r--runtime/mirror/dex_cache.cc20
-rw-r--r--runtime/mirror/dex_cache.h77
-rw-r--r--runtime/mirror/dex_cache_test.cc3
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc17
-rw-r--r--runtime/native/java_lang_DexCache.cc14
-rw-r--r--runtime/oat.h2
-rw-r--r--runtime/oat_file_assistant.cc2
-rw-r--r--runtime/openjdkjvmti/ti_class_loader.cc9
-rw-r--r--runtime/openjdkjvmti/ti_redefine.cc112
-rw-r--r--runtime/openjdkjvmti/ti_redefine.h12
-rw-r--r--runtime/parsed_options.cc2
-rw-r--r--runtime/quick_exception_handler.cc4
-rw-r--r--runtime/runtime.cc76
-rw-r--r--runtime/runtime.h23
-rw-r--r--runtime/runtime_android.cc53
-rw-r--r--runtime/runtime_common.cc414
-rw-r--r--runtime/runtime_common.h79
-rw-r--r--runtime/runtime_linux.cc378
-rw-r--r--runtime/runtime_options.def1
-rw-r--r--runtime/thread.cc5
-rw-r--r--runtime/thread_list.cc10
-rw-r--r--runtime/thread_pool.cc11
-rw-r--r--runtime/thread_pool.h9
-rw-r--r--runtime/thread_pool_test.cc52
-rw-r--r--runtime/trace.cc3
-rw-r--r--runtime/utils/dex_cache_arrays_layout-inl.h19
-rw-r--r--runtime/verifier/method_verifier.cc8
-rw-r--r--test/082-inline-execute/src/Main.java7
-rw-r--r--test/466-get-live-vreg/get_live_vreg_jni.cc2
-rw-r--r--test/552-checker-sharpening/src/Main.java50
-rw-r--r--test/623-checker-loop-regressions/src/Main.java121
-rw-r--r--test/626-const-class-linking/clear_dex_cache_types.cc3
-rw-r--r--test/908-gc-start-finish/gc_callbacks.cc25
-rwxr-xr-xtest/909-attach-agent/run6
-rw-r--r--test/909-attach-agent/src/Main.java4
-rw-r--r--test/916-obsolete-jit/src/Main.java17
-rw-r--r--test/916-obsolete-jit/src/Transform.java8
-rw-r--r--test/919-obsolete-fields/src/Main.java7
-rw-r--r--test/919-obsolete-fields/src/Transform.java7
-rwxr-xr-xtest/938-load-transform-bcp/build17
-rw-r--r--test/938-load-transform-bcp/expected.txt2
-rw-r--r--test/938-load-transform-bcp/info.txt1
-rwxr-xr-xtest/938-load-transform-bcp/run17
-rw-r--r--test/938-load-transform-bcp/src-ex/TestMain.java35
-rw-r--r--test/938-load-transform-bcp/src/Main.java122
-rwxr-xr-xtest/939-hello-transformation-bcp/build17
-rw-r--r--test/939-hello-transformation-bcp/expected.txt3
-rw-r--r--test/939-hello-transformation-bcp/info.txt6
-rwxr-xr-xtest/939-hello-transformation-bcp/run17
-rw-r--r--test/939-hello-transformation-bcp/src/Main.java126
-rw-r--r--test/957-methodhandle-transforms/expected.txt17
-rw-r--r--test/957-methodhandle-transforms/src/Main.java267
-rw-r--r--test/Android.run-test.mk13
-rwxr-xr-xtest/etc/run-test-jar6
-rwxr-xr-xtest/run-test7
-rw-r--r--test/ti-agent/common_load.cc2
127 files changed, 2512 insertions, 1865 deletions
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 291db8b531..b7a2379dc9 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -87,8 +87,8 @@ ART_TEST_GC_STRESS ?= $(ART_TEST_FULL)
# Do you want tests with the JNI forcecopy mode enabled run?
ART_TEST_JNI_FORCECOPY ?= $(ART_TEST_FULL)
-# Do you want run-tests with relocation disabled run?
-ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
+# Do you want run-tests with relocation enabled run?
+ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL)
# Do you want run-tests with prebuilding?
ART_TEST_RUN_TEST_PREBUILD ?= true
@@ -96,6 +96,9 @@ ART_TEST_RUN_TEST_PREBUILD ?= true
# Do you want run-tests with no prebuilding enabled run?
ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
+# Do you want run-tests with a pregenerated core.art?
+ART_TEST_RUN_TEST_IMAGE ?= true
+
# Do you want run-tests without a pregenerated core.art?
ART_TEST_RUN_TEST_NO_IMAGE ?= $(ART_TEST_FULL)
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 2ca0b77a73..908d3669ed 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -27,6 +27,7 @@ namespace jit {
class JitCodeCache;
}
namespace mirror {
+ class ClassLoader;
class DexCache;
}
@@ -63,7 +64,7 @@ class Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const = 0;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index d4f6545c59..76aeaa55d7 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -284,16 +284,13 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
}
uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(unit_.GetClassLoader())));
ClassLinker* class_linker = unit_.GetClassLinker();
ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
GetDexFile(),
method_idx,
unit_.GetDexCache(),
- class_loader,
+ unit_.GetClassLoader(),
/* referrer */ nullptr,
kVirtual);
@@ -330,7 +327,7 @@ CompiledMethod* ArtCompileDEX(
InvokeType invoke_type ATTRIBUTE_UNUSED,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level) {
DCHECK(driver != nullptr);
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 0a00d45297..00c596d60e 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
#include "dex_file.h"
+#include "handle.h"
#include "invoke_type.h"
namespace art {
@@ -25,6 +26,10 @@ namespace art {
class CompiledMethod;
class CompilerDriver;
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
namespace optimizer {
enum class DexToDexCompilationLevel {
@@ -40,7 +45,7 @@ CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f056dd3c00..81d80f4f8f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,17 +31,12 @@
namespace art {
-inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit) {
- return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Ptr();
-}
-
inline mirror::Class* CompilerDriver::ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, dex::TypeIndex cls_index,
const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
mirror::Class* cls = mUnit->GetClassLinker()->ResolveType(
*mUnit->GetDexFile(), cls_index, dex_cache, class_loader);
DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending());
@@ -56,7 +51,7 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
@@ -87,7 +82,7 @@ inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
is_static);
}
@@ -198,7 +193,7 @@ inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
ArtMethod* resolved_method =
check_incompatible_class_change
? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>(
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1d4eaf8c5a..4e19dbe949 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -583,7 +583,7 @@ static void CompileMethod(Thread* self,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled,
@@ -624,9 +624,6 @@ static void CompileMethod(Thread* self,
// Look-up the ArtMethod associated with this code_item (if any)
// -- It is later used to lookup any [optimization] annotations for this method.
ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
// TODO: Lookup annotation from DexFile directly without resolving method.
ArtMethod* method =
@@ -634,7 +631,7 @@ static void CompileMethod(Thread* self,
dex_file,
method_idx,
dex_cache,
- class_loader_handle,
+ class_loader,
/* referrer */ nullptr,
invoke_type);
@@ -681,9 +678,14 @@ static void CompileMethod(Thread* self,
if (compile) {
// NOTE: if compiler declines to compile this method, it will return null.
- compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
- class_def_idx, method_idx, class_loader,
- dex_file, dex_cache);
+ compiled_method = driver->GetCompiler()->Compile(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_cache);
}
if (compiled_method == nullptr &&
dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
@@ -730,12 +732,14 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
uint32_t method_idx = method->GetDexMethodIndex();
uint32_t access_flags = method->GetAccessFlags();
InvokeType invoke_type = method->GetInvokeType();
- StackHandleScope<1> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(method->GetDeclaringClass()->GetClassLoader()));
{
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> local_class_loader(
- soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+ soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
// Find the dex_file
dex_file = method->GetDexFile();
@@ -769,7 +773,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -795,7 +799,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -1070,22 +1074,30 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
- explicit ResolveCatchBlockExceptionsClassVisitor(
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve)
- : exceptions_to_resolve_(exceptions_to_resolve) {}
+ ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ classes_.push_back(c);
+ return true;
+ }
+
+ void FindExceptionTypesToResolve(
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m);
+ for (ObjPtr<mirror::Class> klass : classes_) {
+ for (ArtMethod& method : klass->GetMethods(pointer_size)) {
+ FindExceptionTypesToResolveForMethod(&method, exceptions_to_resolve);
+ }
}
- return true;
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle)
+ void FindExceptionTypesToResolveForMethod(
+ ArtMethod* method,
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
}
@@ -1105,9 +1117,9 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
dex::TypeIndex encoded_catch_handler_handlers_type_idx =
dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list));
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
- exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
- method_handle->GetDexFile());
+ if (!method->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ exceptions_to_resolve->emplace(encoded_catch_handler_handlers_type_idx,
+ method->GetDexFile());
}
// ignore address associated with catch handler
DecodeUnsignedLeb128(&encoded_catch_handler_list);
@@ -1119,7 +1131,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
}
}
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve_;
+ std::vector<ObjPtr<mirror::Class>> classes_;
};
class RecordImageClassesVisitor : public ClassVisitor {
@@ -1173,8 +1185,14 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
- ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
- class_linker->VisitClasses(&visitor);
+ {
+ // Thread suspension is not allowed while ResolveCatchBlockExceptionsClassVisitor
+ // is using a std::vector<ObjPtr<mirror::Class>>.
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ResolveCatchBlockExceptionsClassVisitor visitor;
+ class_linker->VisitClasses(&visitor);
+ visitor.FindExceptionTypesToResolve(&unresolved_exception_types);
+ }
for (const auto& exception_type : unresolved_exception_types) {
dex::TypeIndex exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
@@ -1423,19 +1441,14 @@ void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodRefere
dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index);
}
-bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx) {
- // Get type from dex cache assuming it was populated by the verifier
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class) {
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1452,12 +1465,9 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
return is_accessible;
}
-bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* finalizable) {
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
// Be conservative.
@@ -1465,10 +1475,8 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
return false; // Unknown class needs access checks.
}
*finalizable = resolved_class->IsFinalizable();
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1512,9 +1520,7 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
mirror::Class* referrer_class;
Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
{
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader_handle = mUnit->GetClassLoader();
resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false);
referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr;
@@ -2587,10 +2593,18 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_direct_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver,
+ it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
// Compile virtual methods
@@ -2604,10 +2618,17 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_virtual_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver, it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
DCHECK(!it.HasNext());
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 503fe3adfc..d032a26fd5 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -187,16 +187,14 @@ class CompilerDriver {
REQUIRES(!requires_constructor_barrier_lock_);
// Are runtime access checks necessary in the compiled code?
- bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx)
+ bool CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class)
REQUIRES_SHARED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
// out_is_finalizable is set to whether the type is finalizable.
- bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+ bool CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* out_is_finalizable)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -406,10 +404,6 @@ class CompilerDriver {
uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 1e4ca16844..e4b66ebc5a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -101,6 +101,7 @@ class CompilerDriverTest : public CommonCompilerTest {
};
// Disabled due to 10 second runtime on host
+// TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
CompileAll(nullptr);
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index 47b19297e5..7e8e812c4a 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -21,7 +21,7 @@
namespace art {
-DexCompilationUnit::DexCompilationUnit(jobject class_loader,
+DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 854927d747..24a9a5b653 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -34,7 +34,7 @@ class VerifiedMethod;
class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
- DexCompilationUnit(jobject class_loader,
+ DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
@@ -44,7 +44,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const VerifiedMethod* verified_method,
Handle<mirror::DexCache> dex_cache);
- jobject GetClassLoader() const {
+ Handle<mirror::ClassLoader> GetClassLoader() const {
return class_loader_;
}
@@ -113,7 +113,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
}
private:
- const jobject class_loader_;
+ const Handle<mirror::ClassLoader> class_loader_;
ClassLinker* const class_linker_;
@@ -125,7 +125,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const uint32_t access_flags_;
const VerifiedMethod* verified_method_;
- Handle<mirror::DexCache> dex_cache_;
+ const Handle<mirror::DexCache> dex_cache_;
std::string symbol_;
};
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c72edb18a3..3e9ae0834c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -940,9 +940,11 @@ void ImageWriter::PruneNonImageClasses() {
}
ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- Class* klass = dex_cache->GetResolvedType(dex::TypeIndex(i));
+ mirror::TypeDexCachePair pair =
+ dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
+ mirror::Class* klass = pair.object.Read();
if (klass != nullptr && !KeepClass(klass)) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), nullptr);
+ dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
}
}
ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
@@ -1922,8 +1924,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
// above comment for intern tables.
ClassTable temp_class_table;
temp_class_table.ReadFromMemory(class_table_memory_ptr);
- CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
- mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader();
CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader),
table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader));
UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown));
@@ -2213,7 +2214,7 @@ void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
- GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cc7df1ce21..bdc7146632 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -51,8 +51,13 @@ class ImageSpace;
} // namespace space
} // namespace gc
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
class ClassLoaderVisitor;
class ClassTable;
+class ImtConflictTable;
static constexpr int kInvalidFd = -1;
@@ -79,6 +84,11 @@ class ImageWriter FINAL {
return true;
}
+ ObjPtr<mirror::ClassLoader> GetClassLoader() {
+ CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
+ return compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ }
+
template <typename T>
T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (object == nullptr || IsInBootImage(object)) {
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 148ce4f9ee..eaac0b40f5 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -102,7 +102,7 @@ JitCompiler::JitCompiler() {
/* no_inline_from */ nullptr,
/* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
- Runtime::Current()->IsDebuggable(),
+ Runtime::Current()->IsJavaDebuggable(),
CompilerOptions::kDefaultGenerateDebugInfo,
/* implicit_null_checks */ true,
/* implicit_so_checks */ true,
diff --git a/compiler/linker/mips/relative_patcher_mips.cc b/compiler/linker/mips/relative_patcher_mips.cc
index c09950cd5d..fe5f9a948a 100644
--- a/compiler/linker/mips/relative_patcher_mips.cc
+++ b/compiler/linker/mips/relative_patcher_mips.cc
@@ -49,9 +49,12 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
uint32_t target_offset) {
uint32_t anchor_literal_offset = patch.PcInsnOffset();
uint32_t literal_offset = patch.LiteralOffset();
+ uint32_t literal_low_offset;
bool dex_cache_array = (patch.GetType() == LinkerPatch::Type::kDexCacheArray);
- // Basic sanity checks.
+ // Perform basic sanity checks and initialize `literal_low_offset` to point
+ // to the instruction containing the 16 least significant bits of the
+ // relative address.
if (is_r6) {
DCHECK_GE(code->size(), 8u);
DCHECK_LE(literal_offset, code->size() - 8u);
@@ -61,10 +64,10 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
- // ADDIU reg, reg, offset_low
+ // instr reg(s), offset_low
DCHECK_EQ((*code)[literal_offset + 4], 0x78);
DCHECK_EQ((*code)[literal_offset + 5], 0x56);
- DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x24);
+ literal_low_offset = literal_offset + 4;
} else {
DCHECK_GE(code->size(), 16u);
DCHECK_LE(literal_offset, code->size() - 12u);
@@ -84,36 +87,34 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
DCHECK_EQ(((*code)[literal_offset + 2] & 0xE0), 0x00);
DCHECK_EQ((*code)[literal_offset + 3], 0x3C);
- // ORI reg, reg, offset_low
- DCHECK_EQ((*code)[literal_offset + 4], 0x78);
- DCHECK_EQ((*code)[literal_offset + 5], 0x56);
- DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x34);
// ADDU reg, reg, reg2
- DCHECK_EQ((*code)[literal_offset + 8], 0x21);
- DCHECK_EQ(((*code)[literal_offset + 9] & 0x07), 0x00);
+ DCHECK_EQ((*code)[literal_offset + 4], 0x21);
+ DCHECK_EQ(((*code)[literal_offset + 5] & 0x07), 0x00);
if (dex_cache_array) {
// reg2 is either RA or from HMipsComputeBaseMethodAddress.
- DCHECK_EQ(((*code)[literal_offset + 10] & 0x1F), 0x1F);
+ DCHECK_EQ(((*code)[literal_offset + 6] & 0x1F), 0x1F);
}
- DCHECK_EQ(((*code)[literal_offset + 11] & 0xFC), 0x00);
+ DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x00);
+ // instr reg(s), offset_low
+ DCHECK_EQ((*code)[literal_offset + 8], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 9], 0x56);
+ literal_low_offset = literal_offset + 8;
}
// Apply patch.
uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
uint32_t diff = target_offset - anchor_offset;
- if (dex_cache_array) {
+ if (dex_cache_array && !is_r6) {
diff += kDexCacheArrayLwOffset;
}
- if (is_r6) {
- diff += (diff & 0x8000) << 1; // Account for sign extension in ADDIU.
- }
+ diff += (diff & 0x8000) << 1; // Account for sign extension in "instr reg(s), offset_low".
// LUI reg, offset_high / AUIPC reg, offset_high
(*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
(*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
- // ORI reg, reg, offset_low / ADDIU reg, reg, offset_low
- (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
- (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+ // instr reg(s), offset_low
+ (*code)[literal_low_offset + 0] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_low_offset + 1] = static_cast<uint8_t>(diff >> 8);
}
} // namespace linker
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 4f9a3a050a..474eb73e08 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -20,10 +20,6 @@
namespace art {
namespace linker {
-// We'll maximize the range of a single load instruction for dex cache array accesses
-// by aligning offset -32768 with the offset of the first used element.
-static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
-
class Mips32r6RelativePatcherTest : public RelativePatcherTest {
public:
Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
@@ -64,9 +60,6 @@ void Mips32r6RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const Link
ASSERT_TRUE(result.first);
uint32_t diff = target_offset - (result.second + kAnchorOffset);
- if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
- diff += kDexCacheArrayLwOffset;
- }
diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
const uint8_t expected_code[] = {
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index faeb92a315..b0d1294cf4 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -47,12 +47,12 @@ class MipsRelativePatcherTest : public RelativePatcherTest {
const uint8_t MipsRelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x00, 0x00, 0x10, 0x04, // nal
- 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
- 0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
- 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
+ 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
+ 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
+ 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
};
const uint32_t MipsRelativePatcherTest::kLiteralOffset = 4; // At lui (where patching starts).
-const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At ori (where PC+0 points).
+const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At addu (where PC+0 points).
const ArrayRef<const uint8_t> MipsRelativePatcherTest::kUnpatchedPcRelativeCode(
kUnpatchedPcRelativeRawCode);
@@ -68,12 +68,13 @@ void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPa
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
+ diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
const uint8_t expected_code[] = {
0x00, 0x00, 0x10, 0x04,
static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x12, 0x3C,
- static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x36,
0x21, 0x90, 0x5F, 0x02,
+ static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
};
EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bd2c5e3bfc..227fdc4874 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1060,6 +1060,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
size_t relative_offset) SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
: OatDexMethodVisitor(writer, relative_offset),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
out_(out),
file_offset_(file_offset),
soa_(Thread::Current()),
@@ -1245,12 +1246,13 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
private:
+ ObjPtr<mirror::ClassLoader> class_loader_;
OutputStream* const out_;
const size_t file_offset_;
const ScopedObjectAccess soa_;
const ScopedAssertNoThreadSuspension no_thread_suspension_;
ClassLinker* const class_linker_;
- mirror::DexCache* dex_cache_;
+ ObjPtr<mirror::DexCache> dex_cache_;
std::vector<uint8_t> patched_code_;
void ReportWriteFailure(const char* what, const ClassDataItemIterator& it) {
@@ -1261,7 +1263,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
REQUIRES_SHARED(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
- mirror::DexCache* dex_cache =
+ ObjPtr<mirror::DexCache> dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
Thread::Current(), *ref.dex_file);
ArtMethod* method = dex_cache->GetResolvedMethod(
@@ -1295,7 +1297,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return target_offset;
}
- mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
+ ObjPtr<mirror::DexCache> GetDexCache(const DexFile* target_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
return (target_dex_file == dex_file_)
? dex_cache_
@@ -1303,10 +1305,12 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
- mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
+ DCHECK(writer_->HasImage());
+ ObjPtr<mirror::DexCache> dex_cache = GetDexCache(patch.TargetTypeDexFile());
+ ObjPtr<mirror::Class> type =
+ ClassLinker::LookupResolvedType(patch.TargetTypeIndex(), dex_cache, class_loader_);
CHECK(type != nullptr);
- return type;
+ return type.Ptr();
}
mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8cf4089eba..223439b0c7 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -51,7 +51,10 @@ class HGraphBuilder : public ValueObject {
compiler_driver_(driver),
compilation_stats_(compiler_stats),
block_builder_(graph, dex_file, code_item),
- ssa_builder_(graph, dex_compilation_unit->GetDexCache(), handles),
+ ssa_builder_(graph,
+ dex_compilation_unit->GetClassLoader(),
+ dex_compilation_unit->GetDexCache(),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -76,10 +79,12 @@ class HGraphBuilder : public ValueObject {
code_item_(code_item),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
- null_dex_cache_(),
compilation_stats_(nullptr),
block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph, null_dex_cache_, handles),
+ ssa_builder_(graph,
+ handles->NewHandle<mirror::ClassLoader>(nullptr),
+ handles->NewHandle<mirror::DexCache>(nullptr),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -91,7 +96,7 @@ class HGraphBuilder : public ValueObject {
/* compiler_driver */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_,
+ handles->NewHandle<mirror::DexCache>(nullptr),
handles) {}
GraphAnalysisResult BuildGraph();
@@ -112,8 +117,6 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
- ScopedNullHandle<mirror::DexCache> null_dex_cache_;
-
OptimizingCompilerStats* compilation_stats_;
HBasicBlockBuilder block_builder_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index cf824a14e3..26c8254c76 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1452,6 +1452,19 @@ static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
(cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
+// Allocate a scratch register from the VIXL pool, querying first into
+// the floating-point register pool, and then into the core register
+// pool. This is essentially a reimplementation of
+// vixl::aarch64::UseScratchRegisterScope::AcquireCPURegisterOfSize
+// using a different allocation strategy.
+static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssembler* masm,
+ vixl::aarch64::UseScratchRegisterScope* temps,
+ int size_in_bits) {
+ return masm->GetScratchFPRegisterList()->IsEmpty()
+ ? CPURegister(temps->AcquireRegisterOfSize(size_in_bits))
+ : CPURegister(temps->AcquireVRegisterOfSize(size_in_bits));
+}
+
void CodeGeneratorARM64::MoveLocation(Location destination,
Location source,
Primitive::Type dst_type) {
@@ -1563,8 +1576,16 @@ void CodeGeneratorARM64::MoveLocation(Location destination,
// a move is blocked by a another move requiring a scratch FP
// register, which would reserve D31). To prevent this issue, we
// ask for a scratch register of any type (core or FP).
- CPURegister temp =
- temps.AcquireCPURegisterOfSize(destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize);
+ //
+ // Also, we ask for an FP scratch register first, as the
+ // demand for scratch core registers is higher. This is why we
+ // use AcquireFPOrCoreCPURegisterOfSize instead of
+ // UseScratchRegisterScope::AcquireCPURegisterOfSize, which
+ // allocates core scratch registers first.
+ CPURegister temp = AcquireFPOrCoreCPURegisterOfSize(
+ GetVIXLAssembler(),
+ &temps,
+ (destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize));
__ Ldr(temp, StackOperandFrom(source));
__ Str(temp, StackOperandFrom(destination));
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 76be74e921..a095970a1e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -258,8 +258,10 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK_NE(out.AsRegister<Register>(), AT);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
- __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, 0);
+ bool reordering = __ SetReorder(false);
+ mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
}
__ B(GetExitLabel());
}
@@ -313,8 +315,10 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK_NE(out, AT);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
- mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
- __ StoreToOffset(kStoreWord, out, TMP, 0);
+ bool reordering = __ SetReorder(false);
+ mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
__ B(GetExitLabel());
}
@@ -1127,16 +1131,15 @@ Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address)
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholder(
- PcRelativePatchInfo* info, Register out, Register base) {
- bool reordering = __ SetReorder(false);
+void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
+ Register out,
+ Register base) {
if (GetInstructionSetFeatures().IsR6()) {
DCHECK_EQ(base, ZERO);
__ Bind(&info->high_label);
__ Bind(&info->pc_rel_label);
- // Add a 32-bit offset to PC.
+ // Add the high half of a 32-bit offset to PC.
__ Auipc(out, /* placeholder */ 0x1234);
- __ Addiu(out, out, /* placeholder */ 0x5678);
} else {
// If base is ZERO, emit NAL to obtain the actual base.
if (base == ZERO) {
@@ -1150,11 +1153,11 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholder(
if (base == ZERO) {
__ Bind(&info->pc_rel_label);
}
- __ Ori(out, out, /* placeholder */ 0x5678);
- // Add a 32-bit offset to PC.
+ // Add the high half of a 32-bit offset to PC.
__ Addu(out, out, (base == ZERO) ? RA : base);
}
- __ SetReorder(reordering);
+ // The immediately following instruction will add the sign-extended low half of the 32-bit
+ // offset to `out` (e.g. lw, jialc, addiu).
}
void CodeGeneratorMIPS::MarkGCCard(Register object,
@@ -5159,7 +5162,8 @@ void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invo
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- bool has_extra_input = invoke->HasPcRelativeDexCache();
+ bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
+ bool has_extra_input = invoke->HasPcRelativeDexCache() && !is_r6;
IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -5200,12 +5204,13 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
}
- // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
// with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = has_irreducible_loops;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -5238,10 +5243,11 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
}
- // We disable pc-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = has_irreducible_loops;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
fallback_load = false;
@@ -5259,6 +5265,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
+ // TODO: implement.
fallback_load = true;
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
@@ -5273,6 +5280,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
Register temp) {
+ CHECK(!GetInstructionSetFeatures().IsR6());
CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
if (!invoke->GetLocations()->Intrinsified()) {
@@ -5301,13 +5309,13 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticO
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = true;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (dispatch_info.method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- fallback_load = has_irreducible_loops;
break;
default:
fallback_load = false;
@@ -5325,7 +5333,8 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
- Register base_reg = invoke->HasPcRelativeDexCache()
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ Register base_reg = (invoke->HasPcRelativeDexCache() && !is_r6)
? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
: ZERO;
@@ -5346,14 +5355,23 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
- int32_t offset =
- invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ if (is_r6) {
+ uint32_t offset = invoke->GetDexCacheArrayOffset();
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
+ bool reordering = __ SetReorder(false);
+ EmitPcRelativeAddressPlaceholderHigh(info, TMP, ZERO);
+ __ Lw(temp.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
+ } else {
+ HMipsDexCacheArraysBase* base =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
+ int32_t offset =
+ invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
+ __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
+ }
break;
- }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
Register reg = temp.AsRegister<Register>();
@@ -5546,7 +5564,10 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -5562,8 +5583,10 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, 0);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
generate_null_check = true;
break;
}
@@ -5678,7 +5701,10 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -5694,8 +5720,10 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, 0);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
@@ -6894,8 +6922,12 @@ void InstructionCodeGeneratorMIPS::VisitMipsDexCacheArraysBase(HMipsDexCacheArra
Register reg = base->GetLocations()->Out().AsRegister<Register>();
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
+ CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
+ bool reordering = __ SetReorder(false);
// TODO: Reuse MipsComputeBaseMethodAddress on R2 instead of passing ZERO to force emitting NAL.
- codegen_->EmitPcRelativeAddressPlaceholder(info, reg, ZERO);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, reg, ZERO);
+ __ Addiu(reg, reg, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
}
void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index c8fd325999..e92eeef88f 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -463,7 +463,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- void EmitPcRelativeAddressPlaceholder(PcRelativePatchInfo* info, Register out, Register base);
+ void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 192b4a5050..e96e3d75e1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3117,14 +3117,6 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
Location root,
GpuRegister obj,
uint32_t offset) {
- // When handling PC-relative loads, the caller calls
- // EmitPcRelativeAddressPlaceholderHigh() and then GenerateGcRootFieldLoad().
- // The relative patcher expects the two methods to emit the following patchable
- // sequence of instructions in this case:
- // auipc reg1, 0x1234 // 0x1234 is a placeholder for offset_high.
- // lwu reg2, 0x5678(reg1) // 0x5678 is a placeholder for offset_low.
- // TODO: Adjust GenerateGcRootFieldLoad() and its caller when this method is
- // extended (e.g. for read barriers) so as not to break the relative patcher.
GpuRegister root_reg = root.AsRegister<GpuRegister>();
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 52b780c106..5ba8912134 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -115,12 +115,11 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED,
+ bool is_instance) const OVERRIDE {
+ return is_instance
? Location::RegisterLocation(A2)
- : (is_instance
- ? Location::RegisterLocation(A2)
- : Location::RegisterLocation(A1));
+ : Location::RegisterLocation(A1);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(F0);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e3f3df0ff5..763d6da6f5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -1067,6 +1067,39 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
}
#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+// Regression test for b/34760542.
+TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
+ std::unique_ptr<const Arm64InstructionSetFeatures> features(
+ Arm64InstructionSetFeatures::FromCppDefines());
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+ arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
+
+ codegen.Initialize();
+
+ // The following ParallelMove used to fail this assertion:
+ //
+ // Assertion failed (!available->IsEmpty())
+ //
+ // in vixl::aarch64::UseScratchRegisterScope::AcquireNextAvailable.
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ move->AddMove(Location::DoubleStackSlot(0),
+ Location::DoubleStackSlot(257),
+ Primitive::kPrimDouble,
+ nullptr);
+ move->AddMove(Location::DoubleStackSlot(257),
+ Location::DoubleStackSlot(0),
+ Primitive::kPrimDouble,
+ nullptr);
+ codegen.GetMoveResolver()->EmitNativeCode(move);
+
+ InternalCodeAllocator code_allocator;
+ codegen.Finalize(&code_allocator);
+}
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 04a4294c48..7734f9197d 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -47,7 +47,7 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
// Computing the dex cache base for PC-relative accesses will clobber RA with
// the NAL instruction on R2. Take a note of this before generating the method
// entry.
- if (!dex_cache_array_bases_.empty() && !codegen_->GetInstructionSetFeatures().IsR6()) {
+ if (!dex_cache_array_bases_.empty()) {
codegen_->ClobberRA();
}
}
@@ -92,6 +92,11 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
};
void DexCacheArrayFixups::Run() {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
+ if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
+ // Do nothing for R6 because it has PC-relative addressing.
+ return;
+ }
if (graph_->HasIrreducibleLoops()) {
// Do not run this optimization, as irreducible loops do not work with an instruction
// that can be live-in at the irreducible loop header.
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 3973985338..5539413aad 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -57,14 +57,18 @@ static bool IsIntAndGet(HInstruction* instruction, int64_t* value) {
return false;
}
-/** Returns b^e for b,e >= 1. */
-static int64_t IntPow(int64_t b, int64_t e) {
+/** Returns b^e for b,e >= 1. Sets overflow if arithmetic wrap-around occurred. */
+static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) {
DCHECK_GE(b, 1);
DCHECK_GE(e, 1);
int64_t pow = 1;
while (e) {
if (e & 1) {
+ int64_t oldpow = pow;
pow *= b;
+ if (pow < oldpow) {
+ *overflow = true;
+ }
}
e >>= 1;
b *= b;
@@ -1020,20 +1024,27 @@ bool InductionVarRange::GenerateLastValueGeometric(HInductionVarAnalysis::Induct
HInstruction* opb = nullptr;
if (GenerateCode(info->op_a, nullptr, graph, block, &opa, false, false) &&
GenerateCode(info->op_b, nullptr, graph, block, &opb, false, false)) {
- // Compute f ^ m for known maximum index value m.
- int64_t fpow = IntPow(f, m);
if (graph != nullptr) {
- DCHECK(info->operation == HInductionVarAnalysis::kMul ||
- info->operation == HInductionVarAnalysis::kDiv);
Primitive::Type type = info->type;
+ // Compute f ^ m for known maximum index value m.
+ bool overflow = false;
+ int64_t fpow = IntPow(f, m, &overflow);
+ if (info->operation == HInductionVarAnalysis::kDiv) {
+ // For division, any overflow truncates to zero.
+ if (overflow || (type != Primitive::kPrimLong && !CanLongValueFitIntoInt(fpow))) {
+ fpow = 0;
+ }
+ } else if (type != Primitive::kPrimLong) {
+ // For multiplication, okay to truncate to required precision.
+ DCHECK(info->operation == HInductionVarAnalysis::kMul);
+ fpow = static_cast<int32_t>(fpow);
+ }
+ // Generate code.
if (fpow == 0) {
// Special case: repeated mul/div always yields zero.
*result = graph->GetConstant(type, 0);
} else {
// Last value: a * f ^ m + b or a * f ^ -m + b.
- if (type != Primitive::kPrimLong) {
- fpow = static_cast<int32_t>(fpow); // okay to truncate
- }
HInstruction* e = nullptr;
if (info->operation == HInductionVarAnalysis::kMul) {
e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 7772e8f973..22f0646fd0 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -198,9 +198,9 @@ static uint32_t FindMethodIndexIn(ArtMethod* method,
}
static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ const DexCompilationUnit& compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile& dex_file = *compilation_unit.GetDexFile();
dex::TypeIndex index;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
@@ -209,22 +209,19 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
DCHECK(cls->IsProxyClass()) << cls->PrettyClass();
// TODO: deal with proxy classes.
} else if (IsSameDexFile(cls->GetDexFile(), dex_file)) {
- DCHECK_EQ(cls->GetDexCache(), dex_cache.Get());
+ DCHECK_EQ(cls->GetDexCache(), compilation_unit.GetDexCache().Get());
index = cls->GetDexTypeIndex();
- // Update the dex cache to ensure the class is in. The generated code will
- // consider it is. We make it safe by updating the dex cache, as other
- // dex files might also load the class, and there is no guarantee the dex
- // cache of the dex file of the class will be updated.
- if (dex_cache->GetResolvedType(index) == nullptr) {
- dex_cache->SetResolvedType(index, cls);
- }
} else {
index = cls->FindTypeIndexInOtherDexFile(dex_file);
- // We cannot guarantee the entry in the dex cache will resolve to the same class,
+ // We cannot guarantee the entry will resolve to the same class,
// as there may be different class loaders. So only return the index if it's
- // the right class in the dex cache already.
- if (index.IsValid() && dex_cache->GetResolvedType(index) != cls) {
- index = dex::TypeIndex::Invalid();
+ // the right class already resolved with the class loader.
+ if (index.IsValid()) {
+ ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType(
+ index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+ if (resolved != cls) {
+ index = dex::TypeIndex::Invalid();
+ }
}
}
@@ -451,9 +448,8 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
dex::TypeIndex class_index = FindClassIndexIn(
- GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
@@ -496,6 +492,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -586,7 +583,6 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
bool one_target_inlined = false;
@@ -608,8 +604,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- dex::TypeIndex class_index = FindClassIndexIn(
- handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
@@ -665,6 +660,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -859,6 +855,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -927,6 +924,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// Actual return value has a more specific type than the method's declared
// return type. Run RTP again on the outer graph to propagate it.
ReferenceTypePropagation(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
@@ -1179,7 +1177,11 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
- ReferenceTypePropagation rtp(graph_, dex_cache, handles_, /* is_first_run */ false);
+ ReferenceTypePropagation rtp(graph_,
+ outer_compilation_unit_.GetClassLoader(),
+ dex_cache,
+ handles_,
+ /* is_first_run */ false);
rtp.Visit(iget);
}
return iget;
@@ -1225,7 +1227,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetClassLoader()));
DexCompilationUnit dex_compilation_unit(
- class_loader.ToJObject(),
+ class_loader,
class_linker,
callee_dex_file,
code_item,
@@ -1341,6 +1343,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// are more specific than the declared ones, run RTP again on the inner graph.
if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
ReferenceTypePropagation(callee_graph,
+ outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index cac385ce3c..3d911d77ba 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -668,11 +668,10 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) {
ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
// We fetch the referenced class eagerly (that is, the class pointed by in the MethodId
// at method_idx), as `CanAccessResolvedMethod` expects it be be in the dex cache.
@@ -1284,9 +1283,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
static mirror::Class* GetClassFrom(CompilerDriver* driver,
const DexCompilationUnit& compilation_unit) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(compilation_unit.GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
@@ -1302,10 +1299,9 @@ mirror::Class* HInstructionBuilder::GetCompilingClass() const {
bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1343,10 +1339,8 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
ArtField* resolved_field = compiler_driver_->ResolveField(
soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
@@ -1357,6 +1351,7 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
return true;
}
+ StackHandleScope<2> hs(soa.Self());
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1635,9 +1630,7 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
const DexCompilationUnit* compilation_unit =
outer ? outer_compilation_unit_ : dex_compilation_unit_;
const DexFile& dex_file = *compilation_unit->GetDexFile();
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
@@ -1692,17 +1685,9 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
}
}
-bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- bool* finalizable) const {
- return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index, finalizable);
-}
-
bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const {
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- return NeedsAccessCheck(type_index, dex_cache, finalizable);
+ return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
+ LookupReferrerClass(), LookupResolvedType(type_index, *dex_compilation_unit_), finalizable);
}
bool HInstructionBuilder::CanDecodeQuickenedInfo() const {
@@ -2742,4 +2727,18 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
return true;
} // NOLINT(readability/fn_size)
+ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
+ dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const {
+ return ClassLinker::LookupResolvedType(
+ type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+}
+
+ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
+ // TODO: Cache the result in a Handle<mirror::Class>.
+ const DexFile::MethodId& method_id =
+ dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
+ return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
+}
+
} // namespace art
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 5efe95094c..6e3b078dbb 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,11 +103,8 @@ class HInstructionBuilder : public ValueObject {
// Returns whether the current method needs access check for the type.
// Output parameter finalizable is set to whether the type is finalizable.
- bool NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- /*out*/bool* finalizable) const
+ bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const
REQUIRES_SHARED(Locks::mutator_lock_);
- bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const;
template<typename T>
void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
@@ -290,6 +287,12 @@ class HInstructionBuilder : public ValueObject {
// not be resolved.
ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type);
+ ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
+
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 91d9c56d10..1a10173ed7 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -514,6 +514,18 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
__ Vsqrt(OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ ArmVIXLAssembler* assembler = GetAssembler();
+ __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
CreateIntToIntLocations(arena_, invoke);
}
@@ -2772,7 +2784,6 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRint)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 297500b12f..dad87e3d9e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -305,7 +305,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const OVERRIDE;
@@ -374,7 +374,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -871,7 +871,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -942,11 +942,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
const uint8_t* interpreter_metadata = nullptr;
if (method == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
method = compiler_driver->ResolveMethod(
- soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type);
}
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
@@ -955,16 +952,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
graph->SetArtMethod(method);
ScopedObjectAccess soa(Thread::Current());
interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
- dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex();
-
- // Update the dex cache if the type is not in it yet. Note that under AOT,
- // the verifier must have set it, but under JIT, there's no guarantee, as we
- // don't necessarily run the verifier.
- // The compiler and the compiler driver assume the compiling class is
- // in the dex cache.
- if (dex_cache->GetResolvedType(type_index) == nullptr) {
- dex_cache->SetResolvedType(type_index, method->GetDeclaringClass());
- }
}
std::unique_ptr<CodeGenerator> codegen(
@@ -1044,7 +1031,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject jclass_loader,
+ Handle<mirror::ClassLoader> jclass_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -1139,7 +1126,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
DCHECK(method->IsCompilable());
- jobject jclass_loader = class_loader.ToJObject();
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
@@ -1163,7 +1149,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_cache,
method,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index b02f2509ab..be4857a49a 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -66,11 +66,13 @@ ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetThrowabl
class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
public:
RTPVisitor(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
HandleCache* handle_cache,
ArenaVector<HInstruction*>* worklist,
bool is_first_run)
: HGraphDelegateVisitor(graph),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handle_cache),
worklist_(worklist),
@@ -102,6 +104,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
bool is_exact);
private:
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> hint_dex_cache_;
HandleCache* handle_cache_;
ArenaVector<HInstruction*>* worklist_;
@@ -109,11 +112,13 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
@@ -148,7 +153,12 @@ void ReferenceTypePropagation::ValidateTypes() {
}
void ReferenceTypePropagation::Visit(HInstruction* instruction) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
instruction->Accept(&visitor);
}
@@ -322,7 +332,12 @@ void ReferenceTypePropagation::Run() {
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
// Handle Phis first as there might be instructions in the same block who depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -542,9 +557,10 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
DCHECK_EQ(instr->GetType(), Primitive::kPrimNot);
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
- // Get type from dex cache assuming it was populated by the verifier.
- SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
+ ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get());
+ SetClassAsTypeInfo(instr, klass, is_exact);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
@@ -557,25 +573,13 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
-static mirror::Class* GetClassFromDexCache(Thread* self,
- const DexFile& dex_file,
- dex::TypeIndex type_idx,
- Handle<mirror::DexCache> hint_dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
- // Get type from dex cache assuming it was populated by the verifier.
- return dex_cache->GetResolvedType(type_idx);
-}
-
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
+ UpdateReferenceTypeInfo(instr,
+ instr->GetTypeIndex(),
+ instr->GetDexFile(),
+ /* is_exact */ false);
}
}
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 4663471729..215e96786b 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -33,6 +33,7 @@ namespace art {
class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
@@ -105,6 +106,8 @@ class ReferenceTypePropagation : public HOptimization {
void ValidateTypes();
+ Handle<mirror::ClassLoader> class_loader_;
+
// Note: hint_dex_cache_ is usually, but not necessarily, the dex cache associated with
// graph_->GetDexFile(). Since we may look up also in other dex files, it's used only
// as a hint, to reduce the number of calls to the costly ClassLinker::FindDexCache().
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index b061c871b0..84a4bab1a9 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -38,6 +38,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
Handle<mirror::DexCache>(),
handles,
true,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ae1e369999..d6edb650ba 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -497,7 +497,11 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// 4) Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
- ReferenceTypePropagation(graph_, dex_cache_, handles_, /* is_first_run */ true).Run();
+ ReferenceTypePropagation(graph_,
+ class_loader_,
+ dex_cache_,
+ handles_,
+ /* is_first_run */ true).Run();
// 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 45dac54115..978f113ec4 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -48,9 +48,11 @@ namespace art {
class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
+ class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
@@ -115,6 +117,7 @@ class SsaBuilder : public ValueObject {
void RemoveRedundantUninitializedStrings();
HGraph* graph_;
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e8a92c1914..19f0f1c182 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -277,7 +277,6 @@ NO_RETURN static void Usage(const char* fmt, ...) {
"|balanced"
"|speed-profile"
"|speed"
- "|layout-profile"
"|everything-profile"
"|everything):");
UsageError(" select compiler filter.");
@@ -1540,9 +1539,9 @@ class Dex2Oat FINAL {
std::unique_ptr<MemMap> opened_dex_files_map;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// No need to verify the dex file for:
- // 1) dexlayout, which already verified it
+ // 1) kSpeedProfile, since it includes dexlayout, which does the verification.
// 2) when we have a vdex file, which means it was already verified.
- bool verify = compiler_options_->GetCompilerFilter() != CompilerFilter::kLayoutProfile &&
+ bool verify = compiler_options_->GetCompilerFilter() != CompilerFilter::kSpeedProfile &&
(input_vdex_file_ == nullptr);
if (!oat_writers_[i]->WriteAndOpenDexFiles(
kIsVdexEnabled ? vdex_files_[i].get() : oat_files_[i].get(),
@@ -2349,7 +2348,7 @@ class Dex2Oat FINAL {
compiler_options_.get(),
oat_file.get()));
elf_writers_.back()->Start();
- bool do_dexlayout = compiler_options_->GetCompilerFilter() == CompilerFilter::kLayoutProfile;
+ bool do_dexlayout = compiler_options_->GetCompilerFilter() == CompilerFilter::kSpeedProfile;
oat_writers_.emplace_back(new OatWriter(
IsBootImage(), timings_, do_dexlayout ? profile_compilation_info_.get() : nullptr));
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index e86e560b1a..c2275aca95 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -125,7 +125,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
class_path = OatFile::kSpecialSharedLibrary;
}
argv.push_back(class_path);
- if (runtime->IsDebuggable()) {
+ if (runtime->IsJavaDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
@@ -591,7 +591,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location };
- GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kLayoutProfile, extra_args);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile, extra_args);
CheckValidity();
ASSERT_TRUE(success_);
@@ -632,7 +632,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
EXPECT_EQ(old_class1, new_class0);
}
- EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kLayoutProfile);
+ EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
}
// Check whether the dex2oat run was really successful.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b6da6c13f3..9b4d3e1156 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2180,9 +2180,14 @@ class ImageDumper {
ScopedIndentation indent2(&state->vios_);
auto* resolved_types = dex_cache->GetResolvedTypes();
for (size_t i = 0; i < num_types; ++i) {
- auto* elem = resolved_types[i].Read();
+ auto pair = resolved_types[i].load(std::memory_order_relaxed);
size_t run = 0;
- for (size_t j = i + 1; j != num_types && elem == resolved_types[j].Read(); ++j) {
+ for (size_t j = i + 1; j != num_types; ++j) {
+ auto other_pair = resolved_types[j].load(std::memory_order_relaxed);
+ if (pair.index != other_pair.index ||
+ pair.object.Read() != other_pair.object.Read()) {
+ break;
+ }
++run;
}
if (run == 0) {
@@ -2192,12 +2197,13 @@ class ImageDumper {
i = i + run;
}
std::string msg;
+ auto* elem = pair.object.Read();
if (elem == nullptr) {
msg = "null";
} else {
msg = elem->PrettyClass();
}
- os << StringPrintf("%p %s\n", elem, msg.c_str());
+ os << StringPrintf("%p %u %s\n", elem, pair.index, msg.c_str());
}
}
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 9a73830f99..2546822613 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -643,8 +643,8 @@ void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots
if (orig_strings != nullptr) {
orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this));
}
- GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
- GcRoot<mirror::Class>* relocated_types = RelocatedAddressOfPointer(orig_types);
+ mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types);
copy_dex_cache->SetField64<false>(
mirror::DexCache::ResolvedTypesOffset(),
static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types)));
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 196c65e11a..540df5a554 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -186,6 +186,7 @@ cc_defaults {
"reflection.cc",
"runtime.cc",
"runtime_callbacks.cc",
+ "runtime_common.cc",
"runtime_options.cc",
"signal_catcher.cc",
"stack.cc",
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 76218fb542..2d5eca003d 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1450,316 +1450,83 @@ ENTRY art_quick_aput_obj
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
- /*
- * Called by managed code to resolve a static field and load a byte primitive value.
- */
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
-
- /*
- * Called by managed code to resolve a static field and load a char primitive value.
- */
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*, $sp)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- lw $a1, 0($sp) # pass referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet64StaticFromCode
- jalr $t9 # (field_idx, referrer, new_val, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjStaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
+// Macros taking opportunity of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- lw $t1, 0($sp) # load referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- sw rSELF, 20($sp) # pass Thread::Current
- la $t9, artSet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw $t1, 16($sp) # pass referrer's Method*
- RETURN_IF_ZERO
-END art_quick_set64_instance
+.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, 64-bit new_val, Thread*) or
+ # (field_idx, 64-bit new_val, Thread*)
+ # Note that a 64-bit new_val needs to be aligned with
+ # an even-numbered register, hence A1 may be skipped
+ # for new_val to reside in A2-A3.
+ sw rSELF, 16($sp) # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve an instance field and store an object reference.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index b53fd100fa..f3629d90d3 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1416,296 +1416,77 @@ ENTRY art_quick_aput_obj
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
-
- /*
- * Called by managed code to resolve a static field and load a byte primitive value.
- */
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
-
- /*
- * Called by managed code to resolve a static field and load a char primitive value.
- */
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
-
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
-
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
-
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- # a2 contains the new val
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
+// Macros taking opportunity of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_instance
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve an instance field and store an object reference.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 80af8e7bde..16b73c681f 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -311,6 +311,8 @@ inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
template <bool kResolve>
inline ObjPtr<mirror::Class> ArtField::GetType() {
+ // TODO: Refactor this function into two functions, ResolveType() and LookupType()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
const uint32_t field_index = GetDexFieldIndex();
ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
@@ -320,9 +322,16 @@ inline ObjPtr<mirror::Class> ArtField::GetType() {
const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_);
- if (kResolve && UNLIKELY(type == nullptr)) {
- type = ResolveGetType(field_id.type_idx_);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (UNLIKELY(type == nullptr)) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (kResolve) {
+ type = class_linker->ResolveType(*dex_file, field_id.type_idx_, declaring_class);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_file, field_id.type_idx_, dex_cache, declaring_class->GetClassLoader());
+ DCHECK(!Thread::Current()->IsExceptionPending());
+ }
}
return type;
}
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index a4a6e5a4fb..7e131040be 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -48,10 +48,6 @@ ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
}
-ObjPtr<mirror::Class> ArtField::ResolveGetType(dex::TypeIndex type_idx) {
- return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
-}
-
ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 427e103749..75dd981136 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -217,8 +217,6 @@ class ArtField FINAL {
private:
ObjPtr<mirror::Class> ProxyFindSystemClass(const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_);
- ObjPtr<mirror::Class> ResolveGetType(dex::TypeIndex type_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<mirror::String> ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7ec3900aa9..efcdbbff5a 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -175,12 +175,19 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerS
}
inline mirror::Class* ArtMethod::GetClassFromTypeIndex(dex::TypeIndex type_idx, bool resolve) {
+ // TODO: Refactor this function into two functions, Resolve...() and Lookup...()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
ObjPtr<mirror::DexCache> dex_cache = GetDexCache();
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
- if (UNLIKELY(type == nullptr) && resolve) {
+ if (UNLIKELY(type == nullptr)) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- type = class_linker->ResolveType(type_idx, this);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (resolve) {
+ type = class_linker->ResolveType(type_idx, this);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, GetClassLoader());
+ }
}
return type.Ptr();
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index ec789f51ef..61ff41742b 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -55,6 +55,17 @@ extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*,
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
+ArtMethod* ArtMethod::GetNonObsoleteMethod() {
+ DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ if (LIKELY(!IsObsolete())) {
+ return this;
+ } else if (IsDirect()) {
+ return &GetDeclaringClass()->GetDirectMethodsSlice(kRuntimePointerSize)[GetMethodIndex()];
+ } else {
+ return GetDeclaringClass()->GetVTableEntry(GetMethodIndex(), kRuntimePointerSize);
+ }
+}
+
ArtMethod* ArtMethod::GetSingleImplementation(PointerSize pointer_size) {
DCHECK(!IsNative());
if (!IsAbstract()) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index f145d7c416..e4db2c7324 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -570,6 +570,8 @@ class ArtMethod FINAL {
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetNonObsoleteMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2e17dd85e6..e928344fb6 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -68,22 +68,28 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self,
inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_, declaring_class->GetDexFile().NumStringIds());
- ObjPtr<mirror::String> string =
- mirror::StringDexCachePair::Lookup(declaring_class->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string = referrer->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
string = ResolveString(dex_file, string_idx, dex_cache);
}
return string.Ptr();
}
+inline ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(
+ dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
+ if (type == nullptr) {
+ type = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, class_loader);
+ }
+ return type;
+}
+
inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
if (kIsDebugBuild) {
@@ -93,29 +99,10 @@ inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMetho
if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
- }
- return resolved_type.Ptr();
-}
-
-inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtField* referrer) {
- Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ObjPtr<mirror::DexCache> dex_cache_ptr = declaring_class->GetDexCache();
- ObjPtr<mirror::Class> resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == nullptr)) {
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type.Ptr();
}
@@ -159,7 +146,7 @@ inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
if (UNLIKELY(resolved_method == nullptr)) {
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile* dex_file = h_dex_cache->GetDexFile();
resolved_method = ResolveMethod<kResolveMode>(*dex_file,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 02b26c6568..866936739a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1171,6 +1171,23 @@ static void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& pred
}
}
+template <typename T>
+static void CopyDexCachePairs(const std::atomic<mirror::DexCachePair<T>>* src,
+ size_t count,
+ std::atomic<mirror::DexCachePair<T>>* dst) {
+ DCHECK_NE(count, 0u);
+ DCHECK(!src[0].load(std::memory_order_relaxed).object.IsNull() ||
+ src[0].load(std::memory_order_relaxed).index != 0u);
+ for (size_t i = 0; i < count; ++i) {
+ DCHECK_EQ(dst[i].load(std::memory_order_relaxed).index, 0u);
+ DCHECK(dst[i].load(std::memory_order_relaxed).object.IsNull());
+ mirror::DexCachePair<T> source = src[i].load(std::memory_order_relaxed);
+ if (source.index != 0u || !source.object.IsNull()) {
+ dst[i].store(source, std::memory_order_relaxed);
+ }
+ }
+}
+
bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1224,7 +1241,10 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
- const size_t num_types = dex_file->NumTypeIds();
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
const size_t num_methods = dex_file->NumMethodIds();
const size_t num_fields = dex_file->NumFieldIds();
size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
@@ -1243,28 +1263,14 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings();
mirror::StringDexCacheType* const strings =
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- for (size_t j = 0; j < num_strings; ++j) {
- DCHECK_EQ(strings[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(strings[j].load(std::memory_order_relaxed).object.IsNull());
- strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
- mirror::StringDexCachePair::Initialize(strings);
+ CopyDexCachePairs(image_resolved_strings, num_strings, strings);
dex_cache->SetStrings(strings);
}
if (num_types != 0u) {
- GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes();
- GcRoot<mirror::Class>* const types =
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
- for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) {
- DCHECK(types[j].IsNull());
- }
- CopyNonNull(image_resolved_types,
- num_types,
- types,
- [](const GcRoot<mirror::Class>& elem) {
- return elem.IsNull();
- });
+ mirror::TypeDexCacheType* const image_resolved_types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types =
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
+ CopyDexCachePairs(image_resolved_types, num_types, types);
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1305,15 +1311,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::MethodTypeDexCacheType* const method_types =
reinterpret_cast<mirror::MethodTypeDexCacheType*>(
raw_arrays + layout.MethodTypesOffset());
- for (size_t j = 0; j < num_method_types; ++j) {
- DCHECK_EQ(method_types[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(method_types[j].load(std::memory_order_relaxed).object.IsNull());
- method_types[j].store(
- image_resolved_method_types[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
-
- mirror::MethodTypeDexCachePair::Initialize(method_types);
+ CopyDexCachePairs(image_resolved_method_types, num_method_types, method_types);
dex_cache->SetResolvedMethodTypes(method_types);
}
}
@@ -1335,11 +1333,11 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
const size_t num_types = dex_cache->NumResolvedTypes();
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
+ for (size_t j = 0; j != num_types; ++j) {
// The image space is not yet added to the heap, avoid read barriers.
- ObjPtr<mirror::Class> klass = types[j].Read();
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
auto it = new_class_set->Find(ClassTable::TableSlot(klass));
@@ -1700,9 +1698,9 @@ bool ClassLinker::AddImageSpace(
// The current dex file field is bogus, overwrite it so that we can get the dex file in the
// loop below.
h_dex_cache->SetDexFile(dex_file.get());
- GcRoot<mirror::Class>* const types = h_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types = h_dex_cache->GetResolvedTypes();
for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) {
- ObjPtr<mirror::Class> klass = types[j].Read();
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (klass != nullptr) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
}
@@ -2862,9 +2860,12 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
return true;
}
- if (runtime->IsFullyDeoptable()) {
- // We need to be able to deoptimize at any time so we should always just ignore precompiled
- // code and go to the interpreter assuming we don't already have jitted code.
+ if (runtime->IsJavaDebuggable()) {
+ // For simplicity, we ignore precompiled code and go to the interpreter
+ // assuming we don't already have jitted code.
+ // We could look at the oat file where `quick_code` is being defined,
+ // and check whether it's been compiled debuggable, but we decided to
+ // only rely on the JIT for debuggable apps.
jit::Jit* jit = Runtime::Current()->GetJit();
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
@@ -2872,18 +2873,13 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
- // since we want to JIT it with extra stackmaps for native debugging.
- // On the other hand, keep all AOT code from the boot image, since the
- // blocking JIT would results in non-negligible performance impact.
+ // since we want to JIT it (at first use) with extra stackmaps for native
+ // debugging. We keep however all AOT code from the boot image,
+ // since the JIT-at-first-use is blocking and would result in non-negligible
+ // startup performance impact.
return !runtime->GetHeap()->IsInBootImageOatFile(quick_code);
}
- if (Dbg::IsDebuggerActive()) {
- // Boot image classes may be AOT-compiled as non-debuggable.
- // This is not suitable for the Java debugger, so ignore the AOT code.
- return runtime->GetHeap()->IsInBootImageOatFile(quick_code);
- }
-
return false;
}
@@ -7700,7 +7696,9 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
uint32_t utf16_length;
const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length);
ObjPtr<mirror::String> string = intern_table_->InternStrong(utf16_length, utf8_data);
- dex_cache->SetResolvedString(string_idx, string);
+ if (string != nullptr) {
+ dex_cache->SetResolvedString(string_idx, string);
+ }
return string.Ptr();
}
@@ -7743,6 +7741,7 @@ ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(const DexFile& dex_file,
}
}
if (type != nullptr && type->IsResolved()) {
+ dex_cache->SetResolvedType(type_idx, type);
return type.Ptr();
}
return nullptr;
@@ -7765,6 +7764,12 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == nullptr) {
+ // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here
+ // as a workaround for FastNative JNI to avoid AssertNoPendingException() when
+ // trying to resolve annotations while an exception may be pending. Bug: 34659969
+ resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get());
+ }
+ if (resolved == nullptr) {
Thread* self = Thread::Current();
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(self, descriptor, class_loader);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 5042fb7609..21edd513ac 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -262,10 +262,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtField* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
-
// Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search
// for the type, since it may be referenced from but not contained within the given DexFile.
ObjPtr<mirror::Class> LookupResolvedType(const DexFile& dex_file,
@@ -273,6 +269,10 @@ class ClassLinker {
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 17510bb598..6eee0bd617 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -914,7 +914,7 @@ TEST_F(ClassLinkerTest, LookupResolvedType) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
klass);
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
@@ -949,7 +949,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeArray) {
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
array_klass);
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(array_idx, nullptr);
+ dex_cache->ClearResolvedType(array_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(array_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
@@ -972,7 +972,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
@@ -990,7 +990,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index dc89d32f50..cb8c11db96 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -33,7 +33,6 @@ bool CompilerFilter::IsBytecodeCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -53,7 +52,6 @@ bool CompilerFilter::IsJniCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -73,7 +71,6 @@ bool CompilerFilter::IsAnyMethodCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -93,7 +90,6 @@ bool CompilerFilter::IsVerificationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -120,7 +116,6 @@ bool CompilerFilter::DependsOnProfile(Filter filter) {
case CompilerFilter::kVerifyProfile:
case CompilerFilter::kSpaceProfile:
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile: return true;
}
UNREACHABLE();
@@ -145,7 +140,6 @@ CompilerFilter::Filter CompilerFilter::GetNonProfileDependentFilterFrom(Filter f
return CompilerFilter::kSpace;
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
return CompilerFilter::kSpeed;
case CompilerFilter::kEverythingProfile:
@@ -171,7 +165,6 @@ std::string CompilerFilter::NameOfFilter(Filter filter) {
case CompilerFilter::kTime: return "time";
case CompilerFilter::kSpeedProfile: return "speed-profile";
case CompilerFilter::kSpeed: return "speed";
- case CompilerFilter::kLayoutProfile: return "layout-profile";
case CompilerFilter::kEverythingProfile: return "everything-profile";
case CompilerFilter::kEverything: return "everything";
}
@@ -199,8 +192,6 @@ bool CompilerFilter::ParseCompilerFilter(const char* option, Filter* filter) {
*filter = kSpeed;
} else if (strcmp(option, "speed-profile") == 0) {
*filter = kSpeedProfile;
- } else if (strcmp(option, "layout-profile") == 0) {
- *filter = kLayoutProfile;
} else if (strcmp(option, "everything") == 0) {
*filter = kEverything;
} else if (strcmp(option, "everything-profile") == 0) {
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 7eb5f9a90f..796f4aad0c 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -39,7 +39,6 @@ class CompilerFilter FINAL {
kSpace, // Maximize space savings.
kBalanced, // Good performance return on compilation investment.
kSpeedProfile, // Maximize runtime performance based on profile.
- kLayoutProfile, // Temporary filter for dexlayout. Will be merged with kSpeedProfile.
kSpeed, // Maximize runtime performance.
kEverythingProfile, // Compile everything capable of being compiled based on profile.
kEverything, // Compile everything capable of being compiled.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 22a31635a6..1a0cec075c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -588,29 +588,6 @@ bool Dbg::RequiresDeoptimization() {
return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}
-// Used to patch boot image method entry point to interpreter bridge.
-class UpdateEntryPointsClassVisitor : public ClassVisitor {
- public:
- explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
- : instrumentation_(instrumentation) {}
-
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
- auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : klass->GetMethods(pointer_size)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
- !m.IsNative() &&
- !m.IsProxyMethod()) {
- instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge());
- }
- }
- return true;
- }
-
- private:
- instrumentation::Instrumentation* const instrumentation_;
-};
-
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -639,14 +616,16 @@ void Dbg::GoActive() {
}
Runtime* runtime = Runtime::Current();
- // Since boot image code may be AOT compiled as not debuggable, we need to patch
- // entry points of methods in boot image to interpreter bridge.
- // However, the performance cost of this is non-negligible during native-debugging due to the
+ // Best effort deoptimization if the runtime is non-Java debuggable. This happens when
+ // ro.debuggable is set, but the application is not debuggable, or when a standalone
+ // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
+ //
+ // The performance cost of this is non-negligible during native-debugging due to the
// forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
- if (!runtime->GetInstrumentation()->IsForcedInterpretOnly() && !runtime->IsNativeDebuggable()) {
- ScopedObjectAccess soa(self);
- UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
- runtime->GetClassLinker()->VisitClasses(&visitor);
+ if (!runtime->IsJavaDebuggable() &&
+ !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
+ !runtime->IsNativeDebuggable()) {
+ runtime->DeoptimizeBootImage();
}
ScopedSuspendAll ssa(__FUNCTION__);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ac0ce36016..1b267eb991 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -705,10 +705,10 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx,
return resolved_method;
} else if (type == kSuper) {
// TODO This lookup is rather slow.
- dex::TypeIndex method_type_idx =
- referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
- mirror::Class* method_reference_class =
- referrer->GetDexCache()->GetResolvedType(method_type_idx);
+ ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+ dex::TypeIndex method_type_idx = dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
+ ObjPtr<mirror::Class> method_reference_class = ClassLinker::LookupResolvedType(
+ method_type_idx, dex_cache, referrer->GetClassLoader());
if (method_reference_class == nullptr) {
// Need to do full type resolution...
return nullptr;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index eb76fb6b88..bde9009f7b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -781,15 +781,19 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc.
if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
- Runtime::Current()->IsDeoptimizeable(caller_pc))) {
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result, shorty[0] == 'L', /* from_code */ false, self->GetException());
-
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
+ Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ } else {
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(
+ result, shorty[0] == 'L', /* from_code */ false, self->GetException());
+
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
+ }
}
// No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9dc72f0791..0819ba04f7 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -25,6 +25,7 @@
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -139,7 +140,7 @@ void ConcurrentCopying::RunPhases() {
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
- ScopedPause pause(this);
+ ScopedPause pause(this, false);
CheckEmptyMarkStack();
if (kVerboseMode) {
LOG(INFO) << "Verifying no from-space refs";
@@ -439,8 +440,27 @@ void ConcurrentCopying::FlipThreadRoots() {
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
+
+ // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if
+ // necessary. This is slightly over-reporting, as this includes the time to actually suspend
+ // threads.
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
+ }
+
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
+
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
+ }
+
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 01bcb7df19..14fd332b57 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -158,22 +158,26 @@ void GarbageCollector::ResetMeasurements() {
total_freed_bytes_ = 0;
}
-GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
- : start_time_(NanoTime()), collector_(collector) {
+GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
+ : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->StartPause();
+ if (with_reporting) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
}
}
GarbageCollector::ScopedPause::~ScopedPause() {
collector_->RegisterPause(NanoTime() - start_time_);
Runtime* runtime = Runtime::Current();
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->EndPause();
+ if (with_reporting_) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
}
runtime->GetThreadList()->ResumeAll();
}
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0177e2a1ad..95601d736d 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -126,12 +126,14 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
public:
class SCOPED_LOCKABLE ScopedPause {
public:
- explicit ScopedPause(GarbageCollector* collector) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
+ explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
+ EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
~ScopedPause() UNLOCK_FUNCTION();
private:
const uint64_t start_time_;
GarbageCollector* const collector_;
+ bool with_reporting_;
};
GarbageCollector(Heap* heap, const std::string& name);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 70449797c1..f5bf935323 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3352,7 +3352,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
void Heap::PreGcVerification(collector::GarbageCollector* gc) {
if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PreGcVerificationPaused(gc);
}
}
@@ -3420,7 +3420,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
void Heap::PostGcVerification(collector::GarbageCollector* gc) {
if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PostGcVerificationPaused(gc);
}
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e03958d717..e56f0dc613 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1225,9 +1225,9 @@ class ImageSpaceLoader {
}
dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
}
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
if (types != nullptr) {
- GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types);
+ mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
if (types != new_types) {
dex_cache->SetResolvedTypes(new_types);
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 54b099eb14..87f429568d 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '6', '\0' }; // Erroneous resolved class.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '7', '\0' }; // hash-based DexCache types
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index bbd6d352d3..f11e2cba10 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -105,10 +105,9 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::IsDebuggerActive() &&
- Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
!method->IsProxyMethod();
}
@@ -132,9 +131,10 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
new_quick_code = GetQuickToInterpreterBridge();
} else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
new_quick_code = GetQuickToInterpreterBridge();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -148,13 +148,14 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
// class, all its static methods code will be set to the instrumentation entry point.
// For more details, see ClassLinker::FixupStaticTrampolines.
if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
// Oat code should not be used. Don't install instrumentation stub and
// use interpreter for instrumentation.
new_quick_code = GetQuickToInterpreterBridge();
} else if (entry_exit_stubs_installed_) {
new_quick_code = GetQuickInstrumentationEntryPoint();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -557,10 +558,8 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
- if (interpreter_stubs_installed_ && interpret_only_) {
+ if (interpreter_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInterpreter;
- } else if (interpreter_stubs_installed_) {
- return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
} else if (entry_exit_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
@@ -569,11 +568,8 @@ Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentation
}
bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
- // We need to reinstall instrumentation if we go to a different level or if the current level is
- // kInstrumentWithInterpreterAndJit since that level does not force all code to always use the
- // interpreter and so we might have started running optimized code again.
- return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
- GetCurrentInstrumentationLevel() != new_level;
+ // We need to reinstall instrumentation if we go to a different level.
+ return GetCurrentInstrumentationLevel() != new_level;
}
void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
@@ -604,7 +600,7 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
if (requested_level > InstrumentationLevel::kInstrumentNothing) {
- if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
+ if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
interpreter_stubs_installed_ = true;
entry_exit_stubs_installed_ = true;
} else {
@@ -731,10 +727,12 @@ void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_cod
UpdateMethodsCodeImpl(method, quick_code);
}
-void Instrumentation::UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code) {
- // When debugger attaches, we may update the entry points of all methods of a class
- // to the interpreter bridge. A method's declaring class might not be in resolved
- // state yet in that case.
+void Instrumentation::UpdateMethodsCodeForJavaDebuggable(ArtMethod* method,
+ const void* quick_code) {
+ // When the runtime is set to Java debuggable, we may update the entry points of
+ // all methods of a class to the interpreter bridge. A method's declaring class
+ // might not be in resolved state yet in that case, so we bypass the DCHECK in
+ // UpdateMethodsCode.
UpdateMethodsCodeImpl(method, quick_code);
}
@@ -819,10 +817,9 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
!method->GetDeclaringClass()->IsInitialized()) {
UpdateEntrypoints(method, GetQuickResolutionStub());
} else {
- const void* quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, quick_code)) {
- quick_code = GetQuickToInterpreterBridge();
- }
+ const void* quick_code = NeedDebugVersionFor(method)
+ ? GetQuickToInterpreterBridge()
+ : class_linker->GetQuickOatCodeFor(method);
UpdateEntrypoints(method, quick_code);
}
@@ -879,14 +876,6 @@ bool Instrumentation::ShouldNotifyMethodEnterExitEvents() const {
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
-// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
-// multiple users of instrumentation. Since this is just a temporary state anyway pending work to
-// ensure that the current_method doesn't get kept across suspend points this should be okay.
-// TODO Remove once b/33630159 is resolved.
-void Instrumentation::ReJitEverything(const char* key) {
- ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
-}
-
void Instrumentation::DeoptimizeEverything(const char* key) {
CHECK(deoptimization_enabled_);
ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
@@ -1114,7 +1103,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
- if (deoptimize && Runtime::Current()->IsDeoptimizeable(*return_pc)) {
+ if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
if (kVerboseInstrumentation) {
LOG(INFO) << "Deoptimizing "
<< visitor.caller->PrettyMethod()
@@ -1132,6 +1121,10 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
+ if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
+ }
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
<< " to PC " << reinterpret_cast<void*>(*return_pc);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 05c0aaa081..01071a541f 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,9 +133,6 @@ class Instrumentation {
enum class InstrumentationLevel {
kInstrumentNothing, // execute without instrumentation
kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
- kInstrumentWithInterpreterAndJit, // execute with interpreter initially and later the JIT
- // (if it is enabled). This level is special in that it
- // always requires re-instrumentation.
kInstrumentWithInterpreter // execute with interpreter
};
@@ -166,13 +163,6 @@ class Instrumentation {
}
bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
- // Executes everything with the interpreter/jit (if available).
- void ReJitEverything(const char* key)
- REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
-
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -239,7 +229,7 @@ class Instrumentation {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Update the code of a method respecting any installed stubs from debugger.
- void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code)
+ void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -264,7 +254,7 @@ class Instrumentation {
// Code is in boot image oat file which isn't compiled as debuggable.
// Need debug version (interpreter or jitted) if that's the case.
- bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+ bool NeedDebugVersionFor(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_);
bool AreExitStubsInstalled() const {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 28bcb97105..c235317020 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -438,14 +438,22 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) {
// about ALWAYS_INLINE (-Werror, -Wgcc-compat) in definitions.
//
+// b/30419309
+#if defined(__i386__)
+#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE __attribute__((optnone))
+#else
+#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE ALWAYS_INLINE
+#endif
+
template <bool is_range, bool do_assignability_check>
-static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method,
- Thread* self,
- ShadowFrame& shadow_frame,
- JValue* result,
- uint16_t number_of_inputs,
- uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
+IF_X86_OPTNONE_ELSE_ALWAYS_INLINE
+static bool DoCallCommon(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ JValue* result,
+ uint16_t number_of_inputs,
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool is_range>
ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index aeb438f05f..7ef3508164 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -255,17 +255,11 @@ static inline ObjPtr<mirror::String> ResolveString(Thread* self,
}
}
ArtMethod* method = shadow_frame.GetMethod();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
- method->GetDexFile()->NumStringIds());
- ObjPtr<mirror::String> string_ptr =
- mirror::StringDexCachePair::Lookup(method->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string_ptr = method->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
+ string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*dex_cache->GetDexFile(),
string_idx,
dex_cache);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6336cddc07..45611a93f7 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -594,7 +594,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
return nullptr;
}
- DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsDebuggable())
+ DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
<< "Should not be using cha on debuggable apps/runs!";
for (ArtMethod* single_impl : cha_single_implementation_list) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f08d4daf95..85636fb5b1 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -951,7 +951,8 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3
return interfaces->Get(idx);
} else {
dex::TypeIndex type_idx = klass->GetDirectInterfaceTypeIdx(idx);
- ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx);
+ ObjPtr<Class> interface = ClassLinker::LookupResolvedType(
+ type_idx, klass->GetDexCache(), klass->GetClassLoader());
return interface;
}
}
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index a59bb7b880..bef3ad29a3 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -40,14 +40,22 @@ inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
-inline mirror::String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
- return StringDexCachePair::Lookup(GetStrings(), string_idx.index_, NumStrings()).Read();
+ const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize;
+ DCHECK_LT(slot_idx, NumStrings());
+ return slot_idx;
}
-inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
- ObjPtr<mirror::String> resolved) {
- StringDexCachePair::Assign(GetStrings(), string_idx.index_, resolved.Ptr(), NumStrings());
+inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+ return GetStrings()[StringSlotIndex(string_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
+}
+
+inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
+ DCHECK(resolved != nullptr);
+ GetStrings()[StringSlotIndex(string_idx)].store(
+ StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
DCHECK(runtime->IsAotCompiler());
@@ -58,50 +66,70 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
}
inline void DexCache::ClearString(dex::StringIndex string_idx) {
- const uint32_t slot_idx = string_idx.index_ % NumStrings();
DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = StringSlotIndex(string_idx);
StringDexCacheType* slot = &GetStrings()[slot_idx];
// This is racy but should only be called from the transactional interpreter.
if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
- StringDexCachePair cleared(
- nullptr,
- StringDexCachePair::InvalidIndexForSlot(slot_idx));
+ StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
slot->store(cleared, std::memory_order_relaxed);
}
}
+inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
+ DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds());
+ const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedTypes());
+ return slot_idx;
+}
+
inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
// It is theorized that a load acquire is not required since obtaining the resolved class will
// always have an address dependency or a lock.
- DCHECK_LT(type_idx.index_, NumResolvedTypes());
- return GetResolvedTypes()[type_idx.index_].Read();
+ return GetResolvedTypes()[TypeSlotIndex(type_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}
inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
- DCHECK_LT(type_idx.index_, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
+ DCHECK(resolved != nullptr);
// TODO default transaction support.
// Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
// class but not necessarily seeing the loaded members like the static fields array.
// See b/32075261.
- reinterpret_cast<Atomic<GcRoot<mirror::Class>>&>(GetResolvedTypes()[type_idx.index_]).
- StoreRelease(GcRoot<Class>(resolved));
+ GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
+ TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
-inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
- DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
- return MethodTypeDexCachePair::Lookup(
- GetResolvedMethodTypes(), proto_idx, NumResolvedMethodTypes()).Read();
+inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = TypeSlotIndex(type_idx);
+ TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx];
+ // This is racy but should only be called from the single-threaded ImageWriter and tests.
+ if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
+ TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
+ slot->store(cleared, std::memory_order_relaxed);
+ }
}
-inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
+ const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedMethodTypes());
+ return slot_idx;
+}
- MethodTypeDexCachePair::Assign(GetResolvedMethodTypes(), proto_idx, resolved,
- NumResolvedMethodTypes());
+inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+ return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(proto_idx);
+}
+
+inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+ DCHECK(resolved != nullptr);
+ GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
+ MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
@@ -198,49 +226,49 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- VisitDexCachePairs<mirror::String, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
GetStrings(), NumStrings(), visitor);
- GcRoot<mirror::Class>* resolved_types = GetResolvedTypes();
- for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) {
- visitor.VisitRootIfNonNull(resolved_types[i].AddressWithoutBarrier());
- }
+ VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
+ GetResolvedTypes(), NumResolvedTypes(), visitor);
- VisitDexCachePairs<mirror::MethodType, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) {
- mirror::StringDexCacheType* src = GetStrings();
+inline void DexCache::FixupStrings(StringDexCacheType* dest, const Visitor& visitor) {
+ StringDexCacheType* src = GetStrings();
for (size_t i = 0, count = NumStrings(); i < count; ++i) {
StringDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::String* ptr = source.object.Read<kReadBarrierOption>();
- mirror::String* new_source = visitor(ptr);
+ String* ptr = source.object.Read<kReadBarrierOption>();
+ String* new_source = visitor(ptr);
source.object = GcRoot<String>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor) {
- GcRoot<mirror::Class>* src = GetResolvedTypes();
+inline void DexCache::FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) {
+ TypeDexCacheType* src = GetResolvedTypes();
for (size_t i = 0, count = NumResolvedTypes(); i < count; ++i) {
- mirror::Class* source = src[i].Read<kReadBarrierOption>();
- mirror::Class* new_source = visitor(source);
- dest[i] = GcRoot<mirror::Class>(new_source);
+ TypeDexCachePair source = src[i].load(std::memory_order_relaxed);
+ Class* ptr = source.object.Read<kReadBarrierOption>();
+ Class* new_source = visitor(ptr);
+ source.object = GcRoot<Class>(new_source);
+ dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedMethodTypes(mirror::MethodTypeDexCacheType* dest,
+inline void DexCache::FixupResolvedMethodTypes(MethodTypeDexCacheType* dest,
const Visitor& visitor) {
- mirror::MethodTypeDexCacheType* src = GetResolvedMethodTypes();
+ MethodTypeDexCacheType* src = GetResolvedMethodTypes();
for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) {
MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::MethodType* ptr = source.object.Read<kReadBarrierOption>();
- mirror::MethodType* new_source = visitor(ptr);
+ MethodType* ptr = source.object.Read<kReadBarrierOption>();
+ MethodType* new_source = visitor(ptr);
source.object = GcRoot<MethodType>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 741cf3bb47..3103a92c83 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -58,8 +58,8 @@ void DexCache::InitializeDexCache(Thread* self,
mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- GcRoot<mirror::Class>* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
+ mirror::TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
@@ -69,6 +69,10 @@ void DexCache::InitializeDexCache(Thread* self,
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
// Note that we allocate the method type dex caches regardless of this flag,
// and we make sure here that they're not used by the runtime. This is in the
@@ -104,8 +108,9 @@ void DexCache::InitializeDexCache(Thread* self,
CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
}
- for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
- CHECK(types[i].IsNull());
+ for (size_t i = 0; i < num_types; ++i) {
+ CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
}
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
@@ -121,6 +126,9 @@ void DexCache::InitializeDexCache(Thread* self,
if (strings != nullptr) {
mirror::StringDexCachePair::Initialize(strings);
}
+ if (types != nullptr) {
+ mirror::TypeDexCachePair::Initialize(types);
+ }
if (method_types != nullptr) {
mirror::MethodTypeDexCachePair::Initialize(method_types);
}
@@ -129,7 +137,7 @@ void DexCache::InitializeDexCache(Thread* self,
strings,
num_strings,
types,
- dex_file->NumTypeIds(),
+ num_types,
methods,
dex_file->NumMethodIds(),
fields,
@@ -143,7 +151,7 @@ void DexCache::Init(const DexFile* dex_file,
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6f88cc5df4..e68b0c7219 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -18,14 +18,14 @@
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
#include "array.h"
-#include "art_field.h"
-#include "class.h"
+#include "base/bit_utils.h"
#include "dex_file_types.h"
#include "object.h"
#include "object_array.h"
namespace art {
+class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
@@ -36,6 +36,7 @@ class Thread;
namespace mirror {
+class Class;
class MethodType;
class String;
@@ -60,7 +61,7 @@ template <typename T> struct PACKED(8) DexCachePair {
// it's always non-null if the id branch succeeds (except for the 0th id).
// Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
// the lookup id == stored id branch.
- DexCachePair(T* object, uint32_t index)
+ DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object),
index(index) {}
DexCachePair() = default;
@@ -74,39 +75,28 @@ template <typename T> struct PACKED(8) DexCachePair {
dex_cache[0].store(first_elem, std::memory_order_relaxed);
}
- static GcRoot<T> Lookup(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- uint32_t cache_size) {
- DCHECK_NE(cache_size, 0u);
- DexCachePair<T> element = dex_cache[idx % cache_size].load(std::memory_order_relaxed);
- if (idx != element.index) {
- return GcRoot<T>(nullptr);
- }
-
- DCHECK(!element.object.IsNull());
- return element.object;
- }
-
- static void Assign(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- T* object,
- uint32_t cache_size) {
- DCHECK_LT(idx % cache_size, cache_size);
- dex_cache[idx % cache_size].store(
- DexCachePair<T>(object, idx), std::memory_order_relaxed);
- }
-
static uint32_t InvalidIndexForSlot(uint32_t slot) {
// Since the cache size is a power of two, 0 will always map to slot 0.
// Use 1 for slot 0 and 0 for all other slots.
return (slot == 0) ? 1u : 0u;
}
+
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(!object.IsNull());
+ return object.Read();
+ }
};
-using StringDexCachePair = DexCachePair<mirror::String>;
+using TypeDexCachePair = DexCachePair<Class>;
+using TypeDexCacheType = std::atomic<TypeDexCachePair>;
+
+using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
-using MethodTypeDexCachePair = DexCachePair<mirror::MethodType>;
+using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
@@ -115,6 +105,11 @@ class MANAGED DexCache FINAL : public Object {
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
+ // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheTypeCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
+ "Type dex cache size is not a power of 2.");
+
// Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
static constexpr size_t kDexCacheStringCacheSize = 1024;
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
@@ -126,6 +121,10 @@ class MANAGED DexCache FINAL : public Object {
static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
"MethodType dex cache size is not a power of 2.");
+ static constexpr size_t StaticTypeSize() {
+ return kDexCacheTypeCacheSize;
+ }
+
static constexpr size_t StaticStringSize() {
return kDexCacheStringCacheSize;
}
@@ -156,7 +155,7 @@ class MANAGED DexCache FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
+ void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -211,7 +210,7 @@ class MANAGED DexCache FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
}
- mirror::String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
+ String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
@@ -226,6 +225,8 @@ class MANAGED DexCache FINAL : public Object {
void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -254,11 +255,11 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(StringsOffset(), strings);
}
- GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
+ TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
}
- void SetResolvedTypes(GcRoot<Class>* resolved_types)
+ void SetResolvedTypes(TypeDexCacheType* resolved_types)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
@@ -323,7 +324,7 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
- void SetLocation(ObjPtr<mirror::String> location) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);
// NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
// provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -340,7 +341,7 @@ class MANAGED DexCache FINAL : public Object {
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
@@ -351,12 +352,16 @@ class MANAGED DexCache FINAL : public Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
@@ -366,7 +371,7 @@ class MANAGED DexCache FINAL : public Object {
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
- uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
+ uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements.
uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
// elements.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8f978e122c..5693f67646 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -51,7 +51,8 @@ TEST_F(DexCacheTest, Open) {
EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings()
|| java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings());
- EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
+ EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
+ || java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 10fc90bc27..fd22d9e646 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -71,7 +71,7 @@ static void EnableDebugger() {
static void EnableDebugFeatures(uint32_t debug_flags) {
// Must match values in com.android.internal.os.Zygote.
enum {
- DEBUG_ENABLE_DEBUGGER = 1,
+ DEBUG_ENABLE_JDWP = 1,
DEBUG_ENABLE_CHECKJNI = 1 << 1,
DEBUG_ENABLE_ASSERT = 1 << 2,
DEBUG_ENABLE_SAFEMODE = 1 << 3,
@@ -79,6 +79,7 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
DEBUG_GENERATE_DEBUG_INFO = 1 << 5,
DEBUG_ALWAYS_JIT = 1 << 6,
DEBUG_NATIVE_DEBUGGABLE = 1 << 7,
+ DEBUG_JAVA_DEBUGGABLE = 1 << 8,
};
Runtime* const runtime = Runtime::Current();
@@ -100,11 +101,11 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ENABLE_JNI_LOGGING;
}
- Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0);
- if ((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0) {
+ Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_JDWP) != 0);
+ if ((debug_flags & DEBUG_ENABLE_JDWP) != 0) {
EnableDebugger();
}
- debug_flags &= ~DEBUG_ENABLE_DEBUGGER;
+ debug_flags &= ~DEBUG_ENABLE_JDWP;
const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
if (safe_mode) {
@@ -130,6 +131,14 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ if ((debug_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
+ runtime->AddCompilerOption("--debuggable");
+ runtime->SetJavaDebuggable(true);
+ // Deoptimize the boot image as it may be non-debuggable.
+ runtime->DeoptimizeBootImage();
+ debug_flags &= ~DEBUG_JAVA_DEBUGGABLE;
+ }
+
if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
runtime->AddCompilerOption("--generate-debug-info");
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index f1c350f23c..0b667fec45 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -53,7 +53,7 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
+ CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index)));
}
@@ -69,8 +69,11 @@ static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint typ
jobject type) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
- dex_cache->SetResolvedType(dex::TypeIndex(type_index), soa.Decode<mirror::Class>(type));
+ CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
+ ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type);
+ if (t != nullptr) {
+ dex_cache->SetResolvedType(dex::TypeIndex(type_index), t);
+ }
}
static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
@@ -78,7 +81,10 @@ static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint s
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string));
+ ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string);
+ if (s != nullptr) {
+ dex_cache->SetResolvedString(dex::StringIndex(string_index), s);
+ }
}
static JNINativeMethod gMethods[] = {
diff --git a/runtime/oat.h b/runtime/oat.h
index 106bd4096f..62f010ba97 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '0', '5', '\0' }; // Stack map alignment change.
+ static constexpr uint8_t kOatVersion[] = { '1', '0', '6', '\0' }; // hash-based DexCache types
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8554fa2693..b19ace5464 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -530,7 +530,7 @@ bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
class_path = OatFile::kSpecialSharedLibrary;
}
argv.push_back(class_path);
- if (runtime->IsDebuggable()) {
+ if (runtime->IsJavaDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index b68fc60c6c..c2f17924da 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -61,9 +61,14 @@ namespace openjdkjvmti {
bool ClassLoaderHelper::AddToClassLoader(art::Thread* self,
art::Handle<art::mirror::ClassLoader> loader,
const art::DexFile* dex_file) {
+ art::ScopedObjectAccessUnchecked soa(self);
art::StackHandleScope<2> hs(self);
- art::Handle<art::mirror::Object> java_dex_file_obj(hs.NewHandle(FindSourceDexFileObject(self,
- loader)));
+ if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file);
+ return true;
+ }
+ art::Handle<art::mirror::Object> java_dex_file_obj(
+ hs.NewHandle(FindSourceDexFileObject(self, loader)));
if (java_dex_file_obj.IsNull()) {
return false;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index b76d74ae79..4b8108accf 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -75,9 +75,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
allocator_(allocator),
obsoleted_methods_(obsoleted_methods),
- obsolete_maps_(obsolete_maps),
- is_runtime_frame_(false) {
- }
+ obsolete_maps_(obsolete_maps) { }
~ObsoleteMethodStackVisitor() OVERRIDE {}
@@ -100,21 +98,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
art::ArtMethod* old_method = GetMethod();
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool prev_was_runtime_frame_ = is_runtime_frame_;
- is_runtime_frame_ = old_method->IsRuntimeMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
- // The check below works since when we deoptimize we set shadow frames for all frames until a
- // native/runtime transition and for those set the return PC to a function that will complete
- // the deoptimization. This does leave us with the unfortunate side-effect that frames just
- // below runtime frames cannot be deoptimized at the moment.
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- // TODO b/33616143
- if (!IsShadowFrame() && prev_was_runtime_frame_) {
- LOG(FATAL) << "Deoptimization failed due to runtime method in stack. See b/33616143";
- }
// We cannot ensure that the right dex file is used in inlined frames so we don't support
// redefining them.
DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
@@ -163,9 +147,6 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
// values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
// the redefined classes ClassExt by the caller.
std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool is_runtime_frame_;
};
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -464,7 +445,8 @@ void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::C
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
- CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator());
+ art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
+ CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
ctx.obsolete_methods.insert(&m);
@@ -508,18 +490,6 @@ void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
}
}
-// TODO It should be possible to only deoptimize the specific obsolete methods.
-// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
-// If one of these frames is an obsolete method we have a problem. b/33616143
-// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
-// registers across suspend points.
-// TODO Pending b/33630159
-void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
- art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
- art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
- i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
-}
-
bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
art::StackHandleScope<1> hs(driver_->self_);
@@ -733,33 +703,32 @@ class RedefinitionDataHolder {
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
holder->SetMirrorClass(klass_index, GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
- holder->SetSourceClassLoader(klass_index, loader.Get());
- if (loader.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
- ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
- if (dex_file_obj.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- holder->SetNewDexFileCookie(klass_index,
- ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
- dex_file_obj,
- dex_file_.get()).Ptr());
- if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
- driver_->self_->AssertPendingOOMException();
- driver_->self_->ClearException();
- RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
- return false;
+ // The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
+ if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ holder->SetSourceClassLoader(klass_index, loader.Get());
+ art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
+ ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
+ holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ if (dex_file_obj.Get() == nullptr) {
+ // TODO Better error msg.
+ RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
+ return false;
+ }
+ holder->SetNewDexFileCookie(klass_index,
+ ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
+ dex_file_obj,
+ dex_file_.get()).Ptr());
+ if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
+ return false;
+ }
}
holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
if (holder->GetNewDexCache(klass_index) == nullptr) {
@@ -846,6 +815,13 @@ jvmtiError Redefiner::Run() {
// cleaned up by the GC eventually.
return result_;
}
+ int32_t counter = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (holder.GetSourceClassLoader(counter) == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ }
+ counter++;
+ }
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -864,26 +840,20 @@ jvmtiError Redefiner::Run() {
// TODO We need to update all debugger MethodIDs so they note the method they point to is
// obsolete or implement some other well defined semantics.
// TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
- int32_t cnt = 0;
+ counter = 0;
for (Redefiner::ClassRedefinition& redef : redefinitions_) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- art::mirror::Class* klass = holder.GetMirrorClass(cnt);
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(cnt),
- holder.GetNewDexFileCookie(cnt));
+ if (holder.GetSourceClassLoader(counter) != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
+ holder.GetNewDexFileCookie(counter));
+ }
+ art::mirror::Class* klass = holder.GetMirrorClass(counter);
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(cnt), holder.GetOriginalDexFileBytes(cnt));
- cnt++;
+ redef.UpdateClass(klass, holder.GetNewDexCache(counter),
+ holder.GetOriginalDexFileBytes(counter));
+ counter++;
}
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
- // DexCache. (b/33630159)
- // TODO This can fail (leave some methods optimized) near runtime methods (including
- // quick-to-interpreter transition function).
- // TODO We probably don't need this at all once we have a way to ensure that the
- // current_art_method is never stashed in a (physical) register by the JIT and lost to the
- // stack-walker.
- EnsureObsoleteMethodsAreDeoptimized();
// TODO Verify the new Class.
// TODO Shrink the obsolete method maps if possible?
// TODO find appropriate class loader.
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 85df6e1024..5bcaef8971 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -127,6 +127,10 @@ class Redefiner {
art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const art::DexFile& GetDexFile() {
+ return *dex_file_;
+ }
+
art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -239,14 +243,6 @@ class Redefiner {
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
- // DexCache.
- void EnsureObsoleteMethodsAreDeoptimized()
- REQUIRES(art::Locks::mutator_lock_)
- REQUIRES(!art::Locks::thread_list_lock_,
- !art::Locks::classlinker_classes_lock_);
-
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
RecordFailure(result, "NO CLASS", error_msg);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ccc5f7a8ee..9113f83cd4 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -300,8 +300,6 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xplugin:_")
.WithType<std::vector<Plugin>>().AppendValues()
.IntoKey(M::Plugins)
- .Define("-Xfully-deoptable")
- .IntoKey(M::FullyDeoptable)
.Define("-XX:ThreadSuspendTimeout=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::ThreadSuspendTimeout)
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8d758a4a4b..4e76951189 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -347,9 +347,11 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
callee_method_ = method;
return true;
} else if (!single_frame_deopt_ &&
- !Runtime::Current()->IsDeoptimizeable(GetCurrentQuickFramePc())) {
+ !Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
// We hit some code that's not deoptimizeable. However, Single-frame deoptimization triggered
// from compiled code is always allowed since HDeoptimize always saves the full environment.
+ LOG(WARNING) << "Got request to deoptimize un-deoptimizable method "
+ << method->PrettyMethod();
FinishStackWalk();
return false; // End stack walk.
} else {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b30e5109b6..693b8f4e2f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -245,7 +245,7 @@ Runtime::Runtime()
force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
- is_fully_deoptable_(false),
+ is_java_debuggable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
oat_file_manager_(nullptr),
@@ -826,14 +826,6 @@ bool Runtime::IsShuttingDown(Thread* self) {
return IsShuttingDownLocked();
}
-bool Runtime::IsDebuggable() const {
- if (IsFullyDeoptable()) {
- return true;
- }
- const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
- return oat_file != nullptr && oat_file->IsDebuggable();
-}
-
void Runtime::StartDaemonThreads() {
ScopedTrace trace(__FUNCTION__);
VLOG(startup) << "Runtime::StartDaemonThreads entering";
@@ -1039,6 +1031,12 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
+ for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ if (option.starts_with("--debuggable")) {
+ SetJavaDebuggable(true);
+ break;
+ }
+ }
image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
image_location_ = runtime_options.GetOrDefault(Opt::Image);
@@ -1053,8 +1051,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
- is_fully_deoptable_ = runtime_options.Exists(Opt::FullyDeoptable);
-
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1259,6 +1255,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
+ if (IsJavaDebuggable()) {
+ // Now that we have loaded the boot image, deoptimize its methods if we are running
+ // debuggable, as the code may have been compiled non-debuggable.
+ DeoptimizeBootImage();
+ }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
@@ -1405,7 +1406,7 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
}
// Is the process debuggable? Otherwise, do not attempt to load the plugin.
- if (!runtime->IsDebuggable()) {
+ if (!runtime->IsJavaDebuggable()) {
*error_msg = "Process is not debuggable.";
return false;
}
@@ -2206,9 +2207,15 @@ bool Runtime::IsVerificationSoftFail() const {
return verify_ == verifier::VerifyMode::kSoftFail;
}
-bool Runtime::IsDeoptimizeable(uintptr_t code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return !heap_->IsInBootImageOatFile(reinterpret_cast<void *>(code));
+bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
+ // We only support async deopt (ie the compiled code is not explicitly asking for
+ // deopt, but something else like the debugger) in debuggable JIT code.
+ // We could look at the oat file where `code` is being defined,
+ // and check whether it's been compiled debuggable, but we decided to
+ // only rely on the JIT for debuggable apps.
+ return IsJavaDebuggable() &&
+ GetJit() != nullptr &&
+ GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
}
LinearAlloc* Runtime::CreateLinearAlloc() {
@@ -2292,4 +2299,43 @@ RuntimeCallbacks* Runtime::GetRuntimeCallbacks() {
return callbacks_.get();
}
+// Used to patch boot image method entry point to interpreter bridge.
+class UpdateEntryPointsClassVisitor : public ClassVisitor {
+ public:
+ explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
+ : instrumentation_(instrumentation) {}
+
+ bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ !m.IsNative() &&
+ !m.IsProxyMethod()) {
+ instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
+ }
+ }
+ return true;
+ }
+
+ private:
+ instrumentation::Instrumentation* const instrumentation_;
+};
+
+void Runtime::SetJavaDebuggable(bool value) {
+ is_java_debuggable_ = value;
+ // Do not call DeoptimizeBootImage just yet, the runtime may still be starting up.
+}
+
+void Runtime::DeoptimizeBootImage() {
+ // If we've already started and we are setting this runtime to debuggable,
+ // we patch entry points of methods in boot image to interpreter bridge, as
+ // boot image code may be AOT compiled as not debuggable.
+ if (!GetInstrumentation()->IsForcedInterpretOnly()) {
+ ScopedObjectAccess soa(Thread::Current());
+ UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
+ GetClassLinker()->VisitClasses(&visitor);
+ }
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f7d6810ff5..30b1756d5d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -434,7 +434,7 @@ class Runtime {
kInitialize
};
- jit::Jit* GetJit() {
+ jit::Jit* GetJit() const {
return jit_.get();
}
@@ -569,15 +569,14 @@ class Runtime {
return jit_options_.get();
}
- bool IsDebuggable() const;
-
- bool IsFullyDeoptable() const {
- return is_fully_deoptable_;
+ bool IsJavaDebuggable() const {
+ return is_java_debuggable_;
}
- void SetFullyDeoptable(bool value) {
- is_fully_deoptable_ = value;
- }
+ void SetJavaDebuggable(bool value);
+
+ // Deoptimize the boot image, called for Java debuggable apps.
+ void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
return is_native_debuggable_;
@@ -639,9 +638,9 @@ class Runtime {
return zygote_no_threads_;
}
- // Returns if the code can be deoptimized. Code may be compiled with some
+ // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
@@ -863,8 +862,8 @@ class Runtime {
// Whether we are running under native debugger.
bool is_native_debuggable_;
- // Whether we are expected to be deoptable at all points.
- bool is_fully_deoptable_;
+ // Whether Java code needs to be debuggable.
+ bool is_java_debuggable_;
// The maximum number of failed boots we allow before pruning the dalvik cache
// and trying again. This option is only inspected when we're running as a
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 0a996a9e55..495296cf7d 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,56 +14,33 @@
* limitations under the License.
*/
+#include "runtime.h"
+
#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
-#include "base/logging.h"
-#include "base/mutex.h"
-#include "thread-inl.h"
-#include "utils.h"
+#include <cstring>
-namespace art {
+#include "runtime_common.h"
-static constexpr bool kUseSignalHandler = false;
+namespace art {
struct sigaction old_action;
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handling_unexpected_signal = false;
- if (handling_unexpected_signal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- _exit(1);
- }
- handling_unexpected_signal = true;
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- // Print this out first in case DumpObject faults.
- LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
- }
+void HandleUnexpectedSignalAndroid(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ false);
+
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
}
void Runtime::InitPlatformSignalHandlers() {
- if (kUseSignalHandler) {
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
- int rc = 0;
- rc += sigaction(SIGSEGV, &action, &old_action);
- CHECK_EQ(rc, 0);
+ // Enable the signal handler dumping crash information to the logcat
+ // when the Android root is not "/system".
+ const char* android_root = getenv("ANDROID_ROOT");
+ if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
+ &old_action,
+ /* handle_timeout_signal */ false);
}
}
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
new file mode 100644
index 0000000000..70aff37961
--- /dev/null
+++ b/runtime/runtime_common.cc
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_common.h"
+
+#include <signal.h>
+
+#include <cinttypes>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "android-base/stringprintf.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "native_stack_dump.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+using android::base::StringPrintf;
+
+static constexpr bool kUseSigRTTimeout = true;
+static constexpr bool kDumpNativeStackOnTimeout = true;
+
+const char* GetSignalName(int signal_number) {
+ switch (signal_number) {
+ case SIGABRT: return "SIGABRT";
+ case SIGBUS: return "SIGBUS";
+ case SIGFPE: return "SIGFPE";
+ case SIGILL: return "SIGILL";
+ case SIGPIPE: return "SIGPIPE";
+ case SIGSEGV: return "SIGSEGV";
+#if defined(SIGSTKFLT)
+ case SIGSTKFLT: return "SIGSTKFLT";
+#endif
+ case SIGTRAP: return "SIGTRAP";
+ }
+ return "??";
+}
+
+const char* GetSignalCodeName(int signal_number, int signal_code) {
+ // Try the signal-specific codes...
+ switch (signal_number) {
+ case SIGILL:
+ switch (signal_code) {
+ case ILL_ILLOPC: return "ILL_ILLOPC";
+ case ILL_ILLOPN: return "ILL_ILLOPN";
+ case ILL_ILLADR: return "ILL_ILLADR";
+ case ILL_ILLTRP: return "ILL_ILLTRP";
+ case ILL_PRVOPC: return "ILL_PRVOPC";
+ case ILL_PRVREG: return "ILL_PRVREG";
+ case ILL_COPROC: return "ILL_COPROC";
+ case ILL_BADSTK: return "ILL_BADSTK";
+ }
+ break;
+ case SIGBUS:
+ switch (signal_code) {
+ case BUS_ADRALN: return "BUS_ADRALN";
+ case BUS_ADRERR: return "BUS_ADRERR";
+ case BUS_OBJERR: return "BUS_OBJERR";
+ }
+ break;
+ case SIGFPE:
+ switch (signal_code) {
+ case FPE_INTDIV: return "FPE_INTDIV";
+ case FPE_INTOVF: return "FPE_INTOVF";
+ case FPE_FLTDIV: return "FPE_FLTDIV";
+ case FPE_FLTOVF: return "FPE_FLTOVF";
+ case FPE_FLTUND: return "FPE_FLTUND";
+ case FPE_FLTRES: return "FPE_FLTRES";
+ case FPE_FLTINV: return "FPE_FLTINV";
+ case FPE_FLTSUB: return "FPE_FLTSUB";
+ }
+ break;
+ case SIGSEGV:
+ switch (signal_code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+#if defined(SEGV_BNDERR)
+ case SEGV_BNDERR: return "SEGV_BNDERR";
+#endif
+ }
+ break;
+ case SIGTRAP:
+ switch (signal_code) {
+ case TRAP_BRKPT: return "TRAP_BRKPT";
+ case TRAP_TRACE: return "TRAP_TRACE";
+ }
+ break;
+ }
+ // Then the other codes...
+ switch (signal_code) {
+ case SI_USER: return "SI_USER";
+#if defined(SI_KERNEL)
+ case SI_KERNEL: return "SI_KERNEL";
+#endif
+ case SI_QUEUE: return "SI_QUEUE";
+ case SI_TIMER: return "SI_TIMER";
+ case SI_MESGQ: return "SI_MESGQ";
+ case SI_ASYNCIO: return "SI_ASYNCIO";
+#if defined(SI_SIGIO)
+ case SI_SIGIO: return "SI_SIGIO";
+#endif
+#if defined(SI_TKILL)
+ case SI_TKILL: return "SI_TKILL";
+#endif
+ }
+ // Then give up...
+ return "?";
+}
+
+struct UContext {
+ explicit UContext(void* raw_context)
+ : context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {}
+
+ void Dump(std::ostream& os) const;
+
+ void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const;
+ void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const;
+
+ void DumpX86Flags(std::ostream& os, uint32_t flags) const;
+
+ mcontext_t& context;
+};
+
+void UContext::Dump(std::ostream& os) const {
+ // TODO: support non-x86 hosts.
+#if defined(__APPLE__) && defined(__i386__)
+ DumpRegister32(os, "eax", context->__ss.__eax);
+ DumpRegister32(os, "ebx", context->__ss.__ebx);
+ DumpRegister32(os, "ecx", context->__ss.__ecx);
+ DumpRegister32(os, "edx", context->__ss.__edx);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context->__ss.__edi);
+ DumpRegister32(os, "esi", context->__ss.__esi);
+ DumpRegister32(os, "ebp", context->__ss.__ebp);
+ DumpRegister32(os, "esp", context->__ss.__esp);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context->__ss.__eip);
+ os << " ";
+ DumpRegister32(os, "eflags", context->__ss.__eflags);
+ DumpX86Flags(os, context->__ss.__eflags);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context->__ss.__cs);
+ DumpRegister32(os, "ds", context->__ss.__ds);
+ DumpRegister32(os, "es", context->__ss.__es);
+ DumpRegister32(os, "fs", context->__ss.__fs);
+ os << '\n';
+ DumpRegister32(os, "gs", context->__ss.__gs);
+ DumpRegister32(os, "ss", context->__ss.__ss);
+#elif defined(__linux__) && defined(__i386__)
+ DumpRegister32(os, "eax", context.gregs[REG_EAX]);
+ DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
+ DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
+ DumpRegister32(os, "edx", context.gregs[REG_EDX]);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context.gregs[REG_EDI]);
+ DumpRegister32(os, "esi", context.gregs[REG_ESI]);
+ DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
+ DumpRegister32(os, "esp", context.gregs[REG_ESP]);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context.gregs[REG_EIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context.gregs[REG_CS]);
+ DumpRegister32(os, "ds", context.gregs[REG_DS]);
+ DumpRegister32(os, "es", context.gregs[REG_ES]);
+ DumpRegister32(os, "fs", context.gregs[REG_FS]);
+ os << '\n';
+ DumpRegister32(os, "gs", context.gregs[REG_GS]);
+ DumpRegister32(os, "ss", context.gregs[REG_SS]);
+#elif defined(__linux__) && defined(__x86_64__)
+ DumpRegister64(os, "rax", context.gregs[REG_RAX]);
+ DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
+ DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
+ DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
+ os << '\n';
+
+ DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
+ DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
+ DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
+ DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
+ os << '\n';
+
+ DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
+ DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
+ DumpRegister64(os, "r10", context.gregs[REG_R10]);
+ DumpRegister64(os, "r11", context.gregs[REG_R11]);
+ os << '\n';
+
+ DumpRegister64(os, "r12", context.gregs[REG_R12]);
+ DumpRegister64(os, "r13", context.gregs[REG_R13]);
+ DumpRegister64(os, "r14", context.gregs[REG_R14]);
+ DumpRegister64(os, "r15", context.gregs[REG_R15]);
+ os << '\n';
+
+ DumpRegister64(os, "rip", context.gregs[REG_RIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
+ DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
+ DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
+ os << '\n';
+#else
+ os << "Unknown architecture/word size/OS in ucontext dump";
+#endif
+}
+
+void UContext::DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
+ os << StringPrintf(" %6s: 0x%08x", name, value);
+}
+
+void UContext::DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
+ os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
+}
+
+void UContext::DumpX86Flags(std::ostream& os, uint32_t flags) const {
+ os << " [";
+ if ((flags & (1 << 0)) != 0) {
+ os << " CF";
+ }
+ if ((flags & (1 << 2)) != 0) {
+ os << " PF";
+ }
+ if ((flags & (1 << 4)) != 0) {
+ os << " AF";
+ }
+ if ((flags & (1 << 6)) != 0) {
+ os << " ZF";
+ }
+ if ((flags & (1 << 7)) != 0) {
+ os << " SF";
+ }
+ if ((flags & (1 << 8)) != 0) {
+ os << " TF";
+ }
+ if ((flags & (1 << 9)) != 0) {
+ os << " IF";
+ }
+ if ((flags & (1 << 10)) != 0) {
+ os << " DF";
+ }
+ if ((flags & (1 << 11)) != 0) {
+ os << " OF";
+ }
+ os << " ]";
+}
+
+int GetTimeoutSignal() {
+#if defined(__APPLE__)
+ // Mac does not support realtime signals.
+ UNUSED(kUseSigRTTimeout);
+ return -1;
+#else
+ return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
+#endif
+}
+
+static bool IsTimeoutSignal(int signal_number) {
+ return signal_number == GetTimeoutSignal();
+}
+
+#if defined(__APPLE__)
+// On macOS, clang complains about art::HandleUnexpectedSignalCommon's
+// stack frame size being too large; disable that warning locally.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+#endif
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux) {
+ bool handle_timeout_signal = running_on_linux;
+ bool dump_on_stderr = running_on_linux;
+
+ static bool handling_unexpected_signal = false;
+ if (handling_unexpected_signal) {
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ "HandleUnexpectedSignal reentered\n");
+ if (handle_timeout_signal) {
+ if (IsTimeoutSignal(signal_number)) {
+ // Ignore a recursive timeout.
+ return;
+ }
+ }
+ _exit(1);
+ }
+ handling_unexpected_signal = true;
+
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
+ signal_number == SIGFPE || signal_number == SIGSEGV);
+
+ OsInfo os_info;
+ const char* cmd_line = GetCmdLine();
+ if (cmd_line == nullptr) {
+ cmd_line = "<unset>"; // Because no-one called InitLogging.
+ }
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
+ UContext thread_context(raw_context);
+ Backtrace thread_backtrace(raw_context);
+
+ std::ostringstream stream;
+ stream << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
+ << StringPrintf("Fatal signal %d (%s), code %d (%s)",
+ signal_number,
+ GetSignalName(signal_number),
+ info->si_code,
+ GetSignalCodeName(signal_number, info->si_code))
+ << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << '\n'
+ << "OS: " << Dumpable<OsInfo>(os_info) << '\n'
+ << "Cmdline: " << cmd_line << '\n'
+ << "Thread: " << tid << " \"" << thread_name << "\"" << '\n'
+ << "Registers:\n" << Dumpable<UContext>(thread_context) << '\n'
+ << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << '\n';
+ if (dump_on_stderr) {
+ // Note: We are using cerr directly instead of LOG macros to ensure even just partial output
+ // makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
+ // considering this is an abort situation.
+ std::cerr << stream.str() << std::flush;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << stream.str() << std::flush;
+ }
+ if (kIsDebugBuild && signal_number == SIGSEGV) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+ }
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ if (handle_timeout_signal && IsTimeoutSignal(signal_number)) {
+ // Special timeout signal. Try to dump all threads.
+ // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
+ // are of value here.
+ runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
+ std::cerr << std::endl;
+ }
+
+ if (dump_on_stderr) {
+ std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+ }
+ }
+}
+
+#if defined(__APPLE__)
+#pragma GCC diagnostic pop
+#endif
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal) {
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = newact;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+
+ int rc = 0;
+ rc += sigaction(SIGABRT, &action, oldact);
+ rc += sigaction(SIGBUS, &action, oldact);
+ rc += sigaction(SIGFPE, &action, oldact);
+ rc += sigaction(SIGILL, &action, oldact);
+ rc += sigaction(SIGPIPE, &action, oldact);
+ rc += sigaction(SIGSEGV, &action, oldact);
+#if defined(SIGSTKFLT)
+ rc += sigaction(SIGSTKFLT, &action, oldact);
+#endif
+ rc += sigaction(SIGTRAP, &action, oldact);
+ // Special dump-all timeout.
+ if (handle_timeout_signal && GetTimeoutSignal() != -1) {
+ rc += sigaction(GetTimeoutSignal(), &action, oldact);
+ }
+ CHECK_EQ(rc, 0);
+}
+
+} // namespace art
diff --git a/runtime/runtime_common.h b/runtime/runtime_common.h
new file mode 100644
index 0000000000..832b6bbf3e
--- /dev/null
+++ b/runtime/runtime_common.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_COMMON_H_
+#define ART_RUNTIME_RUNTIME_COMMON_H_
+
+// Code shared by runtime/runtime_android.cc and runtime/runtime_linux.cc.
+
+#if defined(__APPLE__)
+// On macOS, _XOPEN_SOURCE must be defined to access ucontext
+// routines, as they are considered deprecated on that platform.
+#define _XOPEN_SOURCE
+#endif
+
+#include <sys/utsname.h>
+#include <ucontext.h>
+
+#include <iomanip>
+
+#include "base/dumpable.h"
+#include "native_stack_dump.h"
+#include "utils.h"
+
+namespace art {
+
+struct Backtrace {
+ public:
+ explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
+ void Dump(std::ostream& os) const {
+ DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
+ }
+ private:
+ // Stores the context of the signal that was unexpected and will terminate the runtime. The
+ // DumpNativeStack code will take care of casting it to the expected type. This is required
+ // as our signal handler runs on an alternate stack.
+ void* raw_context_;
+};
+
+struct OsInfo {
+ void Dump(std::ostream& os) const {
+ utsname info;
+ uname(&info);
+ // Linux 2.6.38.8-gg784 (x86_64)
+ // Darwin 11.4.0 (x86_64)
+ os << info.sysname << " " << info.release << " (" << info.machine << ")";
+ }
+};
+
+const char* GetSignalName(int signal_number);
+const char* GetSignalCodeName(int signal_number, int signal_code);
+
+// Return the signal number we recognize as timeout. -1 means not active/supported.
+int GetTimeoutSignal();
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux);
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal);
+
+} // namespace art
+
+#endif // ART_RUNTIME_RUNTIME_COMMON_H_
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index b8894d2569..ad61cf373b 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -17,359 +17,19 @@
#include "runtime.h"
#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
#include <iostream>
-#include <sstream>
-#include "android-base/stringprintf.h"
-
-#include "base/dumpable.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "native_stack_dump.h"
-#include "thread-inl.h"
-#include "thread_list.h"
-#include "utils.h"
+#include "runtime_common.h"
namespace art {
-using android::base::StringPrintf;
-
-static constexpr bool kUseSigRTTimeout = true;
-static constexpr bool kDumpNativeStackOnTimeout = true;
-
-struct Backtrace {
- public:
- explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
- void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
- }
- private:
- // Stores the context of the signal that was unexpected and will terminate the runtime. The
- // DumpNativeStack code will take care of casting it to the expected type. This is required
- // as our signal handler runs on an alternate stack.
- void* raw_context_;
-};
-
-struct OsInfo {
- void Dump(std::ostream& os) const {
- utsname info;
- uname(&info);
- // Linux 2.6.38.8-gg784 (x86_64)
- // Darwin 11.4.0 (x86_64)
- os << info.sysname << " " << info.release << " (" << info.machine << ")";
- }
-};
-
-static const char* GetSignalName(int signal_number) {
- switch (signal_number) {
- case SIGABRT: return "SIGABRT";
- case SIGBUS: return "SIGBUS";
- case SIGFPE: return "SIGFPE";
- case SIGILL: return "SIGILL";
- case SIGPIPE: return "SIGPIPE";
- case SIGSEGV: return "SIGSEGV";
-#if defined(SIGSTKFLT)
- case SIGSTKFLT: return "SIGSTKFLT";
-#endif
- case SIGTRAP: return "SIGTRAP";
- }
- return "??";
-}
-
-static const char* GetSignalCodeName(int signal_number, int signal_code) {
- // Try the signal-specific codes...
- switch (signal_number) {
- case SIGILL:
- switch (signal_code) {
- case ILL_ILLOPC: return "ILL_ILLOPC";
- case ILL_ILLOPN: return "ILL_ILLOPN";
- case ILL_ILLADR: return "ILL_ILLADR";
- case ILL_ILLTRP: return "ILL_ILLTRP";
- case ILL_PRVOPC: return "ILL_PRVOPC";
- case ILL_PRVREG: return "ILL_PRVREG";
- case ILL_COPROC: return "ILL_COPROC";
- case ILL_BADSTK: return "ILL_BADSTK";
- }
- break;
- case SIGBUS:
- switch (signal_code) {
- case BUS_ADRALN: return "BUS_ADRALN";
- case BUS_ADRERR: return "BUS_ADRERR";
- case BUS_OBJERR: return "BUS_OBJERR";
- }
- break;
- case SIGFPE:
- switch (signal_code) {
- case FPE_INTDIV: return "FPE_INTDIV";
- case FPE_INTOVF: return "FPE_INTOVF";
- case FPE_FLTDIV: return "FPE_FLTDIV";
- case FPE_FLTOVF: return "FPE_FLTOVF";
- case FPE_FLTUND: return "FPE_FLTUND";
- case FPE_FLTRES: return "FPE_FLTRES";
- case FPE_FLTINV: return "FPE_FLTINV";
- case FPE_FLTSUB: return "FPE_FLTSUB";
- }
- break;
- case SIGSEGV:
- switch (signal_code) {
- case SEGV_MAPERR: return "SEGV_MAPERR";
- case SEGV_ACCERR: return "SEGV_ACCERR";
-#if defined(SEGV_BNDERR)
- case SEGV_BNDERR: return "SEGV_BNDERR";
-#endif
- }
- break;
- case SIGTRAP:
- switch (signal_code) {
- case TRAP_BRKPT: return "TRAP_BRKPT";
- case TRAP_TRACE: return "TRAP_TRACE";
- }
- break;
- }
- // Then the other codes...
- switch (signal_code) {
- case SI_USER: return "SI_USER";
-#if defined(SI_KERNEL)
- case SI_KERNEL: return "SI_KERNEL";
-#endif
- case SI_QUEUE: return "SI_QUEUE";
- case SI_TIMER: return "SI_TIMER";
- case SI_MESGQ: return "SI_MESGQ";
- case SI_ASYNCIO: return "SI_ASYNCIO";
-#if defined(SI_SIGIO)
- case SI_SIGIO: return "SI_SIGIO";
-#endif
-#if defined(SI_TKILL)
- case SI_TKILL: return "SI_TKILL";
-#endif
- }
- // Then give up...
- return "?";
-}
-
-struct UContext {
- explicit UContext(void* raw_context) :
- context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {
- }
-
- void Dump(std::ostream& os) const {
- // TODO: support non-x86 hosts (not urgent because this code doesn't run on targets).
-#if defined(__APPLE__) && defined(__i386__)
- DumpRegister32(os, "eax", context->__ss.__eax);
- DumpRegister32(os, "ebx", context->__ss.__ebx);
- DumpRegister32(os, "ecx", context->__ss.__ecx);
- DumpRegister32(os, "edx", context->__ss.__edx);
- os << '\n';
-
- DumpRegister32(os, "edi", context->__ss.__edi);
- DumpRegister32(os, "esi", context->__ss.__esi);
- DumpRegister32(os, "ebp", context->__ss.__ebp);
- DumpRegister32(os, "esp", context->__ss.__esp);
- os << '\n';
-
- DumpRegister32(os, "eip", context->__ss.__eip);
- os << " ";
- DumpRegister32(os, "eflags", context->__ss.__eflags);
- DumpX86Flags(os, context->__ss.__eflags);
- os << '\n';
-
- DumpRegister32(os, "cs", context->__ss.__cs);
- DumpRegister32(os, "ds", context->__ss.__ds);
- DumpRegister32(os, "es", context->__ss.__es);
- DumpRegister32(os, "fs", context->__ss.__fs);
- os << '\n';
- DumpRegister32(os, "gs", context->__ss.__gs);
- DumpRegister32(os, "ss", context->__ss.__ss);
-#elif defined(__linux__) && defined(__i386__)
- DumpRegister32(os, "eax", context.gregs[REG_EAX]);
- DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
- DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
- DumpRegister32(os, "edx", context.gregs[REG_EDX]);
- os << '\n';
-
- DumpRegister32(os, "edi", context.gregs[REG_EDI]);
- DumpRegister32(os, "esi", context.gregs[REG_ESI]);
- DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
- DumpRegister32(os, "esp", context.gregs[REG_ESP]);
- os << '\n';
-
- DumpRegister32(os, "eip", context.gregs[REG_EIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", context.gregs[REG_CS]);
- DumpRegister32(os, "ds", context.gregs[REG_DS]);
- DumpRegister32(os, "es", context.gregs[REG_ES]);
- DumpRegister32(os, "fs", context.gregs[REG_FS]);
- os << '\n';
- DumpRegister32(os, "gs", context.gregs[REG_GS]);
- DumpRegister32(os, "ss", context.gregs[REG_SS]);
-#elif defined(__linux__) && defined(__x86_64__)
- DumpRegister64(os, "rax", context.gregs[REG_RAX]);
- DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
- DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
- DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
- os << '\n';
-
- DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
- DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
- DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
- DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
- os << '\n';
-
- DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
- DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
- DumpRegister64(os, "r10", context.gregs[REG_R10]);
- DumpRegister64(os, "r11", context.gregs[REG_R11]);
- os << '\n';
-
- DumpRegister64(os, "r12", context.gregs[REG_R12]);
- DumpRegister64(os, "r13", context.gregs[REG_R13]);
- DumpRegister64(os, "r14", context.gregs[REG_R14]);
- DumpRegister64(os, "r15", context.gregs[REG_R15]);
- os << '\n';
-
- DumpRegister64(os, "rip", context.gregs[REG_RIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
- DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
- DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
- os << '\n';
-#else
- os << "Unknown architecture/word size/OS in ucontext dump";
-#endif
- }
-
- void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
- os << StringPrintf(" %6s: 0x%08x", name, value);
- }
-
- void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
- os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
- }
+void HandleUnexpectedSignalLinux(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ true);
- void DumpX86Flags(std::ostream& os, uint32_t flags) const {
- os << " [";
- if ((flags & (1 << 0)) != 0) {
- os << " CF";
- }
- if ((flags & (1 << 2)) != 0) {
- os << " PF";
- }
- if ((flags & (1 << 4)) != 0) {
- os << " AF";
- }
- if ((flags & (1 << 6)) != 0) {
- os << " ZF";
- }
- if ((flags & (1 << 7)) != 0) {
- os << " SF";
- }
- if ((flags & (1 << 8)) != 0) {
- os << " TF";
- }
- if ((flags & (1 << 9)) != 0) {
- os << " IF";
- }
- if ((flags & (1 << 10)) != 0) {
- os << " DF";
- }
- if ((flags & (1 << 11)) != 0) {
- os << " OF";
- }
- os << " ]";
- }
-
- mcontext_t& context;
-};
-
-// Return the signal number we recognize as timeout. -1 means not active/supported.
-static int GetTimeoutSignal() {
-#if defined(__APPLE__)
- // Mac does not support realtime signals.
- UNUSED(kUseSigRTTimeout);
- return -1;
-#else
- return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
-#endif
-}
-
-static bool IsTimeoutSignal(int signal_number) {
- return signal_number == GetTimeoutSignal();
-}
-
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handlingUnexpectedSignal = false;
- if (handlingUnexpectedSignal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- if (IsTimeoutSignal(signal_number)) {
- // Ignore a recursive timeout.
- return;
- }
- _exit(1);
- }
- handlingUnexpectedSignal = true;
-
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
-
- bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
- signal_number == SIGFPE || signal_number == SIGSEGV);
-
- OsInfo os_info;
- const char* cmd_line = GetCmdLine();
- if (cmd_line == nullptr) {
- cmd_line = "<unset>"; // Because no-one called InitLogging.
- }
- pid_t tid = GetTid();
- std::string thread_name(GetThreadName(tid));
- UContext thread_context(raw_context);
- Backtrace thread_backtrace(raw_context);
-
- // Note: We are using cerr directly instead of LOG macros to ensure even just partial output
- // makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
- // considering this is an abort situation.
-
- std::cerr << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
- << StringPrintf("Fatal signal %d (%s), code %d (%s)",
- signal_number, GetSignalName(signal_number),
- info->si_code,
- GetSignalCodeName(signal_number, info->si_code))
- << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << std::endl
- << "OS: " << Dumpable<OsInfo>(os_info) << std::endl
- << "Cmdline: " << cmd_line << std::endl
- << "Thread: " << tid << " \"" << thread_name << "\"" << std::endl
- << "Registers:\n" << Dumpable<UContext>(thread_context) << std::endl
- << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << std::endl;
- if (kIsDebugBuild && signal_number == SIGSEGV) {
- PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- }
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- if (IsTimeoutSignal(signal_number)) {
- // Special timeout signal. Try to dump all threads.
- // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
- // are of value here.
- runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
- std::cerr << std::endl;
- }
- std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
- }
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
std::cerr << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name
<< "\""
@@ -398,31 +58,9 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
void Runtime::InitPlatformSignalHandlers() {
// On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
-
- int rc = 0;
- rc += sigaction(SIGABRT, &action, nullptr);
- rc += sigaction(SIGBUS, &action, nullptr);
- rc += sigaction(SIGFPE, &action, nullptr);
- rc += sigaction(SIGILL, &action, nullptr);
- rc += sigaction(SIGPIPE, &action, nullptr);
- rc += sigaction(SIGSEGV, &action, nullptr);
-#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, nullptr);
-#endif
- rc += sigaction(SIGTRAP, &action, nullptr);
- // Special dump-all timeout.
- if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, nullptr);
- }
- CHECK_EQ(rc, 0);
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
+ nullptr,
+ /* handle_timeout_signal */ true);
}
} // namespace art
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ad748b04d3..e68a1b2681 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -123,7 +123,6 @@ RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::k
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library>
-RUNTIME_OPTIONS_KEY (Unit, FullyDeoptable) // -Xfully-deoptable
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 52d488fbae..eea68aa74e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2854,13 +2854,16 @@ void Thread::QuickDeliverException() {
if (Dbg::IsForcedInterpreterNeededForException(this)) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
- if (Runtime::Current()->IsDeoptimizeable(visitor.caller_pc)) {
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
// Save the exception into the deoptimization context so it can be restored
// before entering the interpreter.
PushDeoptimizationContext(
JValue(), /*is_reference */ false, /* from_code */ false, exception);
artDeoptimize(this);
UNREACHABLE();
+ } else {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << visitor.caller->PrettyMethod();
}
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 01c940e9df..df8acc37a2 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -455,7 +455,6 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
- const uint64_t start_time = NanoTime();
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -464,13 +463,17 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls.
+ // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
+ // pause.
+ const uint64_t suspend_start_time = NanoTime();
SuspendAllInternal(self, self, nullptr);
// Run the flip callback for the collector.
Locks::mutator_lock_->ExclusiveLock(self);
+ suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
flip_callback->Run(self);
Locks::mutator_lock_->ExclusiveUnlock(self);
- collector->RegisterPause(NanoTime() - start_time);
+ collector->RegisterPause(NanoTime() - suspend_start_time);
// Resume runnable threads.
size_t runnable_thread_count = 0;
@@ -629,8 +632,9 @@ void ThreadList::SuspendAllInternal(Thread* self,
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
++suspend_all_count_;
- if (debug_suspend)
+ if (debug_suspend) {
++debug_suspend_all_count_;
+ }
pending_threads.StoreRelaxed(list_.size() - num_ignored);
// Increment everybody's suspend count (except those that should be ignored).
for (const auto& thread : list_) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index d9179c3892..d24a5e5c4a 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -88,7 +88,10 @@ void ThreadPoolWorker::Run() {
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(),
+ true,
+ nullptr,
+ worker->thread_pool_->create_peers_));
worker->thread_ = Thread::Current();
// Thread pool workers cannot call into java.
worker->thread_->SetCanCallIntoJava(false);
@@ -112,7 +115,7 @@ void ThreadPool::RemoveAllTasks(Thread* self) {
tasks_.clear();
}
-ThreadPool::ThreadPool(const char* name, size_t num_threads)
+ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
: name_(name),
task_queue_lock_("task queue lock"),
task_queue_condition_("task queue condition", task_queue_lock_),
@@ -124,7 +127,8 @@ ThreadPool::ThreadPool(const char* name, size_t num_threads)
total_wait_time_(0),
// Add one since the caller of constructor waits on the barrier too.
creation_barier_(num_threads + 1),
- max_active_workers_(num_threads) {
+ max_active_workers_(num_threads),
+ create_peers_(create_peers) {
Thread* self = Thread::Current();
while (GetThreadCount() < num_threads) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
@@ -217,6 +221,7 @@ Task* ThreadPool::TryGetTaskLocked() {
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
+ CHECK(!create_peers_);
Task* task = nullptr;
while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 7ecfcd1289..a465e11055 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -105,11 +105,17 @@ class ThreadPool {
// Remove all tasks in the queue.
void RemoveAllTasks(Thread* self) REQUIRES(!task_queue_lock_);
- ThreadPool(const char* name, size_t num_threads);
+ // Create a named thread pool with the given number of threads.
+ //
+ // If create_peers is true, all worker threads will have a Java peer object. Note that if the
+ // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
+ // will conservatively abort if create_peers and do_work are true.
+ ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
virtual ~ThreadPool();
// Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
// wait till all already running tasks are done.
+ // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
@@ -159,6 +165,7 @@ class ThreadPool {
uint64_t total_wait_time_;
Barrier creation_barier_;
size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
+ const bool create_peers_;
private:
friend class ThreadPoolWorker;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 14c2c3bac8..28aa21f7a2 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -20,6 +20,7 @@
#include "atomic.h"
#include "common_runtime_test.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
namespace art {
@@ -159,4 +160,55 @@ TEST_F(ThreadPoolTest, RecursiveTest) {
EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent());
}
+class PeerTask : public Task {
+ public:
+ PeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() != nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+class NoPeerTask : public Task {
+ public:
+ NoPeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() == nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+// Tests for create_peer functionality.
+TEST_F(ThreadPoolTest, PeerTest) {
+ Thread* self = Thread::Current();
+ {
+ ThreadPool thread_pool("Thread pool test thread pool", 1);
+ thread_pool.AddTask(self, new NoPeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+
+ {
+ // To create peers, the runtime needs to be started.
+ self->TransitionFromSuspendedToRunnable();
+ bool started = runtime_->Start();
+ ASSERT_TRUE(started);
+
+ ThreadPool thread_pool("Thread pool test thread pool", 1, true);
+ thread_pool.AddTask(self, new PeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+}
+
} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 2add955f8e..3a9975a4e2 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -905,6 +905,9 @@ void Trace::FlushBuf() {
void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
+ // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
+ // same pointer value.
+ method = method->GetNonObsoleteMethod();
// Advance cur_offset_ atomically.
int32_t new_offset;
int32_t old_offset = 0;
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index bd1b044dae..2812c21004 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -48,9 +48,11 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
-inline constexpr size_t DexCacheArraysLayout::Alignment() {
- // GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
- static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
+constexpr size_t DexCacheArraysLayout::Alignment() {
+ // mirror::Type/String/MethodTypeDexCacheType alignment is 8,
+ // i.e. higher than or equal to the pointer alignment.
+ static_assert(alignof(mirror::TypeDexCacheType) == 8,
+ "Expecting alignof(ClassDexCacheType) == 8");
static_assert(alignof(mirror::StringDexCacheType) == 8,
"Expecting alignof(StringDexCacheType) == 8");
static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
@@ -60,17 +62,22 @@ inline constexpr size_t DexCacheArraysLayout::Alignment() {
}
template <typename T>
-static constexpr PointerSize GcRootAsPointerSize() {
+constexpr PointerSize GcRootAsPointerSize() {
static_assert(sizeof(GcRoot<T>) == 4U, "Unexpected GcRoot size");
return PointerSize::k32;
}
inline size_t DexCacheArraysLayout::TypeOffset(dex::TypeIndex type_idx) const {
- return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx.index_);
+ return types_offset_ + ElementOffset(PointerSize::k64,
+ type_idx.index_ % mirror::DexCache::kDexCacheTypeCacheSize);
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
+ size_t cache_size = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+ return ArraySize(PointerSize::k64, cache_size);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b915457557..ba429d8c3e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2399,7 +2399,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
- mirror::Class* klass = dex_cache_->GetResolvedType(type_idx);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache_.Get(), class_loader_.Get());
if (klass != nullptr && klass->IsPrimitive()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
<< dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
@@ -3684,9 +3685,10 @@ inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
}
const RegType& MethodVerifier::ResolveClassAndCheckAccess(dex::TypeIndex class_idx) {
- mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+ mirror::Class* klass =
+ ClassLinker::LookupResolvedType(class_idx, dex_cache_.Get(), class_loader_.Get()).Ptr();
const RegType* result = nullptr;
- if (klass != nullptr) {
+ if (klass != nullptr && !klass->IsErroneous()) {
bool precise = klass->CannotBeAssignedFromOtherTypes();
if (precise && !IsInstantiableOrPrimitive(klass)) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 06f193af32..fad8a9f100 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -730,16 +730,19 @@ public class Main {
Math.rint(+2.1);
Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.rint(+0.5), +0.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
- Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(+3.5), +4.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
- Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-3.5), -4.0d, 0.0); // expects tie-to-even
// 2^52 - 1.5
Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x432FFFFFFFFFFFFDl)),
Double.longBitsToDouble(0x432FFFFFFFFFFFFCl), 0.0);
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index d3a033b12a..6cea673b41 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -47,7 +47,7 @@ class TestVisitor : public StackVisitor {
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
GetCurrentOatQuickMethodHeader()->IsOptimized() &&
- !Runtime::Current()->IsDebuggable()) {
+ !Runtime::Current()->IsJavaDebuggable()) {
CHECK_EQ(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value), false);
} else {
CHECK(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value));
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index db437686f0..bf0cbe66c1 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -52,7 +52,6 @@ public class Main {
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS: int Main.testSimple(int) sharpening (after)
- /// CHECK-NOT: MipsDexCacheArraysBase
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS64: int Main.testSimple(int) sharpening (after)
@@ -69,10 +68,6 @@ public class Main {
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK-START-MIPS: int Main.testSimple(int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
/// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -95,7 +90,6 @@ public class Main {
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK-NOT: MipsDexCacheArraysBase
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
@@ -120,14 +114,6 @@ public class Main {
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NEXT: If
- /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NEXT: If
-
/// CHECK-START-X86: int Main.testDiamond(boolean, int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -182,24 +168,6 @@ public class Main {
/// CHECK: begin_block
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (before)
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: InvokeStaticOrDirect
- /// CHECK-NOT: InvokeStaticOrDirect
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: ArrayLength
- /// CHECK-NEXT: MipsDexCacheArraysBase
- /// CHECK-NEXT: Goto
- /// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
-
public static int testLoop(int[] array, int x) {
// PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop.
for (int i : array) {
@@ -228,16 +196,6 @@ public class Main {
/// CHECK-NEXT: ArmDexCacheArraysBase
/// CHECK-NEXT: Goto
- /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (before)
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: If
- /// CHECK: begin_block
- /// CHECK: ArrayLength
- /// CHECK-NEXT: MipsDexCacheArraysBase
- /// CHECK-NEXT: Goto
-
public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
// PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop
// but not outside the if.
@@ -381,20 +339,12 @@ public class Main {
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
- /// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_arm (after)
- /// CHECK-DAG: ArmDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
-
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
- /// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_mips (after)
- /// CHECK-DAG: MipsDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
-
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 7cc0b8b652..7509d9b4f3 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -154,8 +154,8 @@ public class Main {
/// CHECK-NOT: Phi
//
/// CHECK-START: int Main.polynomialInt() instruction_simplifier$after_bce (after)
- /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none
- /// CHECK-DAG: Return [<<Int>>] loop:none
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int polynomialInt() {
int x = 0;
for (int i = 0; i < 10; i++) {
@@ -164,6 +164,81 @@ public class Main {
return x;
}
+ // Regression test for b/34779592 (found with fuzz testing): overflow for last value
+ // of division truncates to zero, for multiplication it simply truncates.
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Int>>] loop:none
+ static int geoIntDivLastValue(int x) {
+ for (int i = 0; i < 2; i++) {
+ x /= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: int Main.geoIntMulLastValue(int) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant -194211840 loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Par>>,<<Int>>] loop:none
+ /// CHECK-DAG: Return [<<Mul>>] loop:none
+ static int geoIntMulLastValue(int x) {
+ for (int i = 0; i < 2; i++) {
+ x *= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue(long) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Long>>] loop:none
+ static long geoLongDivLastValue(long x) {
+ for (int i = 0; i < 10; i++) {
+ x /= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongMulLastValue(long) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Par:j\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant -8070450532247928832 loop:none
+ /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Par>>,<<Long>>] loop:none
+ /// CHECK-DAG: Return [<<Mul>>] loop:none
+ static long geoLongMulLastValue(long x) {
+ for (int i = 0; i < 10; i++) {
+ x *= 1081788608;
+ }
+ return x;
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -185,6 +260,42 @@ public class Main {
expectEquals(-45, polynomialIntFromLong());
expectEquals(-45, polynomialInt());
+ expectEquals(0, geoIntDivLastValue(0));
+ expectEquals(0, geoIntDivLastValue(1));
+ expectEquals(0, geoIntDivLastValue(2));
+ expectEquals(0, geoIntDivLastValue(1081788608));
+ expectEquals(0, geoIntDivLastValue(-1081788608));
+ expectEquals(0, geoIntDivLastValue(2147483647));
+ expectEquals(0, geoIntDivLastValue(-2147483648));
+
+ expectEquals( 0, geoIntMulLastValue(0));
+ expectEquals( -194211840, geoIntMulLastValue(1));
+ expectEquals( -388423680, geoIntMulLastValue(2));
+ expectEquals(-1041498112, geoIntMulLastValue(1081788608));
+ expectEquals( 1041498112, geoIntMulLastValue(-1081788608));
+ expectEquals( 194211840, geoIntMulLastValue(2147483647));
+ expectEquals( 0, geoIntMulLastValue(-2147483648));
+
+ expectEquals(0L, geoLongDivLastValue(0L));
+ expectEquals(0L, geoLongDivLastValue(1L));
+ expectEquals(0L, geoLongDivLastValue(2L));
+ expectEquals(0L, geoLongDivLastValue(1081788608L));
+ expectEquals(0L, geoLongDivLastValue(-1081788608L));
+ expectEquals(0L, geoLongDivLastValue(2147483647L));
+ expectEquals(0L, geoLongDivLastValue(-2147483648L));
+ expectEquals(0L, geoLongDivLastValue(9223372036854775807L));
+ expectEquals(0L, geoLongDivLastValue(-9223372036854775808L));
+
+ expectEquals( 0L, geoLongMulLastValue(0L));
+ expectEquals(-8070450532247928832L, geoLongMulLastValue(1L));
+ expectEquals( 2305843009213693952L, geoLongMulLastValue(2L));
+ expectEquals( 0L, geoLongMulLastValue(1081788608L));
+ expectEquals( 0L, geoLongMulLastValue(-1081788608L));
+ expectEquals( 8070450532247928832L, geoLongMulLastValue(2147483647L));
+ expectEquals( 0L, geoLongMulLastValue(-2147483648L));
+ expectEquals( 8070450532247928832L, geoLongMulLastValue(9223372036854775807L));
+ expectEquals( 0L, geoLongMulLastValue(-9223372036854775808L));
+
System.out.println("passed");
}
@@ -193,4 +304,10 @@ public class Main {
throw new Error("Expected: " + expected + ", found: " + result);
}
}
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
}
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index b035896166..6d4b645db6 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -24,7 +24,8 @@ extern "C" JNIEXPORT void JNICALL Java_Main_nativeClearResolvedTypes(JNIEnv*, jc
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache();
for (size_t i = 0, num_types = dex_cache->NumResolvedTypes(); i != num_types; ++i) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), ObjPtr<mirror::Class>(nullptr));
+ mirror::TypeDexCachePair cleared(nullptr, mirror::TypeDexCachePair::InvalidIndexForSlot(i));
+ dex_cache->GetResolvedTypes()[i].store(cleared, std::memory_order_relaxed);
}
}
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index 59801ff648..8f96ee63ef 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -38,43 +38,32 @@ static void JNICALL GarbageCollectionStart(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
}
extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback(
- JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
callbacks.GarbageCollectionFinish = GarbageCollectionFinish;
callbacks.GarbageCollectionStart = GarbageCollectionStart;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error setting callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
+ JvmtiErrorToException(env, ret);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jboolean enable) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_START,
nullptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling gc callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
}
ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
nullptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling gc callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
}
}
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index 0664592cd1..4a2eb34a1d 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -22,10 +22,12 @@ if [[ "$@" == *"-O"* ]]; then
fi
./default-run "$@" --android-runtime-option -Xplugin:${plugin} \
- --android-runtime-option -Xfully-deoptable \
+ --android-runtime-option -Xcompiler-option \
+ --android-runtime-option --debuggable \
--args agent:${agent}=909-attach-agent
-./default-run "$@" --android-runtime-option -Xfully-deoptable \
+./default-run "$@" --android-runtime-option -Xcompiler-option \
+ --android-runtime-option --debuggable \
--args agent:${agent}=909-attach-agent
./default-run "$@" --args agent:${agent}=909-attach-agent
diff --git a/test/909-attach-agent/src/Main.java b/test/909-attach-agent/src/Main.java
index 8a8a087458..569b89ad7d 100644
--- a/test/909-attach-agent/src/Main.java
+++ b/test/909-attach-agent/src/Main.java
@@ -19,7 +19,7 @@ import java.io.IOException;
public class Main {
public static void main(String[] args) {
- System.out.println("Hello, world!");
+ System.err.println("Hello, world!");
for(String a : args) {
if(a.startsWith("agent:")) {
String agent = a.substring(6);
@@ -30,6 +30,6 @@ public class Main {
}
}
}
- System.out.println("Goodbye!");
+ System.err.println("Goodbye!");
}
}
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
index 1b03200ba5..2b3296f1f2 100644
--- a/test/916-obsolete-jit/src/Main.java
+++ b/test/916-obsolete-jit/src/Main.java
@@ -116,37 +116,27 @@ public class Main {
doTest(new Transform(), new TestWatcher());
}
- // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
- // the JIT and/or (2) inability to deoptimize frames near runtime functions.
- // TODO Fix one/both of these issues.
- public static void doCall(Runnable r) {
- r.run();
- }
-
private static boolean interpreting = true;
private static boolean retry = false;
public static void doTest(Transform t, TestWatcher w) {
// Get the methods that need to be optimized.
Method say_hi_method;
- Method do_call_method;
// Figure out if we can even JIT at all.
final boolean has_jit = hasJit();
try {
say_hi_method = Transform.class.getDeclaredMethod(
"sayHi", Runnable.class, Consumer.class);
- do_call_method = Main.class.getDeclaredMethod("doCall", Runnable.class);
} catch (Exception e) {
System.out.println("Unable to find methods!");
e.printStackTrace();
return;
}
// Makes sure the stack is the way we want it for the test and does the redefinition. It will
- // set the retry boolean to true if we need to go around again due to a bad stack.
+ // set the retry boolean to true if the stack does not have a JIT-compiled sayHi entry. This can
+ // only happen if the method gets GC'd.
Runnable do_redefinition = () -> {
- if (has_jit &&
- (Main.isInterpretedFunction(say_hi_method, true) ||
- Main.isInterpretedFunction(do_call_method, false))) {
+ if (has_jit && Main.isInterpretedFunction(say_hi_method, true)) {
// Try again. We are not running the right jitted methods/cannot redefine them now.
retry = true;
} else {
@@ -161,7 +151,6 @@ public class Main {
do {
// Run ensureJitCompiled here since it might get GCd
ensureJitCompiled(Transform.class, "sayHi");
- ensureJitCompiled(Main.class, "doCall");
// Clear output.
w.clear();
// Try and redefine.
diff --git a/test/916-obsolete-jit/src/Transform.java b/test/916-obsolete-jit/src/Transform.java
index f4dcf09dc6..9c9adbc22d 100644
--- a/test/916-obsolete-jit/src/Transform.java
+++ b/test/916-obsolete-jit/src/Transform.java
@@ -29,13 +29,7 @@ class Transform {
reporter.accept("Pre Start private method call");
Start(reporter);
reporter.accept("Post Start private method call");
- // TODO Revisit with b/33616143
- // TODO Uncomment this once either b/33630159 or b/33616143 are resolved.
- // r.run();
- // TODO This doCall function is a very temporary fix until we get either deoptimization near
- // runtime frames working, forcing current method to be always read from the stack or both
- // working.
- Main.doCall(r);
+ r.run();
reporter.accept("Pre Finish private method call");
Finish(reporter);
reporter.accept("Post Finish private method call");
diff --git a/test/919-obsolete-fields/src/Main.java b/test/919-obsolete-fields/src/Main.java
index 1d893f125a..ffb9897236 100644
--- a/test/919-obsolete-fields/src/Main.java
+++ b/test/919-obsolete-fields/src/Main.java
@@ -120,13 +120,6 @@ public class Main {
doTest(new Transform(w), w);
}
- // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
- // the JIT and/or (2) inability to deoptimize frames near runtime functions.
- // TODO Fix one/both of these issues.
- public static void doCall(Runnable r) {
- r.run();
- }
-
private static boolean interpreting = true;
private static boolean retry = false;
diff --git a/test/919-obsolete-fields/src/Transform.java b/test/919-obsolete-fields/src/Transform.java
index abd1d19b66..c8e3cbd934 100644
--- a/test/919-obsolete-fields/src/Transform.java
+++ b/test/919-obsolete-fields/src/Transform.java
@@ -34,12 +34,7 @@ class Transform {
reporter.accept("Pre Start private method call");
Start();
reporter.accept("Post Start private method call");
- // TODO Revist with b/33616143
- // TODO Uncomment this
- // r.run();
- // TODO This is a very temporary fix until we get either deoptimization near runtime frames
- // working, forcing current method to be always read from the stack or both working.
- Main.doCall(r);
+ r.run();
reporter.accept("Pre Finish private method call");
Finish();
reporter.accept("Post Finish private method call");
diff --git a/test/938-load-transform-bcp/build b/test/938-load-transform-bcp/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/938-load-transform-bcp/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/938-load-transform-bcp/expected.txt b/test/938-load-transform-bcp/expected.txt
new file mode 100644
index 0000000000..16c3f8f726
--- /dev/null
+++ b/test/938-load-transform-bcp/expected.txt
@@ -0,0 +1,2 @@
+ol.foo() -> 'This is foo for val=123'
+ol.toString() -> 'This is toString() for val=123'
diff --git a/test/938-load-transform-bcp/info.txt b/test/938-load-transform-bcp/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/938-load-transform-bcp/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/938-load-transform-bcp/run b/test/938-load-transform-bcp/run
new file mode 100755
index 0000000000..adb1a1c507
--- /dev/null
+++ b/test/938-load-transform-bcp/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/938-load-transform-bcp/src-ex/TestMain.java b/test/938-load-transform-bcp/src-ex/TestMain.java
new file mode 100644
index 0000000000..3757a0f778
--- /dev/null
+++ b/test/938-load-transform-bcp/src-ex/TestMain.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.OptionalLong;
+public class TestMain {
+ public static void runTest() {
+ // This should be our redefined OptionalLong.
+ OptionalLong ol = OptionalLong.of(123);
+ try {
+ // OptionalLong is a class that is unlikely to be used by the time this test starts.
+ Method foo = OptionalLong.class.getMethod("foo");
+ System.out.println("ol.foo() -> '" + (String)foo.invoke(ol) + "'");
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ } catch (Exception e) {
+ System.out.println(
+            "Exception occurred (did something load OptionalLong before this test method!: "
+ + e.toString());
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/test/938-load-transform-bcp/src/Main.java b/test/938-load-transform-bcp/src/Main.java
new file mode 100644
index 0000000000..13bc5da461
--- /dev/null
+++ b/test/938-load-transform-bcp/src/Main.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+import java.util.Base64;
+
+class Main {
+ public static String TEST_NAME = "938-load-transform-bcp";
+
+ /**
+ * base64 encoded class/dex file for
+ *
+ * // Yes this version of OptionalLong is not compatible with the real one but since it isn't used
+ * // for anything in the runtime initialization it should be fine.
+ *
+ * package java.util;
+ * public final class OptionalLong {
+ * private long val;
+ *
+ * private OptionalLong(long abc) {
+ * this.val = abc;
+ * }
+ *
+ * public static OptionalLong of(long abc) {
+ * return new OptionalLong(abc);
+ * }
+ *
+ * public String foo() {
+ * return "This is foo for val=" + val;
+ * }
+ *
+ * public String toString() {
+ * return "This is toString() for val=" + val;
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAKQoADAAaCQADABsHABwKAAMAHQcAHgoABQAaCAAfCgAFACAKAAUAIQoABQAiCAAj" +
+ "BwAkAQADdmFsAQABSgEABjxpbml0PgEABChKKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAC" +
+ "b2YBABsoSilMamF2YS91dGlsL09wdGlvbmFsTG9uZzsBAANmb28BABQoKUxqYXZhL2xhbmcvU3Ry" +
+ "aW5nOwEACHRvU3RyaW5nAQAKU291cmNlRmlsZQEAEU9wdGlvbmFsTG9uZy5qYXZhDAAPACUMAA0A" +
+ "DgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcMAA8AEAEAF2phdmEvbGFuZy9TdHJpbmdCdWlsZGVy" +
+ "AQAUVGhpcyBpcyBmb28gZm9yIHZhbD0MACYAJwwAJgAoDAAXABYBABtUaGlzIGlzIHRvU3RyaW5n" +
+ "KCkgZm9yIHZhbD0BABBqYXZhL2xhbmcvT2JqZWN0AQADKClWAQAGYXBwZW5kAQAtKExqYXZhL2xh" +
+ "bmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7AQAcKEopTGphdmEvbGFuZy9TdHJp" +
+ "bmdCdWlsZGVyOwAxAAMADAAAAAEAAgANAA4AAAAEAAIADwAQAAEAEQAAACoAAwADAAAACiq3AAEq" +
+ "H7UAArEAAAABABIAAAAOAAMAAAAFAAQABgAJAAcACQATABQAAQARAAAAIQAEAAIAAAAJuwADWR63" +
+ "AASwAAAAAQASAAAABgABAAAACgABABUAFgABABEAAAAvAAMAAQAAABe7AAVZtwAGEge2AAgqtAAC" +
+ "tgAJtgAKsAAAAAEAEgAAAAYAAQAAAA4AAQAXABYAAQARAAAALwADAAEAAAAXuwAFWbcABhILtgAI" +
+ "KrQAArYACbYACrAAAAABABIAAAAGAAEAAAASAAEAGAAAAAIAGQ==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAOe/TYJCvVthTToFA3tveMDhwTo7uDf0IcBAAAcAAAAHhWNBIAAAAAAAAAAHwDAAAU" +
+ "AAAAcAAAAAYAAADAAAAABgAAANgAAAABAAAAIAEAAAkAAAAoAQAAAQAAAHABAACMAgAAkAEAAFYC" +
+ "AABeAgAAYQIAAGQCAABoAgAAbAIAAIACAACUAgAArwIAAMkCAADcAgAA8gIAAA8DAAASAwAAFgMA" +
+ "AB4DAAAyAwAANwMAADsDAABFAwAAAQAAAAUAAAAGAAAABwAAAAgAAAAMAAAAAgAAAAIAAAAAAAAA" +
+ "AwAAAAMAAABIAgAABAAAAAMAAABQAgAAAwAAAAQAAABIAgAADAAAAAUAAAAAAAAADQAAAAUAAABI" +
+ "AgAABAAAABMAAAABAAQAAAAAAAMABAAAAAAAAwABAA4AAAADAAIADgAAAAMAAAASAAAABAAFAAAA" +
+ "AAAEAAAAEAAAAAQAAwARAAAABAAAABIAAAAEAAAAEQAAAAEAAAAAAAAACQAAAAAAAABiAwAAAAAA" +
+ "AAQAAwABAAAASgMAAAYAAABwEAAAAQBaEgAADgAEAAIAAwAAAFIDAAAGAAAAIgAEAHAwBQAgAxEA" +
+ "BQABAAMAAABYAwAAFwAAACIAAwBwEAEAAAAbAQoAAABuIAMAEAAMAFNCAABuMAIAIAMMAG4QBAAA" +
+ "AAwAEQAAAAUAAQADAAAAXQMAABcAAAAiAAMAcBABAAAAGwELAAAAbiADABAADABTQgAAbjACACAD" +
+ "DABuEAQAAAAMABEAAAABAAAAAAAAAAEAAAACAAY8aW5pdD4AAUoAAUwAAkxKAAJMTAASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRl" +
+ "cjsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwART3B0aW9uYWxMb25nLmphdmEAFFRoaXMgaXMg" +
+ "Zm9vIGZvciB2YWw9ABtUaGlzIGlzIHRvU3RyaW5nKCkgZm9yIHZhbD0AAVYAAlZKAAZhcHBlbmQA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yMgADZm9vAAJvZgAIdG9TdHJpbmcAA3ZhbAAFAQAHDjwtAAoBAAcO" +
+ "AA4ABw4AEgAHDgAAAQICAAIFgoAEkAMCCawDBgHIAwIBiAQAAA0AAAAAAAAAAQAAAAAAAAABAAAA" +
+ "FAAAAHAAAAACAAAABgAAAMAAAAADAAAABgAAANgAAAAEAAAAAQAAACABAAAFAAAACQAAACgBAAAG" +
+ "AAAAAQAAAHABAAABIAAABAAAAJABAAABEAAAAgAAAEgCAAACIAAAFAAAAFYCAAADIAAABAAAAEoD" +
+ "AAAAIAAAAQAAAGIDAAAAEAAAAQAAAHwDAAA=");
+
+ public static ClassLoader getClassLoaderFor(String location) throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class);
+ return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar",
+ Main.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") });
+ }
+ }
+
+ public static void main(String[] args) {
+ // TODO WHAT TO TRANSFORM
+ addCommonTransformationResult("java/util/OptionalLong", CLASS_BYTES, DEX_BYTES);
+ enableCommonRetransformation(true);
+ try {
+ /* this is the "alternate" DEX/Jar file */
+ ClassLoader new_loader = getClassLoaderFor(System.getenv("DEX_LOCATION"));
+ Class<?> klass = (Class<?>)new_loader.loadClass("TestMain");
+ if (klass == null) {
+ throw new AssertionError("loadClass failed");
+ }
+ Method run_test = klass.getMethod("runTest");
+ run_test.invoke(null);
+ } catch (Exception e) {
+ System.out.println(e.toString());
+ e.printStackTrace();
+ }
+ }
+
+ // Transforms the class
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/939-hello-transformation-bcp/build b/test/939-hello-transformation-bcp/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/939-hello-transformation-bcp/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/939-hello-transformation-bcp/expected.txt b/test/939-hello-transformation-bcp/expected.txt
new file mode 100644
index 0000000000..90fd25828d
--- /dev/null
+++ b/test/939-hello-transformation-bcp/expected.txt
@@ -0,0 +1,3 @@
+ol.toString() -> 'OptionalLong[-559038737]'
+Redefining OptionalLong!
+ol.toString() -> 'Redefined OptionalLong!'
diff --git a/test/939-hello-transformation-bcp/info.txt b/test/939-hello-transformation-bcp/info.txt
new file mode 100644
index 0000000000..d230a382bd
--- /dev/null
+++ b/test/939-hello-transformation-bcp/info.txt
@@ -0,0 +1,6 @@
+Tests basic functions in the jvmti plugin.
+
+Note this function is reliant on the definition of java.util.OptionalLong not
+changing. If this class's definition changes we will need to update this class
+so that the CLASS_BYTES and DEX_BYTES fields contain dex/class bytes for an
+OptionalLong with all the same methods and fields.
diff --git a/test/939-hello-transformation-bcp/run b/test/939-hello-transformation-bcp/run
new file mode 100755
index 0000000000..c6e62ae6cd
--- /dev/null
+++ b/test/939-hello-transformation-bcp/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/939-hello-transformation-bcp/src/Main.java b/test/939-hello-transformation-bcp/src/Main.java
new file mode 100644
index 0000000000..bdf7f592ef
--- /dev/null
+++ b/test/939-hello-transformation-bcp/src/Main.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+import java.util.OptionalLong;
+public class Main {
+
+ /**
+ * This is the base64 encoded class/dex.
+ *
+ * package java.util;
+ * import java.util.function.LongConsumer;
+ * import java.util.function.LongSupplier;
+ * import java.util.function.Supplier;
+ * public final class OptionalLong {
+ * // Make sure we have a <clinit> function since the real implementation of OptionalLong does.
+ * static { EMPTY = null; }
+ * private static final OptionalLong EMPTY;
+ * private final boolean isPresent;
+ * private final long value;
+ * private OptionalLong() { isPresent = false; value = 0; }
+ * private OptionalLong(long l) { this(); }
+ * public static OptionalLong empty() { return null; }
+ * public static OptionalLong of(long value) { return null; }
+ * public long getAsLong() { return 0; }
+ * public boolean isPresent() { return false; }
+ * public void ifPresent(LongConsumer c) { }
+ * public long orElse(long l) { return 0; }
+ * public long orElseGet(LongSupplier s) { return 0; }
+ * public<X extends Throwable> long orElseThrow(Supplier<X> s) throws X { return 0; }
+ * public boolean equals(Object o) { return false; }
+ * public int hashCode() { return 0; }
+ * public String toString() { return "Redefined OptionalLong!"; }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAOAoACAAwCQAHADEJAAcAMgoABwAwCAAzCQAHADQHADUHADYBAAVFTVBUWQEAGExq" +
+ "YXZhL3V0aWwvT3B0aW9uYWxMb25nOwEACWlzUHJlc2VudAEAAVoBAAV2YWx1ZQEAAUoBAAY8aW5p" +
+ "dD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAEKEopVgEABWVtcHR5AQAaKClMamF2" +
+ "YS91dGlsL09wdGlvbmFsTG9uZzsBAAJvZgEAGyhKKUxqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwEA" +
+ "CWdldEFzTG9uZwEAAygpSgEAAygpWgEACWlmUHJlc2VudAEAJChMamF2YS91dGlsL2Z1bmN0aW9u" +
+ "L0xvbmdDb25zdW1lcjspVgEABm9yRWxzZQEABChKKUoBAAlvckVsc2VHZXQBACQoTGphdmEvdXRp" +
+ "bC9mdW5jdGlvbi9Mb25nU3VwcGxpZXI7KUoBAAtvckVsc2VUaHJvdwEAIChMamF2YS91dGlsL2Z1" +
+ "bmN0aW9uL1N1cHBsaWVyOylKAQAKRXhjZXB0aW9ucwcANwEACVNpZ25hdHVyZQEAQjxYOkxqYXZh" +
+ "L2xhbmcvVGhyb3dhYmxlOz4oTGphdmEvdXRpbC9mdW5jdGlvbi9TdXBwbGllcjxUWDs+OylKXlRY" +
+ "OwEABmVxdWFscwEAFShMamF2YS9sYW5nL09iamVjdDspWgEACGhhc2hDb2RlAQADKClJAQAIdG9T" +
+ "dHJpbmcBABQoKUxqYXZhL2xhbmcvU3RyaW5nOwEACDxjbGluaXQ+AQAKU291cmNlRmlsZQEAEU9w" +
+ "dGlvbmFsTG9uZy5qYXZhDAAPABAMAAsADAwADQAOAQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEM" +
+ "AAkACgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcBABBqYXZhL2xhbmcvT2JqZWN0AQATamF2YS9s" +
+ "YW5nL1Rocm93YWJsZQAxAAcACAAAAAMAGgAJAAoAAAASAAsADAAAABIADQAOAAAADgACAA8AEAAB" +
+ "ABEAAAAnAAMAAQAAAA8qtwABKgO1AAIqCbUAA7EAAAABABIAAAAGAAEAAAALAAIADwATAAEAEQAA" +
+ "AB0AAQADAAAABSq3AASxAAAAAQASAAAABgABAAAADAAJABQAFQABABEAAAAaAAEAAAAAAAIBsAAA" +
+ "AAEAEgAAAAYAAQAAAA0ACQAWABcAAQARAAAAGgABAAIAAAACAbAAAAABABIAAAAGAAEAAAAOAAEA" +
+ "GAAZAAEAEQAAABoAAgABAAAAAgmtAAAAAQASAAAABgABAAAADwABAAsAGgABABEAAAAaAAEAAQAA" +
+ "AAIDrAAAAAEAEgAAAAYAAQAAABAAAQAbABwAAQARAAAAGQAAAAIAAAABsQAAAAEAEgAAAAYAAQAA" +
+ "ABEAAQAdAB4AAQARAAAAGgACAAMAAAACCa0AAAABABIAAAAGAAEAAAASAAEAHwAgAAEAEQAAABoA" +
+ "AgACAAAAAgmtAAAAAQASAAAABgABAAAAEwABACEAIgADABEAAAAaAAIAAgAAAAIJrQAAAAEAEgAA" +
+ "AAYAAQAAABQAIwAAAAQAAQAkACUAAAACACYAAQAnACgAAQARAAAAGgABAAIAAAACA6wAAAABABIA" +
+ "AAAGAAEAAAAVAAEAKQAqAAEAEQAAABoAAQABAAAAAgOsAAAAAQASAAAABgABAAAAFgABACsALAAB" +
+ "ABEAAAAbAAEAAQAAAAMSBbAAAAABABIAAAAGAAEAAAAXAAgALQAQAAEAEQAAAB0AAQAAAAAABQGz" +
+ "AAaxAAAAAQASAAAABgABAAAABwABAC4AAAACAC8=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCvAoivSJqk6GdYOgJmvrM/b2/flxhw99q8BwAAcAAAAHhWNBIAAAAAAAAAAPgGAAAq" +
+ "AAAAcAAAAA0AAAAYAQAADQAAAEwBAAADAAAA6AEAAA8AAAAAAgAAAQAAAHgCAAAkBQAAmAIAACoE" +
+ "AAA4BAAAPQQAAEcEAABPBAAAUwQAAFoEAABdBAAAYAQAAGQEAABoBAAAawQAAG8EAACOBAAAqgQA" +
+ "AL4EAADSBAAA6QQAAAMFAAAmBQAASQUAAGcFAACGBQAAmQUAALIFAAC1BQAAuQUAAL0FAADABQAA" +
+ "xAUAANgFAADfBQAA5wUAAPIFAAD8BQAABwYAABIGAAAWBgAAHgYAACkGAAA2BgAAQAYAAAYAAAAH" +
+ "AAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABMAAAAVAAAAGAAAABsAAAAGAAAAAAAAAAAA" +
+ "AAAHAAAAAQAAAAAAAAAIAAAAAQAAAAQEAAAJAAAAAQAAAAwEAAAJAAAAAQAAABQEAAAKAAAABQAA" +
+ "AAAAAAAKAAAABwAAAAAAAAALAAAABwAAAAQEAAAYAAAACwAAAAAAAAAZAAAACwAAAAQEAAAaAAAA" +
+ "CwAAABwEAAAbAAAADAAAAAAAAAAcAAAADAAAACQEAAAHAAcABQAAAAcADAAjAAAABwABACkAAAAE" +
+ "AAgAAwAAAAcACAACAAAABwAIAAMAAAAHAAkAAwAAAAcABgAeAAAABwAMAB8AAAAHAAEAIAAAAAcA" +
+ "AAAhAAAABwAKACIAAAAHAAsAIwAAAAcABwAkAAAABwACACUAAAAHAAMAJgAAAAcABAAnAAAABwAF" +
+ "ACgAAAAHAAAAEQAAAAQAAAAAAAAAFgAAAOwDAACtBgAAAAAAAAIAAACVBgAApQYAAAEAAAAAAAAA" +
+ "RwYAAAQAAAASAGkAAAAOAAMAAQABAAAATQYAAAsAAABwEAAAAgASAFwgAQAWAAAAWiACAA4AAAAD" +
+ "AAMAAQAAAFIGAAAEAAAAcBACAAAADgABAAAAAAAAAFgGAAACAAAAEgARAAMAAgAAAAAAXQYAAAIA" +
+ "AAASABEAAwACAAAAAABjBgAAAgAAABIADwADAAEAAAAAAGkGAAADAAAAFgAAABAAAAACAAEAAAAA" +
+ "AG4GAAACAAAAEgAPAAIAAgAAAAAAcwYAAAEAAAAOAAAAAgABAAAAAAB5BgAAAgAAABIADwAFAAMA" +
+ "AAAAAH4GAAADAAAAFgAAABAAAAAEAAIAAAAAAIQGAAADAAAAFgAAABAAAAAEAAIAAAAAAIoGAAAD" +
+ "AAAAFgAAABAAAAACAAEAAAAAAJAGAAAEAAAAGwAXAAAAEQAAAAAAAAAAAAEAAAAAAAAADQAAAJgC" +
+ "AAABAAAAAQAAAAEAAAAJAAAAAQAAAAoAAAABAAAACAAAAAEAAAAEAAw8VFg7PjspSl5UWDsAAzxY" +
+ "OgAIPGNsaW5pdD4ABjxpbml0PgACPigABUVNUFRZAAFJAAFKAAJKSgACSkwAAUwAAkxKAB1MZGFs" +
+ "dmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAaTGRhbHZpay9hbm5vdGF0aW9uL1Rocm93czsAEkxq" +
+ "YXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVMamF2YS9sYW5nL1Rocm93YWJs" +
+ "ZTsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwAhTGphdmEvdXRpbC9mdW5jdGlvbi9Mb25nQ29u" +
+ "c3VtZXI7ACFMamF2YS91dGlsL2Z1bmN0aW9uL0xvbmdTdXBwbGllcjsAHExqYXZhL3V0aWwvZnVu" +
+ "Y3Rpb24vU3VwcGxpZXIAHUxqYXZhL3V0aWwvZnVuY3Rpb24vU3VwcGxpZXI7ABFPcHRpb25hbExv" +
+ "bmcuamF2YQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEAAVYAAlZKAAJWTAABWgACWkwAEmVtaXR0" +
+ "ZXI6IGphY2stNC4yMgAFZW1wdHkABmVxdWFscwAJZ2V0QXNMb25nAAhoYXNoQ29kZQAJaWZQcmVz" +
+ "ZW50AAlpc1ByZXNlbnQAAm9mAAZvckVsc2UACW9yRWxzZUdldAALb3JFbHNlVGhyb3cACHRvU3Ry" +
+ "aW5nAAV2YWx1ZQAHAAcOOQALAAcOAAwBAAcOAA0ABw4ADgEABw4AFQEABw4ADwAHDgAWAAcOABEB" +
+ "AAcOABAABw4AEgEABw4AEwEABw4AFAEABw4AFwAHDgACAgEpHAUXARcQFwQXFBcAAgMBKRwBGAYB" +
+ "AgUJABoBEgESAYiABKQFAYKABLwFAYKABOQFAQn8BQYJkAYFAaQGAQG4BgEB0AYBAeQGAQH4BgIB" +
+ "jAcBAaQHAQG8BwEB1AcAAAAQAAAAAAAAAAEAAAAAAAAAAQAAACoAAABwAAAAAgAAAA0AAAAYAQAA" +
+ "AwAAAA0AAABMAQAABAAAAAMAAADoAQAABQAAAA8AAAAAAgAABgAAAAEAAAB4AgAAAxAAAAEAAACY" +
+ "AgAAASAAAA4AAACkAgAABiAAAAEAAADsAwAAARAAAAUAAAAEBAAAAiAAACoAAAAqBAAAAyAAAA4A" +
+ "AABHBgAABCAAAAIAAACVBgAAACAAAAEAAACtBgAAABAAAAEAAAD4BgAA");
+
+ public static void main(String[] args) {
+ // OptionalLong is a class that is unlikely to be used by the time this test starts and is not
+ // likely to be changed in any meaningful way in the future.
+ OptionalLong ol = OptionalLong.of(0xDEADBEEF);
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ System.out.println("Redefining OptionalLong!");
+ doCommonClassRedefinition(OptionalLong.class, CLASS_BYTES, DEX_BYTES);
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] class_file,
+ byte[] dex_file);
+}
diff --git a/test/957-methodhandle-transforms/expected.txt b/test/957-methodhandle-transforms/expected.txt
index 05b80e78a7..cf6b5a14b5 100644
--- a/test/957-methodhandle-transforms/expected.txt
+++ b/test/957-methodhandle-transforms/expected.txt
@@ -59,3 +59,20 @@ a: a, b:42, c: 43
a: a, b:100, c: 99
a: a, b:8.9, c: 9.1
a: a, b:6.7, c: 7.8
+a: a, b: b, c:c, d:d
+a: a, b: b, c:c, d:d
+a: a, b: b, c:c, d:d
+a: a+b, b: c, c: d
+a: a, b: b+c, c: d
+a: a, b: b, c: c+d
+voidFilter
+a: a, b: b, c: c
+voidFilter
+a: a, b: b, c: c
+a: foo, b:45, c:56, d:bar
+a: foo, b:56, c:57, d:bar
+a: foo, b:56, c:57, d:bar
+a: foo, b:45, c:46, d:bar
+a: c+d ,b:c ,c:d ,d:e
+c+d
+a: a ,b:c ,c:d ,d:e
diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java
index 4035857b9a..b6bbe74b9c 100644
--- a/test/957-methodhandle-transforms/src/Main.java
+++ b/test/957-methodhandle-transforms/src/Main.java
@@ -38,6 +38,10 @@ public class Main {
testSpreaders_primitive();
testInvokeWithArguments();
testAsCollector();
+ testFilterArguments();
+ testCollectArguments();
+ testInsertArguments();
+ testFoldArguments();
}
public static void testThrowException() throws Throwable {
@@ -1374,6 +1378,269 @@ public class Main {
assertEquals(51, (int) target.asCollector(double[].class, 2).invoke("a", 6.7, 7.8));
}
+ public static String filter1(char a) {
+ return String.valueOf(a);
+ }
+
+ public static char filter2(String b) {
+ return b.charAt(0);
+ }
+
+ public static String badFilter1(char a, char b) {
+ return "bad";
+ }
+
+ public static int filterTarget(String a, char b, String c, char d) {
+ System.out.println("a: " + a + ", b: " + b + ", c:" + c + ", d:" + d);
+ return 56;
+ }
+
+ public static void testFilterArguments() throws Throwable {
+ MethodHandle filter1 = MethodHandles.lookup().findStatic(
+ Main.class, "filter1", MethodType.methodType(String.class, char.class));
+ MethodHandle filter2 = MethodHandles.lookup().findStatic(
+ Main.class, "filter2", MethodType.methodType(char.class, String.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "filterTarget", MethodType.methodType(int.class,
+ String.class, char.class, String.class, char.class));
+
+ // In all the cases below, the values printed will be 'a', 'b', 'c', 'd'.
+
+ // Filter arguments [0, 1] - all other arguments are passed through
+ // as is.
+ MethodHandle adapter = MethodHandles.filterArguments(
+ target, 0, filter1, filter2);
+ assertEquals(56, (int) adapter.invokeExact('a', "bXXXX", "c", 'd'));
+
+ // Filter arguments [1, 2].
+ adapter = MethodHandles.filterArguments(target, 1, filter2, filter1);
+ assertEquals(56, (int) adapter.invokeExact("a", "bXXXX", 'c', 'd'));
+
+ // Filter arguments [2, 3].
+ adapter = MethodHandles.filterArguments(target, 2, filter1, filter2);
+ assertEquals(56, (int) adapter.invokeExact("a", 'b', 'c', "dXXXXX"));
+
+ // Try out a few error cases :
+
+ // The return types of the filter doesn't align with the expected argument
+ // type of the target.
+ try {
+ adapter = MethodHandles.filterArguments(target, 2, filter2, filter1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // There are more filters than arguments.
+ try {
+ adapter = MethodHandles.filterArguments(target, 3, filter2, filter1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // We pass in an obviously bogus position.
+ try {
+ adapter = MethodHandles.filterArguments(target, -1, filter2, filter1);
+ fail();
+ } catch (ArrayIndexOutOfBoundsException expected) {
+ }
+
+ // We pass in a function that has more than one argument.
+ MethodHandle badFilter1 = MethodHandles.lookup().findStatic(
+ Main.class, "badFilter1",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ try {
+ adapter = MethodHandles.filterArguments(target, 0, badFilter1, filter2);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
+ static void voidFilter(char a, char b) {
+ System.out.println("voidFilter");
+ }
+
+ static String filter(char a, char b) {
+ return String.valueOf(a) + "+" + b;
+ }
+
+ static char badFilter(char a, char b) {
+ return 0;
+ }
+
+ static int target(String a, String b, String c) {
+ System.out.println("a: " + a + ", b: " + b + ", c: " + c);
+ return 57;
+ }
+
+ public static void testCollectArguments() throws Throwable {
+ // Test non-void filters.
+ MethodHandle filter = MethodHandles.lookup().findStatic(
+ Main.class, "filter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "target",
+ MethodType.methodType(int.class, String.class, String.class, String.class));
+
+ // Filter at position 0.
+ MethodHandle adapter = MethodHandles.collectArguments(target, 0, filter);
+ assertEquals(57, (int) adapter.invokeExact('a', 'b', "c", "d"));
+
+ // Filter at position 1.
+ adapter = MethodHandles.collectArguments(target, 1, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", 'b', 'c', "d"));
+
+ // Filter at position 2.
+ adapter = MethodHandles.collectArguments(target, 2, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", "b", 'c', 'd'));
+
+ // Test void filters. Note that we're passing in one more argument
+ // than usual because the filter returns nothing - we have to invoke with
+ // the full set of filter args and the full set of target args.
+ filter = MethodHandles.lookup().findStatic(Main.class, "voidFilter",
+ MethodType.methodType(void.class, char.class, char.class));
+ adapter = MethodHandles.collectArguments(target, 0, filter);
+ assertEquals(57, (int) adapter.invokeExact('a', 'b', "a", "b", "c"));
+
+ adapter = MethodHandles.collectArguments(target, 1, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", 'a', 'b', "b", "c"));
+
+ // Test out a few failure cases.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "filter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ // Bogus filter position.
+ try {
+ adapter = MethodHandles.collectArguments(target, 3, filter);
+ fail();
+ } catch (IndexOutOfBoundsException expected) {
+ }
+
+ // Mismatch in filter return type.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "badFilter",
+ MethodType.methodType(char.class, char.class, char.class));
+ try {
+ adapter = MethodHandles.collectArguments(target, 0, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
+ static int insertReceiver(String a, int b, Integer c, String d) {
+ System.out.println("a: " + a + ", b:" + b + ", c:" + c + ", d:" + d);
+ return 73;
+ }
+
+ public static void testInsertArguments() throws Throwable {
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "insertReceiver",
+ MethodType.methodType(int.class,
+ String.class, int.class, Integer.class, String.class));
+
+ // Basic single element array inserted at position 0.
+ MethodHandle adapter = MethodHandles.insertArguments(
+ target, 0, new Object[] { "foo" });
+ assertEquals(73, (int) adapter.invokeExact(45, Integer.valueOf(56), "bar"));
+
+ // Exercise unboxing.
+ adapter = MethodHandles.insertArguments(
+ target, 1, new Object[] { Integer.valueOf(56), 57 });
+ assertEquals(73, (int) adapter.invokeExact("foo", "bar"));
+
+ // Exercise a widening conversion.
+ adapter = MethodHandles.insertArguments(
+ target, 1, new Object[] { (short) 56, Integer.valueOf(57) });
+ assertEquals(73, (int) adapter.invokeExact("foo", "bar"));
+
+ // Insert an argument at the last position.
+ adapter = MethodHandles.insertArguments(
+ target, 3, new Object[] { "bar" });
+ assertEquals(73, (int) adapter.invokeExact("foo", 45, Integer.valueOf(46)));
+
+ // Exercise a few error cases.
+
+ // A reference type that can't be cast to another reference type.
+ try {
+ MethodHandles.insertArguments(target, 3, new Object[] { new Object() });
+ fail();
+ } catch (ClassCastException expected) {
+ }
+
+ // A boxed type that can't be unboxed correctly.
+ try {
+ MethodHandles.insertArguments(target, 1, new Object[] { Long.valueOf(56) });
+ fail();
+ } catch (ClassCastException expected) {
+ }
+ }
+
+ public static String foldFilter(char a, char b) {
+ return String.valueOf(a) + "+" + b;
+ }
+
+ public static void voidFoldFilter(String e, char a, char b) {
+ System.out.println(String.valueOf(a) + "+" + b);
+ }
+
+ public static int foldTarget(String a, char b, char c, String d) {
+ System.out.println("a: " + a + " ,b:" + b + " ,c:" + c + " ,d:" + d);
+ return 89;
+ }
+
+ public static void mismatchedVoidFilter(Integer a) {
+ }
+
+ public static Integer mismatchedNonVoidFilter(char a, char b) {
+ return null;
+ }
+
+ public static void testFoldArguments() throws Throwable {
+ // Test non-void filters.
+ MethodHandle filter = MethodHandles.lookup().findStatic(
+ Main.class, "foldFilter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "foldTarget",
+ MethodType.methodType(int.class, String.class,
+ char.class, char.class, String.class));
+
+ // Folder with a non-void type.
+ MethodHandle adapter = MethodHandles.foldArguments(target, filter);
+ assertEquals(89, (int) adapter.invokeExact('c', 'd', "e"));
+
+ // Folder with a void type.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "voidFoldFilter",
+ MethodType.methodType(void.class, String.class, char.class, char.class));
+ adapter = MethodHandles.foldArguments(target, filter);
+ assertEquals(89, (int) adapter.invokeExact("a", 'c', 'd', "e"));
+
+ // Test a few erroneous cases.
+
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "mismatchedVoidFilter",
+ MethodType.methodType(void.class, Integer.class));
+ try {
+ adapter = MethodHandles.foldArguments(target, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "mismatchedNonVoidFilter",
+ MethodType.methodType(Integer.class, char.class, char.class));
+ try {
+ adapter = MethodHandles.foldArguments(target, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
public static void fail() {
System.out.println("FAIL");
Thread.dumpStack();
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index cb798f049d..1b4f19509f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -136,9 +136,9 @@ ifeq ($(ART_TEST_OPTIMIZING_GRAPH_COLOR),true)
COMPILER_TYPES += regalloc_gc
OPTIMIZING_COMPILER_TYPES += regalloc_gc
endif
-RELOCATE_TYPES := relocate
-ifeq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
- RELOCATE_TYPES += no-relocate
+RELOCATE_TYPES := no-relocate
+ifeq ($(ART_TEST_RUN_TEST_RELOCATE),true)
+ RELOCATE_TYPES += relocate
endif
ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
RELOCATE_TYPES += relocate-npatchoat
@@ -161,7 +161,9 @@ JNI_TYPES := checkjni
ifeq ($(ART_TEST_JNI_FORCECOPY),true)
JNI_TYPES += forcecopy
endif
+ifeq ($(ART_TEST_RUN_TEST_IMAGE),true)
IMAGE_TYPES := picimage
+endif
ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
IMAGE_TYPES += no-image
endif
@@ -605,6 +607,7 @@ TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS :=
TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
454-get-vreg \
457-regs \
+ 602-deoptimizeable
ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -707,8 +710,10 @@ endif
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
-# Tests that check semantics for a non-debuggable app.
+# 909: Tests that check semantics for a non-debuggable app.
+# 137: relies on AOT code and debuggable makes us JIT always.
TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
+ 137-cfi \
909-attach-agent \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 28fa130443..186a1513ee 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -39,7 +39,7 @@ OPTIMIZE="y"
PATCHOAT=""
PREBUILD="y"
QUIET="n"
-RELOCATE="y"
+RELOCATE="n"
STRIP_DEX="n"
SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
@@ -344,7 +344,7 @@ if [ "$IS_JVMTI_TEST" = "y" ]; then
else
FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xfully-deoptable"
+ FLAGS="${FLAGS} -Xcompiler-option --debuggable"
# Always make the compilation be debuggable.
COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
fi
@@ -364,6 +364,8 @@ fi
if [ "$HAVE_IMAGE" = "n" ]; then
+ # Add 5 minutes to give some time to generate the boot image.
+ TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300))
DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art"
else
DALVIKVM_BOOT_OPT="-Ximage:${BOOT_IMAGE}"
diff --git a/test/run-test b/test/run-test
index a228789701..27c700e89e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -111,7 +111,7 @@ target_mode="yes"
dev_mode="no"
update_mode="no"
debug_mode="no"
-relocate="yes"
+relocate="no"
runtime="art"
usage="no"
build_only="no"
@@ -156,6 +156,7 @@ while true; do
shift
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
+ DEX_LOCATION="$tmp_dir"
runtime="jvm"
image_args=""
prebuild_mode="no"
@@ -626,8 +627,8 @@ if [ "$usage" = "yes" ]; then
echo " --strip-dex Strip the dex files before starting test."
echo " --relocate Force the use of relocating in the test, making"
echo " the image and oat files be relocated to a random"
- echo " address before running. (default)"
- echo " --no-relocate Force the use of no relocating in the test"
+ echo " address before running."
+ echo " --no-relocate Force the use of no relocating in the test. (default)"
echo " --image Run the test using a precompiled boot image. (default)"
echo " --no-image Run the test without a precompiled boot image."
echo " --host Use the host-mode virtual machine."
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 621d45a1bc..c5ed46034d 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -115,6 +115,8 @@ static AgentLib agents[] = {
{ "935-non-retransformable", common_transform::OnLoad, nullptr },
{ "936-search-onload", Test936SearchOnload::OnLoad, nullptr },
{ "937-hello-retransform-package", common_retransform::OnLoad, nullptr },
+ { "938-load-transform-bcp", common_retransform::OnLoad, nullptr },
+ { "939-hello-transformation-bcp", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {