-rw-r--r--  build/Android.gtest.mk  4
-rw-r--r--  build/Android.oat.mk  6
-rw-r--r--  build/art.go  15
-rw-r--r--  cmdline/cmdline_types.h  3
-rw-r--r--  compiler/Android.bp  8
-rw-r--r--  compiler/driver/compiler_driver-inl.h  91
-rw-r--r--  compiler/driver/compiler_driver.cc  283
-rw-r--r--  compiler/driver/compiler_driver.h  123
-rw-r--r--  compiler/driver/compiler_options.cc  6
-rw-r--r--  compiler/driver/compiler_options.h  5
-rw-r--r--  compiler/elf_builder.h  62
-rw-r--r--  compiler/elf_writer_quick.cc  33
-rw-r--r--  compiler/elf_writer_test.cc  19
-rw-r--r--  compiler/image_writer.cc  2
-rw-r--r--  compiler/intrinsics_list.h  4
-rw-r--r--  compiler/jni/jni_compiler_test.cc  4
-rw-r--r--  compiler/jni/quick/jni_compiler.cc  32
-rw-r--r--  compiler/optimizing/builder.h  4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  8
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  10
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  9
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  11
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  9
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc  9
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc  39
-rw-r--r--  compiler/optimizing/induction_var_analysis.h  11
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc  85
-rw-r--r--  compiler/optimizing/induction_var_range.cc  17
-rw-r--r--  compiler/optimizing/induction_var_range.h  10
-rw-r--r--  compiler/optimizing/inliner.h  4
-rw-r--r--  compiler/optimizing/loop_optimization.cc  94
-rw-r--r--  compiler/optimizing/loop_optimization.h  10
-rw-r--r--  compiler/optimizing/nodes.cc  2
-rw-r--r--  compiler/optimizing/nodes.h  21
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc  317
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  14
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h  2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  4
-rw-r--r--  compiler/optimizing/reference_type_propagation.h  6
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc  8
-rw-r--r--  compiler/optimizing/ssa_builder.h  4
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc  49
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h  19
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.cc  39
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.h  19
-rw-r--r--  compiler/utils/jni_macro_assembler.h  61
-rw-r--r--  compiler/utils/mips/assembler_mips.h  30
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h  31
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.cc  50
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h  21
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.cc  46
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h  19
-rw-r--r--  dex2oat/dex2oat.cc  23
-rw-r--r--  oatdump/oatdump.cc  13
-rw-r--r--  runtime/Android.bp  1
-rw-r--r--  runtime/check_jni.cc  10
-rw-r--r--  runtime/class_linker-inl.h  2
-rw-r--r--  runtime/class_linker.cc  12
-rw-r--r--  runtime/class_linker_test.cc  6
-rw-r--r--  runtime/common_throws.cc  57
-rw-r--r--  runtime/common_throws.h  58
-rw-r--r--  runtime/debugger.cc  30
-rw-r--r--  runtime/dex_file_annotations.cc  2
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  10
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc  2
-rw-r--r--  runtime/gc/allocation_listener.h  50
-rw-r--r--  runtime/gc/allocation_record.cc  5
-rw-r--r--  runtime/gc/allocation_record.h  3
-rw-r--r--  runtime/gc/collector/semi_space.cc  1
-rw-r--r--  runtime/gc/heap-inl.h  53
-rw-r--r--  runtime/gc/heap.cc  332
-rw-r--r--  runtime/gc/heap.h  130
-rw-r--r--  runtime/handle_scope-inl.h  141
-rw-r--r--  runtime/handle_scope.h  187
-rw-r--r--  runtime/handle_scope_test.cc  85
-rw-r--r--  runtime/imtable-inl.h  73
-rw-r--r--  runtime/imtable.h  12
-rw-r--r--  runtime/imtable_test.cc  104
-rw-r--r--  runtime/indirect_reference_table-inl.h  2
-rw-r--r--  runtime/indirect_reference_table.cc  2
-rw-r--r--  runtime/indirect_reference_table_test.cc  65
-rw-r--r--  runtime/interpreter/interpreter.cc  1
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc  1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/mterp/config_arm  4
-rw-r--r--  runtime/interpreter/mterp/config_arm64  4
-rw-r--r--  runtime/interpreter/mterp/config_mips  4
-rw-r--r--  runtime/interpreter/mterp/config_mips64  4
-rw-r--r--  runtime/interpreter/mterp/config_x86  4
-rw-r--r--  runtime/interpreter/mterp/config_x86_64  4
-rw-r--r--  runtime/interpreter/mterp/mips/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/mips/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S  24
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm64.S  24
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips.S  26
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips64.S  22
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86.S  20
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86_64.S  20
-rw-r--r--  runtime/interpreter/mterp/x86/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/x86/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_unused_fa.S  1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_unused_fb.S  1
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc  5
-rw-r--r--  runtime/jni_internal.cc  24
-rw-r--r--  runtime/jvalue-inl.h  32
-rw-r--r--  runtime/jvalue.h  9
-rw-r--r--  runtime/mirror/array-inl.h  8
-rw-r--r--  runtime/mirror/class-inl.h  88
-rw-r--r--  runtime/mirror/class.cc  250
-rw-r--r--  runtime/mirror/class.h  154
-rw-r--r--  runtime/mirror/field-inl.h  2
-rw-r--r--  runtime/mirror/method.cc  12
-rw-r--r--  runtime/mirror/method_type.cc  2
-rw-r--r--  runtime/mirror/object.cc  50
-rw-r--r--  runtime/mirror/object.h  9
-rw-r--r--  runtime/mirror/object_array-inl.h  54
-rw-r--r--  runtime/mirror/object_test.cc  8
-rw-r--r--  runtime/mirror/stack_trace_element.cc  6
-rw-r--r--  runtime/mirror/string-inl.h  16
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc  18
-rw-r--r--  runtime/native/java_lang_Class.cc  13
-rw-r--r--  runtime/native/java_lang_System.cc  7
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc  2
-rw-r--r--  runtime/native_stack_dump.cc  8
-rw-r--r--  runtime/obj_ptr-inl.h  2
-rw-r--r--  runtime/openjdkjvmti/Android.bp  5
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc  53
-rw-r--r--  runtime/openjdkjvmti/art_jvmti.h  11
-rw-r--r--  runtime/openjdkjvmti/events-inl.h  124
-rw-r--r--  runtime/openjdkjvmti/events.cc  249
-rw-r--r--  runtime/openjdkjvmti/events.h  110
-rw-r--r--  runtime/openjdkjvmti/heap.cc  166
-rw-r--r--  runtime/openjdkjvmti/heap.h  47
-rw-r--r--  runtime/openjdkjvmti/object_tagging.cc  154
-rw-r--r--  runtime/openjdkjvmti/object_tagging.h  92
-rw-r--r--  runtime/proxy_test.cc  4
-rw-r--r--  runtime/reference_table.cc  2
-rw-r--r--  runtime/reflection-inl.h  2
-rw-r--r--  runtime/reflection_test.cc  47
-rw-r--r--  runtime/runtime_android.cc  6
-rw-r--r--  runtime/runtime_linux.cc  53
-rw-r--r--  runtime/thread.cc  16
-rw-r--r--  runtime/thread.h  14
-rw-r--r--  runtime/verify_object-inl.h  4
-rw-r--r--  test/530-checker-loops2/src/Main.java  34
-rw-r--r--  test/618-checker-induction/src/Main.java  71
-rw-r--r--  test/619-checker-current-method/expected.txt  0
-rw-r--r--  test/619-checker-current-method/info.txt  2
-rw-r--r--  test/619-checker-current-method/src/Main.java  33
-rw-r--r--  test/902-hello-transformation/transform.cc  2
-rw-r--r--  test/903-hello-tagging/tagging.cc  3
-rwxr-xr-x  test/904-object-allocation/build  17
-rw-r--r--  test/904-object-allocation/expected.txt  8
-rw-r--r--  test/904-object-allocation/info.txt  1
-rwxr-xr-x  test/904-object-allocation/run  43
-rw-r--r--  test/904-object-allocation/src/Main.java  132
-rw-r--r--  test/904-object-allocation/tracking.cc  102
-rw-r--r--  test/904-object-allocation/tracking.h  30
-rwxr-xr-x  test/905-object-free/build  17
-rw-r--r--  test/905-object-free/expected.txt  10
-rw-r--r--  test/905-object-free/info.txt  1
-rwxr-xr-x  test/905-object-free/run  43
-rw-r--r--  test/905-object-free/src/Main.java  72
-rw-r--r--  test/905-object-free/tracking_free.cc  79
-rw-r--r--  test/905-object-free/tracking_free.h  30
-rwxr-xr-x  test/906-iterate-heap/build  17
-rw-r--r--  test/906-iterate-heap/expected.txt  2
-rw-r--r--  test/906-iterate-heap/info.txt  1
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc  187
-rw-r--r--  test/906-iterate-heap/iterate_heap.h  30
-rwxr-xr-x  test/906-iterate-heap/run  43
-rw-r--r--  test/906-iterate-heap/src/Main.java  146
-rw-r--r--  test/Android.bp  25
-rw-r--r--  test/Android.run-test.mk  13
-rw-r--r--  test/IMTA/Interfaces.java  24
-rw-r--r--  test/IMTB/Interfaces.java  28
-rwxr-xr-x  test/etc/run-test-jar  2
-rw-r--r--  test/ti-agent/common_load.cc  9
-rw-r--r--  test/ti-agent/common_load.h  28
186 files changed, 4756 insertions, 1913 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 850702ad23..4f273e5979 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -29,6 +29,8 @@ GTEST_DEX_DIRECTORIES := \
GetMethodSignature \
ImageLayoutA \
ImageLayoutB \
+ IMTA \
+ IMTB \
Instrumentation \
Interfaces \
Lookup \
@@ -88,6 +90,7 @@ ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB
+ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
@@ -593,6 +596,7 @@ ART_GTEST_dex_file_test_DEX_DEPS :=
ART_GTEST_exception_test_DEX_DEPS :=
ART_GTEST_elf_writer_test_HOST_DEPS :=
ART_GTEST_elf_writer_test_TARGET_DEPS :=
+ART_GTEST_imtable_test_DEX_DEPS :=
ART_GTEST_jni_compiler_test_DEX_DEPS :=
ART_GTEST_jni_internal_test_DEX_DEPS :=
ART_GTEST_oat_file_assistant_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index c4887e61ff..e297b4f531 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -122,7 +122,8 @@ $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \
$$(LOCAL_$(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
- --host --android-root=$$(HOST_OUT) --include-patch-information --generate-debug-info \
+ --host --android-root=$$(HOST_OUT) --include-patch-information \
+ --generate-debug-info --generate-build-id \
$$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
$$(core_oat_name): $$(core_image_name)
@@ -239,7 +240,8 @@ $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \
--instruction-set-variant=$$($(3)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$$($(3)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --android-root=$$(PRODUCT_OUT)/system --include-patch-information --generate-debug-info \
+ --android-root=$$(PRODUCT_OUT)/system --include-patch-information \
+ --generate-debug-info --generate-build-id \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
$$(core_oat_name): $$(core_image_name)
diff --git a/build/art.go b/build/art.go
index b826538696..1164cbc553 100644
--- a/build/art.go
+++ b/build/art.go
@@ -15,7 +15,6 @@
package art
import (
- "android/soong"
"android/soong/android"
"android/soong/cc"
"fmt"
@@ -222,13 +221,13 @@ func testInstall(ctx android.InstallHookContext) {
var artTestMutex sync.Mutex
func init() {
- soong.RegisterModuleType("art_cc_library", artLibrary)
- soong.RegisterModuleType("art_cc_binary", artBinary)
- soong.RegisterModuleType("art_cc_test", artTest)
- soong.RegisterModuleType("art_cc_test_library", artTestLibrary)
- soong.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
- soong.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
- soong.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
+ android.RegisterModuleType("art_cc_library", artLibrary)
+ android.RegisterModuleType("art_cc_binary", artBinary)
+ android.RegisterModuleType("art_cc_test", artTest)
+ android.RegisterModuleType("art_cc_test_library", artTestLibrary)
+ android.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
+ android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
+ android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
}
func artGlobalDefaultsFactory() (blueprint.Module, []interface{}) {
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index b229be4abb..72d7df31fb 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -507,7 +507,8 @@ struct XGcOption {
bool verify_pre_gc_rosalloc_ = kIsDebugBuild;
bool verify_pre_sweeping_rosalloc_ = false;
bool verify_post_gc_rosalloc_ = false;
- bool measure_ = kIsDebugBuild;
+ // Do no measurements for kUseTableLookupReadBarrier to avoid test timeouts. b/31679493
+ bool measure_ = kIsDebugBuild && !kUseTableLookupReadBarrier;
bool gcstress_ = false;
};
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 6faac095c7..61f682c2bd 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -191,6 +191,14 @@ art_cc_defaults {
],
include_dirs: ["art/disassembler"],
export_include_dirs: ["."],
+
+ // For SHA-1 checksumming of build ID
+ static: {
+ whole_static_libs: ["libcrypto"],
+ },
+ shared: {
+ shared_libs: ["libcrypto"],
+ },
}
gensrcs {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 0884a2a9f5..d807fcad96 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,10 +31,6 @@
namespace art {
-inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
- return mUnit->GetClassLinker()->FindDexCache(Thread::Current(), *mUnit->GetDexFile(), false);
-}
-
inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit) {
return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Ptr();
@@ -87,10 +83,6 @@ inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
return resolved_field;
}
-inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
- return Runtime::Current()->GetClassLinker()->FindDexCache(Thread::Current(), *dex_file, false);
-}
-
inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -100,23 +92,6 @@ inline ArtField* CompilerDriver::ResolveField(
is_static);
}
-inline void CompilerDriver::GetResolvedFieldDexFileLocation(
- ArtField* resolved_field, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
- ObjPtr<mirror::Class> declaring_class = resolved_field->GetDeclaringClass();
- *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
- *declaring_class_idx = declaring_class->GetDexTypeIndex();
- *declaring_field_idx = resolved_field->GetDexFieldIndex();
-}
-
-inline bool CompilerDriver::IsFieldVolatile(ArtField* field) {
- return field->IsVolatile();
-}
-
-inline MemberOffset CompilerDriver::GetFieldOffset(ArtField* field) {
- return field->GetOffset();
-}
-
inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx) {
@@ -219,43 +194,6 @@ inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
return result.first;
}
-inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class,
- ArtField* resolved_field) {
- DCHECK(resolved_field->IsStatic());
- ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
- return referrer_class == fields_class;
-}
-
-inline bool CompilerDriver::CanAssumeClassIsInitialized(mirror::Class* klass) {
- // Being loaded is a pre-requisite for being initialized but let's do the cheap check first.
- //
- // NOTE: When AOT compiling an app, we eagerly initialize app classes (and potentially their
- // super classes in the boot image) but only those that have a trivial initialization, i.e.
- // without <clinit>() or static values in the dex file for that class or any of its super
- // classes. So while we could see the klass as initialized during AOT compilation and have
- // it only loaded at runtime, the needed initialization would have to be trivial and
- // unobservable from Java, so we may as well treat it as initialized.
- if (!klass->IsInitialized()) {
- return false;
- }
- return CanAssumeClassIsLoaded(klass);
-}
-
-inline bool CompilerDriver::CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class,
- mirror::Class* klass) {
- return (referrer_class != nullptr
- && !referrer_class->IsInterface()
- && referrer_class->IsSubClass(klass))
- || CanAssumeClassIsInitialized(klass);
-}
-
-inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- ArtField* resolved_field) {
- DCHECK(resolved_field->IsStatic());
- ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
- return CanReferrerAssumeClassIsInitialized(referrer_class, fields_class.Ptr());
-}
-
inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -275,35 +213,6 @@ inline ArtMethod* CompilerDriver::ResolveMethod(
return resolved_method;
}
-inline void CompilerDriver::GetResolvedMethodDexFileLocation(
- ArtMethod* resolved_method, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) {
- mirror::Class* declaring_class = resolved_method->GetDeclaringClass();
- *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
- *declaring_class_idx = declaring_class->GetDexTypeIndex();
- *declaring_method_idx = resolved_method->GetDexMethodIndex();
-}
-
-inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
- ArtMethod* resolved_method, InvokeType type) {
- if (type == kVirtual || type == kSuper) {
- return resolved_method->GetMethodIndex();
- } else if (type == kInterface) {
- return resolved_method->GetDexMethodIndex();
- } else {
- return DexFile::kDexNoIndex16;
- }
-}
-
-inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class,
- ArtMethod* resolved_method) {
- if (!resolved_method->IsStatic()) {
- return true;
- }
- mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- return CanReferrerAssumeClassIsInitialized(referrer_class, methods_class);
-}
-
} // namespace art
#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e2f8d929c3..8d64c65b1d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -95,8 +95,6 @@ class CompilerDriver::AOTCompilationStats {
public:
AOTCompilationStats()
: stats_lock_("AOT compilation statistics lock"),
- types_in_dex_cache_(0), types_not_in_dex_cache_(0),
- strings_in_dex_cache_(0), strings_not_in_dex_cache_(0),
resolved_types_(0), unresolved_types_(0),
resolved_instance_fields_(0), unresolved_instance_fields_(0),
resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
@@ -112,8 +110,6 @@ class CompilerDriver::AOTCompilationStats {
}
void Dump() {
- DumpStat(types_in_dex_cache_, types_not_in_dex_cache_, "types known to be in dex cache");
- DumpStat(strings_in_dex_cache_, strings_not_in_dex_cache_, "strings known to be in dex cache");
DumpStat(resolved_types_, unresolved_types_, "types resolved");
DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_,
@@ -164,26 +160,6 @@ class CompilerDriver::AOTCompilationStats {
#define STATS_LOCK()
#endif
- void TypeInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- types_in_dex_cache_++;
- }
-
- void TypeNotInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- types_not_in_dex_cache_++;
- }
-
- void StringInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- strings_in_dex_cache_++;
- }
-
- void StringNotInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- strings_not_in_dex_cache_++;
- }
-
void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_types_++;
@@ -225,67 +201,6 @@ class CompilerDriver::AOTCompilationStats {
type_based_devirtualization_++;
}
- // Indicate that a method of the given type was resolved at compile time.
- void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- resolved_methods_[type]++;
- }
-
- // Indicate that a method of the given type was unresolved at compile time as it was in an
- // unknown dex file.
- void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- unresolved_methods_[type]++;
- }
-
- // Indicate that a type of virtual method dispatch has been converted into a direct method
- // dispatch.
- void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK(type == kVirtual || type == kInterface || type == kSuper);
- STATS_LOCK();
- virtual_made_direct_[type]++;
- }
-
- // Indicate that a method of the given type was able to call directly into boot.
- void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- direct_calls_to_boot_[type]++;
- }
-
- // Indicate that a method of the given type was able to be resolved directly from boot.
- void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- direct_methods_to_boot_[type]++;
- }
-
- void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
- STATS_LOCK();
- if (flags == 0) {
- unresolved_methods_[type]++;
- } else {
- DCHECK_NE((flags & kFlagMethodResolved), 0);
- resolved_methods_[type]++;
- if ((flags & kFlagVirtualMadeDirect) != 0) {
- virtual_made_direct_[type]++;
- if ((flags & kFlagPreciseTypeDevirtualization) != 0) {
- type_based_devirtualization_++;
- }
- } else {
- DCHECK_EQ((flags & kFlagPreciseTypeDevirtualization), 0);
- }
- if ((flags & kFlagDirectCallToBoot) != 0) {
- direct_calls_to_boot_[type]++;
- }
- if ((flags & kFlagDirectMethodToBoot) != 0) {
- direct_methods_to_boot_[type]++;
- }
- }
- }
-
// A check-cast could be eliminated due to verifier type analysis.
void SafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
@@ -301,12 +216,6 @@ class CompilerDriver::AOTCompilationStats {
private:
Mutex stats_lock_;
- size_t types_in_dex_cache_;
- size_t types_not_in_dex_cache_;
-
- size_t strings_in_dex_cache_;
- size_t strings_not_in_dex_cache_;
-
size_t resolved_types_;
size_t unresolved_types_;
@@ -472,7 +381,8 @@ static void SetupIntrinsic(Thread* self,
? cls->FindDeclaredDirectMethod(method_name, signature, image_size)
: cls->FindDeclaredVirtualMethod(method_name, signature, image_size);
if (method == nullptr) {
- LOG(FATAL) << "Could not find method of intrinsic " << class_name << method_name << signature;
+ LOG(FATAL) << "Could not find method of intrinsic "
+ << class_name << " " << method_name << " " << signature;
}
DCHECK_EQ(method->GetInvokeType(), invoke_type);
method->SetIntrinsic(static_cast<uint32_t>(intrinsic));
@@ -497,11 +407,13 @@ void CompilerDriver::CompileAll(jobject class_loader,
// those compilations will pick up a boot image that have the ArtMethod already
// set with the intrinsics flag.
ScopedObjectAccess soa(Thread::Current());
-#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvironmentOrCache, SideEffects, Exceptions, ClassName, MethodName, Signature) \
+#define SETUP_INTRINSICS(Name, InvokeType, NeedsEnvironmentOrCache, SideEffects, Exceptions, \
+ ClassName, MethodName, Signature) \
SetupIntrinsic(soa.Self(), Intrinsics::k##Name, InvokeType, ClassName, MethodName, Signature);
#include "intrinsics_list.h"
-INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+INTRINSICS_LIST(SETUP_INTRINSICS)
#undef INTRINSICS_LIST
+#undef SETUP_INTRINSICS
}
// Compile:
// 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex
@@ -845,9 +757,10 @@ void CompilerDriver::Resolve(jobject class_loader,
// TODO: Collect the relevant string indices in parallel, then allocate them sequentially in a
// stable order.
-static void ResolveConstStrings(CompilerDriver* driver,
+static void ResolveConstStrings(Handle<mirror::DexCache> dex_cache,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const DexFile::CodeItem* code_item)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (code_item == nullptr) {
// Abstract or native method.
return;
@@ -855,18 +768,18 @@ static void ResolveConstStrings(CompilerDriver* driver,
const uint16_t* code_ptr = code_item->insns_;
const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_;
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
while (code_ptr < code_end) {
const Instruction* inst = Instruction::At(code_ptr);
switch (inst->Opcode()) {
- case Instruction::CONST_STRING: {
- uint32_t string_index = inst->VRegB_21c();
- driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index);
- break;
- }
+ case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO: {
- uint32_t string_index = inst->VRegB_31c();
- driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index);
+ uint32_t string_index = (inst->Opcode() == Instruction::CONST_STRING)
+ ? inst->VRegB_21c()
+ : inst->VRegB_31c();
+ mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
+ CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
break;
}
@@ -881,7 +794,13 @@ static void ResolveConstStrings(CompilerDriver* driver,
static void ResolveConstStrings(CompilerDriver* driver,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
+
for (const DexFile* dex_file : dex_files) {
+ dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file, false));
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
size_t class_def_count = dex_file->NumClassDefs();
@@ -922,7 +841,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
continue;
}
previous_direct_method_idx = method_idx;
- ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem());
+ ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem());
it.Next();
}
// Virtual methods.
@@ -936,7 +855,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
continue;
}
previous_virtual_method_idx = method_idx;
- ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem());
+ ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem());
it.Next();
}
DCHECK(!it.HasNext());
@@ -1407,54 +1326,6 @@ void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodRefere
dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index);
}
-bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
- uint32_t type_idx) {
- bool result = false;
- if ((GetCompilerOptions().IsBootImage() &&
- IsImageClass(dex_cache->GetDexFile()->StringDataByIdx(
- dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) ||
- Runtime::Current()->UseJitCompilation()) {
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- result = (resolved_class != nullptr);
- }
-
- if (result) {
- stats_->TypeInDexCache();
- } else {
- stats_->TypeNotInDexCache();
- }
- return result;
-}
-
-bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
- uint32_t string_idx) {
- // See also Compiler::ResolveDexFile
-
- bool result = false;
- if (GetCompilerOptions().IsBootImage() || Runtime::Current()->UseJitCompilation()) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
- soa.Self(), dex_file, false)));
- if (GetCompilerOptions().IsBootImage()) {
- // We resolve all const-string strings when building for the image.
- class_linker->ResolveString(dex_file, string_idx, dex_cache);
- result = true;
- } else {
- // Just check whether the dex cache already has the string.
- DCHECK(Runtime::Current()->UseJitCompilation());
- result = (dex_cache->GetResolvedString(string_idx) != nullptr);
- }
- }
- if (result) {
- stats_->StringInDexCache();
- } else {
- stats_->StringNotInDexCache();
- }
- return result;
-}
-
bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx) {
@@ -1518,108 +1389,6 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
return result;
}
-bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
- bool* is_type_initialized, bool* use_direct_type_ptr,
- uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
- ScopedObjectAccess soa(Thread::Current());
- Runtime* runtime = Runtime::Current();
- mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(
- soa.Self(), dex_file, false);
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- if (resolved_class == nullptr) {
- return false;
- }
- if (GetCompilerOptions().GetCompilePic()) {
- // Do not allow a direct class pointer to be used when compiling for position-independent
- return false;
- }
- *out_is_finalizable = resolved_class->IsFinalizable();
- gc::Heap* heap = runtime->GetHeap();
- const bool compiling_boot = heap->IsCompilingBoot();
- const bool support_boot_image_fixup = GetSupportBootImageFixup();
- if (compiling_boot) {
- // boot -> boot class pointers.
- // True if the class is in the image at boot compiling time.
- const bool is_image_class = GetCompilerOptions().IsBootImage() && IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
- // True if pc relative load works.
- if (is_image_class && support_boot_image_fixup) {
- *is_type_initialized = resolved_class->IsInitialized();
- *use_direct_type_ptr = false;
- *direct_type_ptr = 0;
- return true;
- } else {
- return false;
- }
- } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
- *is_type_initialized = resolved_class->IsInitialized();
- // If the class may move around, then don't embed it as a direct pointer.
- *use_direct_type_ptr = true;
- *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
- return true;
- } else {
- // True if the class is in the image at app compiling time.
- const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
- if (class_in_image && support_boot_image_fixup) {
- // boot -> app class pointers.
- *is_type_initialized = resolved_class->IsInitialized();
- // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
- *use_direct_type_ptr = !GetCompilerOptions().GetIncludePatchInformation();
- *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
- return true;
- } else {
- // app -> app class pointers.
- // Give up because app does not have an image and class
- // isn't created at compile time. TODO: implement this
- // if/when each app gets an image.
- return false;
- }
- }
-}
-
-bool CompilerDriver::CanEmbedReferenceTypeInCode(ClassReference* ref,
- bool* use_direct_ptr,
- uintptr_t* direct_type_ptr) {
- CHECK(ref != nullptr);
- CHECK(use_direct_ptr != nullptr);
- CHECK(direct_type_ptr != nullptr);
-
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
- bool is_initialized = false;
- bool unused_finalizable;
- // Make sure we have a finished Reference class object before attempting to use it.
- if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(),
- reference_class->GetDexTypeIndex(), &is_initialized,
- use_direct_ptr, direct_type_ptr, &unused_finalizable) ||
- !is_initialized) {
- return false;
- }
- ref->first = &reference_class->GetDexFile();
- ref->second = reference_class->GetDexClassDefIndex();
- return true;
-}
-
-uint32_t CompilerDriver::GetReferenceSlowFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CompilerDriver::GetReferenceDisableFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
-DexCacheArraysLayout CompilerDriver::GetDexCacheArraysLayout(const DexFile* dex_file) {
- return ContainsElement(GetDexFilesForOatFile(), dex_file)
- ? DexCacheArraysLayout(GetInstructionSetPointerSize(instruction_set_), dex_file)
- : DexCacheArraysLayout();
-}
-
void CompilerDriver::ProcessedInstanceField(bool resolved) {
if (!resolved) {
stats_->UnresolvedInstanceField();
@@ -1638,10 +1407,6 @@ void CompilerDriver::ProcessedStaticField(bool resolved, bool local) {
}
}
-void CompilerDriver::ProcessedInvoke(InvokeType invoke_type, int flags) {
- stats_->ProcessedInvoke(invoke_type, flags);
-}
-
ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
const DexCompilationUnit* mUnit, bool is_put,
const ScopedObjectAccess& soa) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index eb1222c315..9a4dd857fc 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -182,15 +182,6 @@ class CompilerDriver {
uint16_t class_def_index)
REQUIRES(!requires_constructor_barrier_lock_);
- // Callbacks from compiler to see what runtime checks must be generated.
-
- bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
- uint32_t type_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
- REQUIRES(!Locks::mutator_lock_);
-
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
@@ -205,24 +196,6 @@ class CompilerDriver {
bool* out_is_finalizable)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
- bool* is_type_initialized, bool* use_direct_type_ptr,
- uintptr_t* direct_type_ptr, bool* out_is_finalizable);
-
- // Query methods for the java.lang.ref.Reference class.
- bool CanEmbedReferenceTypeInCode(ClassReference* ref,
- bool* use_direct_type_ptr, uintptr_t* direct_type_ptr);
- uint32_t GetReferenceSlowFlagOffset() const;
- uint32_t GetReferenceDisableFlagOffset() const;
-
- // Get the DexCache for the
- mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -250,19 +223,6 @@ class CompilerDriver {
uint32_t field_idx, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
- void GetResolvedFieldDexFileLocation(
- ArtField* resolved_field, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool IsFieldVolatile(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Find a dex cache for a dex file.
- inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
@@ -288,15 +248,6 @@ class CompilerDriver {
uint32_t* storage_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Is static field's in referrer's class?
- bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Is static field's class initialized?
- bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- ArtField* resolved_field)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -304,37 +255,8 @@ class CompilerDriver {
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
- void GetResolvedMethodDexFileLocation(
- ArtMethod* resolved_method, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Get the index in the vtable of the method.
- uint16_t GetResolvedMethodVTableIndex(
- ArtMethod* resolved_method, InvokeType type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Is method's class initialized for an invoke?
- // For static invokes to determine whether we need to consider potential call to <clinit>().
- // For non-static invokes, assuming a non-null reference, the class is always initialized.
- bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
- // dex cache arrays don't have a fixed layout.
- DexCacheArraysLayout GetDexCacheArraysLayout(const DexFile* dex_file);
-
void ProcessedInstanceField(bool resolved);
void ProcessedStaticField(bool resolved, bool local);
- void ProcessedInvoke(InvokeType invoke_type, int flags);
-
- void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- const ScopedObjectAccess& soa, bool is_static,
- ArtField** resolved_field,
- mirror::Class** referrer_class,
- mirror::DexCache** dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
@@ -386,6 +308,7 @@ class CompilerDriver {
void SetDedupeEnabled(bool dedupe_enabled) {
compiled_method_storage_.SetDedupeEnabled(dedupe_enabled);
}
+
bool DedupeEnabled() const {
return compiled_method_storage_.DedupeEnabled();
}
@@ -449,6 +372,13 @@ class CompilerDriver {
return current_dex_to_dex_methods_;
}
+ // Compute constant code and method pointers when possible.
+ void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
+ ArtMethod* method,
+ /* out */ uintptr_t* direct_code,
+ /* out */ uintptr_t* direct_method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
// Return whether the declaring class of `resolved_member` is
// available to `referrer_class` for read or write access using two
@@ -477,38 +407,9 @@ class CompilerDriver {
uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we assume that the klass is initialized?
- bool CanAssumeClassIsInitialized(mirror::Class* klass)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
- // The only external contract is that unresolved method has flags 0 and resolved non-0.
- enum {
- kBitMethodResolved = 0,
- kBitVirtualMadeDirect,
- kBitPreciseTypeDevirtualization,
- kBitDirectCallToBoot,
- kBitDirectMethodToBoot
- };
- static constexpr int kFlagMethodResolved = 1 << kBitMethodResolved;
- static constexpr int kFlagVirtualMadeDirect = 1 << kBitVirtualMadeDirect;
- static constexpr int kFlagPreciseTypeDevirtualization = 1 << kBitPreciseTypeDevirtualization;
- static constexpr int kFlagDirectCallToBoot = 1 << kBitDirectCallToBoot;
- static constexpr int kFlagDirectMethodToBoot = 1 << kBitDirectMethodToBoot;
- static constexpr int kFlagsMethodResolvedVirtualMadeDirect =
- kFlagMethodResolved | kFlagVirtualMadeDirect;
- static constexpr int kFlagsMethodResolvedPreciseTypeDevirtualization =
- kFlagsMethodResolvedVirtualMadeDirect | kFlagPreciseTypeDevirtualization;
-
- public: // TODO make private or eliminate.
- // Compute constant code and method pointers when possible.
- void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
- ArtMethod* method,
- /* out */ uintptr_t* direct_code,
- /* out */ uintptr_t* direct_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
+ const DexCompilationUnit* mUnit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void PreCompile(jobject class_loader,
@@ -566,8 +467,6 @@ class CompilerDriver {
REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
- static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index cbcc169f41..c222f90043 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -37,6 +37,7 @@ CompilerOptions::CompilerOptions()
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
+ generate_build_id_(false),
implicit_null_checks_(true),
implicit_so_checks_(true),
implicit_suspend_checks_(false),
@@ -97,6 +98,7 @@ CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
debuggable_(debuggable),
generate_debug_info_(generate_debug_info),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
+ generate_build_id_(false),
implicit_null_checks_(implicit_null_checks),
implicit_so_checks_(implicit_so_checks),
implicit_suspend_checks_(implicit_suspend_checks),
@@ -196,6 +198,10 @@ bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usa
generate_mini_debug_info_ = true;
} else if (option == "--no-generate-mini-debug-info") {
generate_mini_debug_info_ = false;
+ } else if (option == "--generate-build-id") {
+ generate_build_id_ = true;
+ } else if (option == "--no-generate-build-id") {
+ generate_build_id_ = false;
} else if (option == "--debuggable") {
debuggable_ = true;
} else if (option.starts_with("--top-k-profile-threshold=")) {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 8e4a775558..3c920d9601 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -187,6 +187,10 @@ class CompilerOptions FINAL {
return generate_mini_debug_info_;
}
+ bool GetGenerateBuildId() const {
+ return generate_build_id_;
+ }
+
bool GetImplicitNullChecks() const {
return implicit_null_checks_;
}
@@ -297,6 +301,7 @@ class CompilerOptions FINAL {
bool debuggable_;
bool generate_debug_info_;
bool generate_mini_debug_info_;
+ bool generate_build_id_;
bool implicit_null_checks_;
bool implicit_so_checks_;
bool implicit_suspend_checks_;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 73240bed03..31a75294bc 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -36,6 +36,7 @@ namespace art {
// The basic layout of the elf file:
// Elf_Ehdr - The ELF header.
// Elf_Phdr[] - Program headers for the linker.
+// .note.gnu.build-id - Optional build ID section (SHA-1 digest).
// .rodata - DEX files and oat metadata.
// .text - Compiled code.
// .bss - Zero-initialized writeable section.
@@ -75,6 +76,10 @@ template <typename ElfTypes>
class ElfBuilder FINAL {
public:
static constexpr size_t kMaxProgramHeaders = 16;
+ // SHA-1 digest. Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid
+ // spreading this header dependency for just this single constant.
+ static constexpr size_t kBuildIdLen = 20;
+
using Elf_Addr = typename ElfTypes::Addr;
using Elf_Off = typename ElfTypes::Off;
using Elf_Word = typename ElfTypes::Word;
@@ -458,6 +463,49 @@ class ElfBuilder FINAL {
} abiflags_;
};
+ class BuildIdSection FINAL : public Section {
+ public:
+ BuildIdSection(ElfBuilder<ElfTypes>* owner,
+ const std::string& name,
+ Elf_Word type,
+ Elf_Word flags,
+ const Section* link,
+ Elf_Word info,
+ Elf_Word align,
+ Elf_Word entsize)
+ : Section(owner, name, type, flags, link, info, align, entsize),
+ digest_start_(-1) {
+ }
+
+ void Write() {
+ // The size fields are 32-bit on both 32-bit and 64-bit systems, confirmed
+ // with the 64-bit linker and libbfd code. The size of name and desc must
+ // be a multiple of 4 and it currently is.
+ this->WriteUint32(4); // namesz.
+ this->WriteUint32(kBuildIdLen); // descsz.
+ this->WriteUint32(3); // type = NT_GNU_BUILD_ID.
+ this->WriteFully("GNU", 4); // name.
+ digest_start_ = this->Seek(0, kSeekCurrent);
+ static_assert(kBuildIdLen % 4 == 0, "expecting a mutliple of 4 for build ID length");
+ this->WriteFully(std::string(kBuildIdLen, '\0').c_str(), kBuildIdLen); // desc.
+ }
+
+ off_t GetDigestStart() {
+ CHECK_GT(digest_start_, 0);
+ return digest_start_;
+ }
+
+ private:
+ bool WriteUint32(uint32_t v) {
+ return this->WriteFully(&v, sizeof(v));
+ }
+
+ // File offset where the build ID digest starts.
+ // Populated with zeros first, then updated with the actual value as the
+ // very last thing in the output file creation.
+ off_t digest_start_;
+ };
+
ElfBuilder(InstructionSet isa, const InstructionSetFeatures* features, OutputStream* output)
: isa_(isa),
features_(features),
@@ -479,6 +527,7 @@ class ElfBuilder FINAL {
shstrtab_(this, ".shstrtab", 0, 1),
abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0,
isa, features),
+ build_id_(this, ".note.gnu.build-id", SHT_NOTE, SHF_ALLOC, nullptr, 0, 4, 0),
started_(false),
write_program_headers_(false),
loaded_size_(0u),
@@ -489,6 +538,7 @@ class ElfBuilder FINAL {
dynamic_.phdr_type_ = PT_DYNAMIC;
eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME;
abiflags_.phdr_type_ = PT_MIPS_ABIFLAGS;
+ build_id_.phdr_type_ = PT_NOTE;
}
~ElfBuilder() {}
@@ -741,6 +791,17 @@ class ElfBuilder FINAL {
abiflags_.End();
}
+ void WriteBuildIdSection() {
+ build_id_.Start();
+ build_id_.Write();
+ build_id_.End();
+ }
+
+ void WriteBuildId(uint8_t build_id[kBuildIdLen]) {
+ stream_.Seek(build_id_.GetDigestStart(), kSeekSet);
+ stream_.WriteFully(build_id, kBuildIdLen);
+ }
+
// Returns true if all writes and seeks on the output stream succeeded.
bool Good() {
return stream_.Good();
@@ -932,6 +993,7 @@ class ElfBuilder FINAL {
Section debug_line_;
StringSection shstrtab_;
AbiflagsSection abiflags_;
+ BuildIdSection build_id_;
std::vector<std::unique_ptr<Section>> other_sections_;
// List of used section in the order in which they were written.
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 36cd2327c4..0d6575cffd 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -16,6 +16,7 @@
#include "elf_writer_quick.h"
+#include <openssl/sha.h>
#include <unordered_map>
#include <unordered_set>
@@ -126,6 +127,8 @@ class ElfWriterQuick FINAL : public ElfWriter {
std::unique_ptr<DebugInfoTask> debug_info_task_;
std::unique_ptr<ThreadPool> debug_info_thread_pool_;
+ void ComputeFileBuildId(uint8_t (*build_id)[ElfBuilder<ElfTypes>::kBuildIdLen]);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
@@ -167,6 +170,9 @@ ElfWriterQuick<ElfTypes>::~ElfWriterQuick() {}
template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::Start() {
builder_->Start();
+ if (compiler_options_->GetGenerateBuildId()) {
+ builder_->WriteBuildIdSection();
+ }
}
template <typename ElfTypes>
@@ -275,11 +281,36 @@ void ElfWriterQuick<ElfTypes>::WritePatchLocations(
template <typename ElfTypes>
bool ElfWriterQuick<ElfTypes>::End() {
builder_->End();
-
+ if (compiler_options_->GetGenerateBuildId()) {
+ uint8_t build_id[ElfBuilder<ElfTypes>::kBuildIdLen];
+ ComputeFileBuildId(&build_id);
+ builder_->WriteBuildId(build_id);
+ }
return builder_->Good();
}
template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::ComputeFileBuildId(
+ uint8_t (*build_id)[ElfBuilder<ElfTypes>::kBuildIdLen]) {
+ constexpr int kBufSize = 8192;
+ std::vector<char> buffer(kBufSize);
+ int64_t offset = 0;
+ SHA_CTX ctx;
+ SHA1_Init(&ctx);
+ while (true) {
+ int64_t bytes_read = elf_file_->Read(buffer.data(), kBufSize, offset);
+ CHECK_GE(bytes_read, 0);
+ if (bytes_read == 0) {
+ // End of file.
+ break;
+ }
+ SHA1_Update(&ctx, buffer.data(), bytes_read);
+ offset += bytes_read;
+ }
+ SHA1_Final(*build_id, &ctx);
+}
+
+template <typename ElfTypes>
OutputStream* ElfWriterQuick<ElfTypes>::GetStream() {
return builder_->GetStream();
}
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index d5f16637be..b58004976e 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -101,6 +101,25 @@ TEST_F(ElfWriterTest, dlsym) {
}
}
+TEST_F(ElfWriterTest, CheckBuildIdPresent) {
+ std::string elf_location = GetCoreOatLocation();
+ std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA);
+ LOG(INFO) << "elf_filename=" << elf_filename;
+
+ std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
+ ASSERT_TRUE(file.get() != nullptr);
+ {
+ std::string error_msg;
+ std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+ false,
+ false,
+ /*low_4gb*/false,
+ &error_msg));
+ CHECK(ef.get() != nullptr) << error_msg;
+ EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
+ }
+}
+
TEST_F(ElfWriterTest, EncodeDecodeOatPatches) {
const std::vector<std::vector<uintptr_t>> test_data {
{ 0, 4, 8, 15, 128, 200 },
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8ae04a1e49..13c73dcf42 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1389,7 +1389,7 @@ void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
void ImageWriter::CalculateNewObjectOffsets() {
Thread* const self = Thread::Current();
- StackHandleScopeCollection handles(self);
+ VariableSizedHandleScope handles(self);
std::vector<Handle<ObjectArray<Object>>> image_roots;
for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 5877f57b74..b617387ef8 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -1,7 +1,7 @@
/*
- * Copyright (C, "", "", "") 2015 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
*
- * Licensed under the Apache License, Version 2.0 (the "License", "", "", "");
+ * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 36e252742c..afb8fce8d7 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -401,7 +401,6 @@ jobject JniCompilerTest::class_loader_;
ScopedCheckHandleScope top_handle_scope_check; \
SCOPED_TRACE("Normal JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
- TEST_DISABLED_FOR_MIPS(); \
SetCheckGenericJni(true); \
TestName ## Impl(); \
}
@@ -420,7 +419,6 @@ jobject JniCompilerTest::class_loader_;
ScopedCheckHandleScope top_handle_scope_check; \
SCOPED_TRACE("@FastNative JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
- TEST_DISABLED_FOR_MIPS(); \
SetCheckGenericJni(true); \
TestName ## Impl(); \
}
@@ -532,7 +530,7 @@ struct ScopedCheckHandleScope {
<< "invocations have finished (as before they were invoked).";
}
- HandleScope* const handle_scope_;
+ BaseHandleScope* const handle_scope_;
};
static void expectNumStackReferences(size_t val1, size_t val2) {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 7e58d789d0..13d8c166cc 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -276,10 +276,32 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
__ IncreaseFrameSize(main_out_arg_size);
// Call the read barrier for the declaring class loaded from the method for a static call.
+ // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static && !is_critical_native) {
- // XX: Why is this necessary only for the jclass? Why not for every single object ref?
- // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
+ const bool kReadBarrierFastPath =
+ (instruction_set != kMips) && (instruction_set != kMips64);
+ std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
+ if (kReadBarrierFastPath) {
+ skip_cold_path_label = __ CreateLabel();
+ // Fast path for supported targets.
+ //
+ // Check if gc_is_marking is set -- if it's not, we don't need
+ // a read barrier so skip it.
+ __ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(),
+ Thread::IsGcMarkingOffset<kPointerSize>(),
+ Thread::IsGcMarkingSize());
+ // Jump over the slow path if gc is marking is false.
+ __ Jump(skip_cold_path_label.get(),
+ JNIMacroUnaryCondition::kZero,
+ main_jni_conv->InterproceduralScratchRegister());
+ }
+
+ // Construct slow path for read barrier:
+ //
+ // Call into the runtime's ReadBarrierJni and have it fix up
+ // the object address if it was moved.
+
ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize,
pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
@@ -310,6 +332,10 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
__ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister());
}
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset.
+
+ if (kReadBarrierFastPath) {
+ __ Bind(skip_cold_path_label.get());
+ }
}
// 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
@@ -322,7 +348,7 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ThreadOffset<kPointerSize> jni_start =
is_synchronized
? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStartSynchronized)
- : (is_fast_native
+ : ((is_fast_native && !reference_return) // TODO: support @FastNative returning obj
? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastStart)
: QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart));
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 580ef72767..f896f1199e 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -43,7 +43,7 @@ class HGraphBuilder : public ValueObject {
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
Handle<mirror::DexCache> dex_cache,
- StackHandleScopeCollection* handles)
+ VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
@@ -68,7 +68,7 @@ class HGraphBuilder : public ValueObject {
// Only for unit testing.
HGraphBuilder(HGraph* graph,
const DexFile::CodeItem& code_item,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
Primitive::Type return_type = Primitive::kPrimInt)
: graph_(graph),
dex_file_(nullptr),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9870876879..77d6f23fff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1129,7 +1129,13 @@ void CodeGeneratorARM::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
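Each backend now guards the ArtMethod* store at sp[0] with RequiresCurrentMethod(), so methods whose graphs never read the current method back do not pay for the store. A minimal sketch of that decision, using simplified stand-in types rather than ART's real CodeGenerator API:

    // Sketch (assumption): simplified model of the frame-entry decision above.
    struct GraphSummary {
      bool has_instruction_needing_environment;  // e.g. something may call into the runtime
      bool has_current_method_instruction;       // HCurrentMethod survived optimization
    };

    static bool RequiresCurrentMethod(const GraphSummary& g) {
      return g.has_instruction_needing_environment || g.has_current_method_instruction;
    }

    static void GenerateFrameEntry(const GraphSummary& g, void** sp0, void* art_method) {
      // ... spill callee-saves and adjust SP exactly as before ...
      if (RequiresCurrentMethod(g)) {
        *sp0 = art_method;  // only emit the store when something can read it back
      }
    }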
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 969d653f97..f02b028541 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1046,7 +1046,15 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
// ... : other preserved fp registers.
// ... : reserved frame space.
// sp[0] : current method.
- __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+ } else {
+ __ Claim(frame_size);
+ }
GetAssembler()->cfi().AdjustCFAOffset(frame_size);
GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
frame_size - GetCoreSpillSize());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b826a2c537..e336df8c6c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -743,9 +743,12 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
// TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
}
- // Store the current method pointer.
- // TODO: can we not do this if RequiresCurrentMethod() returns false?
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 02576bda67..010bf24232 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -556,9 +556,14 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
- static_assert(IsInt<16>(kCurrentMethodStackOffset),
- "kCurrentMethodStackOffset must fit into int16_t");
- __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS64::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0b23599665..960f01ce9d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -898,7 +898,12 @@ void CodeGeneratorX86::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
- __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 28638d721d..665d028338 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1140,8 +1140,13 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
- CpuRegister(kMethodRegisterArgument));
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ CpuRegister(kMethodRegisterArgument));
+ }
}
void CodeGeneratorX86_64::GenerateFrameExit() {
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index aa3f26809a..adfe09ba9f 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -343,14 +343,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
for (i.Advance(); !i.Done(); i.Advance()) {
HInstruction* inst = i.Current();
DCHECK(!inst->IsControlFlow());
- if (!inst->HasSideEffects()
- && !inst->CanThrow()
- && !inst->IsSuspendCheck()
- && !inst->IsNativeDebugInfo()
- // If we added an explicit barrier then we should keep it.
- && !inst->IsMemoryBarrier()
- && !inst->IsParameterValue()
- && !inst->HasUses()) {
+ if (inst->IsDeadAndRemovable()) {
block->RemoveInstruction(inst);
MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction);
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index c501ccf80f..55fcb12fa8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -87,11 +87,12 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
- scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
cycle_(std::less<HInstruction*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ type_(Primitive::kPrimVoid),
induction_(std::less<HLoopInformation*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
@@ -103,7 +104,6 @@ void HInductionVarAnalysis::Run() {
for (HReversePostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
HBasicBlock* graph_block = it_graph.Current();
// Don't analyze irreducible loops.
- // TODO(ajcbik): could/should we remove this restriction?
if (graph_block->IsLoopHeader() && !graph_block->GetLoopInformation()->IsIrreducible()) {
VisitLoop(graph_block->GetLoopInformation());
}
@@ -121,7 +121,7 @@ void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
HBasicBlock* loop_block = it_loop.Current();
DCHECK(loop_block->IsInLoop());
if (loop_block->GetLoopInformation() != loop) {
- continue; // Inner loops already visited.
+ continue; // Inner loops visited later.
}
// Visit phi-operations and instructions.
for (HInstructionIterator it(loop_block->GetPhis()); !it.Done(); it.Advance()) {
@@ -285,6 +285,9 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
} else if (instruction->IsSub()) {
update = SolveAddSub(
loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kSub, true);
+ } else if (instruction->IsXor()) {
+ update = SolveXor(
+ loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), true);
} else if (instruction->IsTypeConversion()) {
update = SolveCnv(instruction->AsTypeConversion());
}
@@ -553,6 +556,27 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopIn
return nullptr;
}
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveXor(HLoopInformation* loop,
+ HInstruction* entry_phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ bool is_first_call) {
+ InductionInfo* b = LookupInfo(loop, y);
+ // Solve within a tight cycle on x = x ^ c.
+ if (b != nullptr && b->induction_class == kInvariant) {
+ if (x == entry_phi && entry_phi->InputCount() == 2 && instruction == entry_phi->InputAt(1)) {
+ InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
+ return CreateInduction(kPeriodic, CreateInvariantOp(kXor, initial, b), initial, type_);
+ }
+ }
+ // Try the other way around if considered for the first time.
+ if (is_first_call) {
+ return SolveXor(loop, entry_phi, instruction, y, x, false);
+ }
+ return nullptr;
+}
+
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveCnv(HTypeConversion* conversion) {
Primitive::Type from = conversion->GetInputType();
Primitive::Type to = conversion->GetResultType();
@@ -850,8 +874,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
int64_t value = -1;
if (IsExact(a, &value)) {
if (value == 0) {
- // Simplify 0 + b = b, 0 * b = 0.
- if (op == kAdd) {
+ // Simplify 0 + b = b, 0 ^ b = b, 0 * b = 0.
+ if (op == kAdd || op == kXor) {
return b;
} else if (op == kMul) {
return a;
@@ -867,8 +891,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
}
if (IsExact(b, &value)) {
if (value == 0) {
- // Simplify a + 0 = a, a - 0 = a, a * 0 = 0, -0 = 0.
- if (op == kAdd || op == kSub) {
+ // Simplify a + 0 = a, a - 0 = a, a ^ 0 = a, a * 0 = 0, -0 = 0.
+ if (op == kAdd || op == kSub || op == kXor) {
return a;
} else if (op == kMul || op == kNeg) {
return b;
@@ -939,6 +963,7 @@ std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) {
case kNeg: inv += " - "; break;
case kMul: inv += " * "; break;
case kDiv: inv += " / "; break;
+ case kXor: inv += " ^ "; break;
case kLT: inv += " < "; break;
case kLE: inv += " <= "; break;
case kGT: inv += " > "; break;
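The new kXor handling classifies k = k ^ c (with c loop-invariant) as a periodic induction that alternates between the initial value and initial ^ c, which is what the two new tests in induction_var_analysis_test.cc further down assert. A small self-contained check of that closed form:

    #include <cassert>

    // Sketch: verifies the closed form the analysis derives for x = x ^ c --
    // a period-2 sequence: even iterations see `initial`, odd ones see `initial ^ c`.
    int main() {
      const int initial = 0;
      const int c = 1;
      int k = initial;
      for (int i = 0; i < 100; ++i) {
        const int expected = (i % 2 == 0) ? initial : (initial ^ c);
        assert(k == expected);
        k ^= c;
      }
      return 0;
    }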
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index cd4c830645..06aee31b88 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -64,6 +64,7 @@ class HInductionVarAnalysis : public HOptimization {
kNeg,
kMul,
kDiv,
+ kXor,
kFetch,
// Trip-counts.
kTripCountInLoop, // valid in full loop; loop is finite
@@ -171,7 +172,13 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* x,
HInstruction* y,
InductionOp op,
- bool is_first_call);
+ bool is_first_call); // possibly swaps x and y to try again
+ InductionInfo* SolveXor(HLoopInformation* loop,
+ HInstruction* entry_phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ bool is_first_call); // possibly swaps x and y to try again
InductionInfo* SolveCnv(HTypeConversion* conversion);
// Trip count information.
@@ -219,8 +226,8 @@ class HInductionVarAnalysis : public HOptimization {
// Temporary book-keeping during the analysis.
uint32_t global_depth_;
ArenaVector<HInstruction*> stack_;
- ArenaVector<HInstruction*> scc_;
ArenaSafeMap<HInstruction*, NodeInfo> map_;
+ ArenaVector<HInstruction*> scc_;
ArenaSafeMap<HInstruction*, InductionInfo*> cycle_;
Primitive::Type type_;
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 292bc4e06e..7c467f6c94 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -107,7 +107,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
}
// Builds if-statement at depth d.
- HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock **ifF) {
+ HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
@@ -259,15 +259,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
// k = - i;
// }
BuildLoopNest(1);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, basic_[0]), 0);
PerformInductionVarAnalysis();
@@ -291,10 +291,10 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) {
HPhi* k = InsertLoopPhi(0, 0);
k->AddInput(constant0_);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
HInstruction* store1 = InsertArrayStore(add, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, add, constant1_), 0);
HInstruction* store2 = InsertArrayStore(sub, 0);
k->AddInput(sub);
@@ -381,7 +381,7 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
k->AddInput(constant0_);
HInstruction* store = InsertArrayStore(k, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
k->AddInput(sub);
PerformInductionVarAnalysis();
@@ -407,7 +407,7 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
HInstruction* store = InsertArrayStore(k, 0);
k->AddInput(t);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0], 0), 0);
t->AddInput(sub);
PerformInductionVarAnalysis();
@@ -431,15 +431,15 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
HPhi* k = InsertLoopPhi(0, 0);
k->AddInput(constant0_);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, k, constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, k), 0);
k->AddInput(
InsertInstruction(new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0));
@@ -497,7 +497,7 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
k->AddInput(constant0_);
HInstruction* store = InsertArrayStore(k, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k), 0);
k->AddInput(sub);
PerformInductionVarAnalysis();
@@ -506,6 +506,45 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(sub, 0).c_str());
}
+TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // k = k ^ 1;
+ // }
+ BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
+ HInstruction* store = InsertArrayStore(k, 0);
+ HInstruction* x = InsertInstruction(
+ new (&allocator_) HXor(Primitive::kPrimInt, k, constant1_), 0);
+ k->AddInput(x);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(x, 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) {
+ // Setup:
+ // k = 100;
+ // for (int i = 0; i < 100; i++) {
+ // k = k ^ 100;
+ // }
+ BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant100_);
+
+ HInstruction* x = InsertInstruction(
+ new (&allocator_) HXor(Primitive::kPrimInt, k, constant100_), 0);
+ k->AddInput(x);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic(((100) ^ (100)), (100)):PrimInt", GetInductionInfo(x, 0).c_str());
+}
+
TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
// Setup:
// k = 0;
@@ -526,15 +565,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
k_header->AddInput(k_body);
// Derived expressions.
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, k_body, constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, k_body), 0);
PerformInductionVarAnalysis();
@@ -563,7 +602,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
k[d] = InsertLoopPhi(0, d);
}
- HInstruction *inc = InsertInstruction(
+ HInstruction* inc = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, k[9]), 9);
HInstruction* store = InsertArrayStore(inc, 9);
@@ -597,7 +636,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
// a[i] = 0;
// }
BuildLoopNest(1);
- HInstruction *conv = InsertInstruction(
+ HInstruction* conv = InsertInstruction(
new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], -1), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* store2 = InsertArrayStore(basic_[0], 0);
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index cd8b7c7960..140c7f0c40 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -525,6 +525,8 @@ InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::Induct
return GetMul(info->op_a, info->op_b, trip, in_body, is_min);
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, in_body, is_min);
+ case HInductionVarAnalysis::kXor:
+ return GetXor(info->op_a, info->op_b);
case HInductionVarAnalysis::kFetch:
return GetFetch(info->fetch, trip, in_body, is_min);
case HInductionVarAnalysis::kTripCountInLoop:
@@ -626,6 +628,21 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
return Value();
}
+InductionVarRange::Value InductionVarRange::GetXor(
+ HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2) const {
+ int64_t v1 = 0;
+ int64_t v2 = 0;
+ // Only accept exact values.
+ if (IsConstant(info1, kExact, &v1) && IsConstant(info2, kExact, &v2)) {
+ int64_t value = v1 ^ v2;
+ if (CanLongValueFitIntoInt(value)) {
+ return Value(static_cast<int32_t>(value));
+ }
+ }
+ return Value();
+}
+
InductionVarRange::Value InductionVarRange::MulRangeAndConstant(
int64_t value,
HInductionVarAnalysis::InductionInfo* info,
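GetXor() is deliberately conservative: it only produces a range when both operands are known exactly, folding the constant and checking that the result still fits in 32 bits. A standalone sketch of that rule (the optional-based value type here is illustrative, not ART's Value):

    #include <cstdint>
    #include <optional>

    // Sketch (assumption): simplified version of the folding rule added above.
    std::optional<int32_t> XorRange(std::optional<int64_t> a, std::optional<int64_t> b) {
      if (a.has_value() && b.has_value()) {
        const int64_t value = *a ^ *b;
        if (value >= INT32_MIN && value <= INT32_MAX) {
          return static_cast<int32_t>(value);  // exact value usable as both min and max
        }
      }
      return std::nullopt;  // unknown range, mirrors returning an empty Value()
    }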
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 63850b34b8..895130064a 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -131,6 +131,14 @@ class InductionVarRange {
*/
void Replace(HInstruction* instruction, HInstruction* fetch, HInstruction* replacement);
+ /**
+ * Incrementally updates induction information for just the given loop.
+ */
+ void ReVisit(HLoopInformation* loop) {
+ induction_analysis_->induction_.erase(loop);
+ induction_analysis_->VisitLoop(loop);
+ }
+
private:
/*
* Enum used in IsConstant() request.
@@ -185,6 +193,8 @@ class InductionVarRange {
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const;
+ Value GetXor(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2) const;
Value MulRangeAndConstant(int64_t value,
HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 486626b1fe..a1dcd58a84 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,7 @@ class HInliner : public HOptimization {
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
CompilerDriver* compiler_driver,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
size_t depth)
@@ -197,7 +197,7 @@ class HInliner : public HOptimization {
const size_t total_number_of_dex_registers_;
const size_t depth_;
size_t number_of_inlined_instructions_;
- StackHandleScopeCollection* const handles_;
+ VariableSizedHandleScope* const handles_;
DISALLOW_COPY_AND_ASSIGN(HInliner);
};
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 93c6c20d7c..33fa87d568 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -69,33 +69,6 @@ static bool IsEmptyBody(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
i->GetNext() != nullptr && i->GetNext()->IsGoto();
}
-static HBasicBlock* TryRemovePreHeader(HBasicBlock* preheader, HBasicBlock* entry_block) {
- if (preheader->GetPredecessors().size() == 1) {
- HBasicBlock* entry = preheader->GetSinglePredecessor();
- HInstruction* anchor = entry->GetLastInstruction();
- // If the pre-header has a single predecessor we can remove it too if
- // either the pre-header just contains a goto, or if the predecessor
- // is not the entry block so we can push instructions backward
- // (moving computation into the entry block is too dangerous!).
- if (preheader->GetFirstInstruction() == nullptr ||
- preheader->GetFirstInstruction()->IsGoto() ||
- (entry != entry_block && anchor->IsGoto())) {
- // Push non-goto statements backward to empty the pre-header.
- for (HInstructionIterator it(preheader->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (!instruction->IsGoto()) {
- if (!instruction->CanBeMoved()) {
- return nullptr; // pushing failed to move all
- }
- it.Current()->MoveBefore(anchor);
- }
- }
- return entry;
- }
- }
- return nullptr;
-}
-
static void RemoveFromCycle(HInstruction* instruction) {
// A bit more elaborate than the usual instruction removal,
// since there may be a cycle in the use structure.
@@ -115,7 +88,8 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
loop_allocator_(nullptr),
top_loop_(nullptr),
last_loop_(nullptr),
- iset_(nullptr) {
+ iset_(nullptr),
+ induction_simplication_count_(0) {
}
void HLoopOptimization::Run() {
@@ -211,11 +185,17 @@ void HLoopOptimization::RemoveLoop(LoopNode* node) {
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
+ int current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
- // Visit loop after its inner loops have been visited.
+ // Visit loop after its inner loops have been visited. If the induction of any inner
+ // loop has been simplified, recompute the induction information of this loop first.
+ if (current_induction_simplification_count != induction_simplication_count_) {
+ induction_range_.ReVisit(node->loop_info);
+ }
SimplifyInduction(node);
+ SimplifyBlocks(node);
RemoveIfEmptyLoop(node);
}
}
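Because SimplifyInduction can delete an inner loop's induction cycle, the induction information cached for enclosing loops may become stale. The counter added here lets the traversal detect that cheaply and call ReVisit() only when needed. A compact sketch of the bookkeeping, with stand-in types:

    // Sketch (assumption): simplified model of the inner-to-outer traversal above.
    struct LoopNode {
      LoopNode* inner;
      LoopNode* next;
    };

    static int simplification_count = 0;

    static void ReVisit(LoopNode*) { /* recompute induction info for this loop only */ }
    static bool Simplify(LoopNode*) { /* true if an induction cycle was removed */ return false; }

    static void Traverse(LoopNode* node) {
      for (; node != nullptr; node = node->next) {
        const int count_on_entry = simplification_count;
        if (node->inner != nullptr) {
          Traverse(node->inner);
        }
        if (count_on_entry != simplification_count) {
          ReVisit(node);  // an inner loop changed; refresh this loop's induction first
        }
        if (Simplify(node)) {
          ++simplification_count;  // tell enclosing loops their cached view is stale
        }
      }
    }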
@@ -233,11 +213,41 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
iset_->clear();
int32_t use_count = 0;
if (IsPhiInduction(phi, iset_) &&
- IsOnlyUsedAfterLoop(*node->loop_info, phi, &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
TryReplaceWithLastValue(phi, use_count, preheader)) {
for (HInstruction* i : *iset_) {
RemoveFromCycle(i);
}
+ induction_simplication_count_++;
+ }
+ }
+}
+
+void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
+ for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ // Remove instructions that are dead, usually resulting from eliminating induction cycles.
+ for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ block->RemoveInstruction(instruction);
+ }
+ }
+ // Remove trivial control flow blocks from the loop body, again usually resulting
+ // from eliminating induction cycles.
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->GetFirstInstruction()->IsGoto()) {
+ HBasicBlock* pred = block->GetSinglePredecessor();
+ HBasicBlock* succ = block->GetSingleSuccessor();
+ if (succ->GetPredecessors().size() == 1) {
+ pred->ReplaceSuccessor(block, succ);
+ block->ClearDominanceInformation();
+ block->SetDominator(pred); // needed by next disconnect.
+ block->DisconnectAndDelete();
+ pred->AddDominatedBlock(succ);
+ succ->SetDominator(pred);
+ }
}
}
}
@@ -272,41 +282,31 @@ void HLoopOptimization::RemoveIfEmptyLoop(LoopNode* node) {
int32_t use_count = 0;
if (IsEmptyHeader(header, iset_) &&
IsEmptyBody(body, iset_) &&
- IsOnlyUsedAfterLoop(*node->loop_info, header->GetFirstPhi(), &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
TryReplaceWithLastValue(header->GetFirstPhi(), use_count, preheader)) {
- HBasicBlock* entry = TryRemovePreHeader(preheader, graph_->GetEntryBlock());
body->DisconnectAndDelete();
exit->RemovePredecessor(header);
header->RemoveSuccessor(exit);
header->ClearDominanceInformation();
header->SetDominator(preheader); // needed by next disconnect.
header->DisconnectAndDelete();
- // If allowed, remove preheader too, which may expose next outer empty loop
- // Otherwise, link preheader directly to exit to restore the flow graph.
- if (entry != nullptr) {
- entry->ReplaceSuccessor(preheader, exit);
- entry->AddDominatedBlock(exit);
- exit->SetDominator(entry);
- preheader->DisconnectAndDelete();
- } else {
- preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
- preheader->AddDominatedBlock(exit);
- exit->SetDominator(preheader);
- }
+ preheader->AddSuccessor(exit);
+ preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddDominatedBlock(exit);
+ exit->SetDominator(preheader);
// Update hierarchy.
RemoveLoop(node);
}
}
-bool HLoopOptimization::IsOnlyUsedAfterLoop(const HLoopInformation& loop_info,
+bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (iset_->find(user) == iset_->end()) { // not excluded?
HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
- if (other_loop_info != nullptr && other_loop_info->IsIn(loop_info)) {
+ if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
return false;
}
++*use_count;
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index b2bf1c8507..9c4b462a1f 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -46,7 +46,7 @@ class HLoopOptimization : public HOptimization {
inner(nullptr),
previous(nullptr),
next(nullptr) {}
- const HLoopInformation* const loop_info;
+ HLoopInformation* const loop_info;
LoopNode* outer;
LoopNode* inner;
LoopNode* previous;
@@ -61,9 +61,10 @@ class HLoopOptimization : public HOptimization {
void TraverseLoopsInnerToOuter(LoopNode* node);
void SimplifyInduction(LoopNode* node);
+ void SimplifyBlocks(LoopNode* node);
void RemoveIfEmptyLoop(LoopNode* node);
- bool IsOnlyUsedAfterLoop(const HLoopInformation& loop_info,
+ bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count);
void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
@@ -87,6 +88,11 @@ class HLoopOptimization : public HOptimization {
// Contents reside in phase-local heap memory.
ArenaSet<HInstruction*>* iset_;
+ // Counter that tracks how many induction cycles have been simplified. Useful
+ // to trigger incremental updates of induction variable analysis of outer loops
+ // when the induction of inner loops has changed.
+ int32_t induction_simplication_count_;
+
friend class LoopOptimizationTest;
DISALLOW_COPY_AND_ASSIGN(HLoopOptimization);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 874c1edf35..1e69966b98 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -35,7 +35,7 @@ namespace art {
// double).
static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);
-void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
+void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) {
ScopedObjectAccess soa(Thread::Current());
// Create the inexact Object reference type and store it in the HGraph.
ClassLinker* linker = Runtime::Current()->GetClassLinker();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 828c0e51c8..6f4f3c9505 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -336,7 +336,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
}
// Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
- void InitializeInexactObjectRTI(StackHandleScopeCollection* handles);
+ void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
ArenaAllocator* GetArena() const { return arena_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -1931,6 +1931,19 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return !HasEnvironmentUses() && GetUses().HasExactlyOneElement();
}
+ bool IsDeadAndRemovable() const {
+ return
+ !HasSideEffects() &&
+ !CanThrow() &&
+ !IsSuspendCheck() &&
+ !IsControlFlow() &&
+ !IsNativeDebugInfo() &&
+ !IsParameterValue() &&
+ !HasUses() &&
+ // If we added an explicit barrier then we should keep it.
+ !IsMemoryBarrier();
+ }
+
// Does this instruction strictly dominate `other_instruction`?
// Returns false if this instruction and `other_instruction` are the same.
// Aborts if this instruction and `other_instruction` are both phis.
@@ -2080,10 +2093,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// to the current method. Such instructions are:
// (1): Instructions that require an environment, as calling the runtime requires
// to walk the stack and have the current method stored at a specific stack address.
- // (2): Object literals like classes and strings, that are loaded from the dex cache
- // fields of the current method.
+ // (2): HCurrentMethod, potentially used by HInvokeStaticOrDirect, HLoadString, or HLoadClass
+ // to access the dex cache.
bool NeedsCurrentMethod() const {
- return NeedsEnvironment() || IsLoadClass() || IsLoadString();
+ return NeedsEnvironment() || IsCurrentMethod();
}
// Returns whether the code generation of the instruction will require to have access
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 6c5030c9cb..f735dc8cb3 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -1,10 +1,10 @@
static constexpr uint8_t expected_asm_kThumb2[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x0B, 0xB0,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x0B, 0xB0,
0xBD, 0xEC, 0x02, 0x8A, 0x60, 0xBD,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x42, 0x0A, 0x42,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x0A, 0x42,
0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B, 0x0E,
0x40,
};
@@ -19,20 +19,19 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: .cfi_remember_state
-// 0x0000000a: add sp, sp, #44
-// 0x0000000c: .cfi_def_cfa_offset: 20
-// 0x0000000c: vpop.f32 {s16-s17}
-// 0x00000010: .cfi_def_cfa_offset: 12
-// 0x00000010: .cfi_restore_extended: r80
-// 0x00000010: .cfi_restore_extended: r81
-// 0x00000010: pop {r5, r6, pc}
-// 0x00000012: .cfi_restore_state
-// 0x00000012: .cfi_def_cfa_offset: 64
+// 0x00000008: .cfi_remember_state
+// 0x00000008: add sp, sp, #44
+// 0x0000000a: .cfi_def_cfa_offset: 20
+// 0x0000000a: vpop.f32 {s16-s17}
+// 0x0000000e: .cfi_def_cfa_offset: 12
+// 0x0000000e: .cfi_restore_extended: r80
+// 0x0000000e: .cfi_restore_extended: r81
+// 0x0000000e: pop {r5, r6, pc}
+// 0x00000010: .cfi_restore_state
+// 0x00000010: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
+ 0xFF, 0x03, 0x01, 0xD1, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0x17, 0x40, 0xF9,
0xF5, 0x7B, 0x43, 0xA9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
@@ -41,7 +40,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
0x44, 0xD4, 0x44, 0xD5, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: str x0, [sp, #-64]!
+// 0x00000000: sub sp, sp, #0x40 (64)
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: str x20, [sp, #40]
// 0x00000008: .cfi_offset: r20 at cfa-24
@@ -67,12 +66,12 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x00000024: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86[] = {
- 0x56, 0x55, 0x83, 0xEC, 0x34, 0x89, 0x04, 0x24, 0x83, 0xC4, 0x34, 0x5D,
+ 0x56, 0x55, 0x83, 0xEC, 0x34, 0x83, 0xC4, 0x34, 0x5D,
0x5E, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86[] = {
0x41, 0x0E, 0x08, 0x86, 0x02, 0x41, 0x0E, 0x0C, 0x85, 0x03, 0x43, 0x0E,
- 0x40, 0x43, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
+ 0x40, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
0x04, 0xC6, 0x41, 0x0B, 0x0E, 0x40,
};
// 0x00000000: push esi
@@ -83,29 +82,28 @@ static constexpr uint8_t expected_cfi_kX86[] = {
// 0x00000002: .cfi_offset: r5 at cfa-12
// 0x00000002: sub esp, 52
// 0x00000005: .cfi_def_cfa_offset: 64
-// 0x00000005: mov [esp], eax
-// 0x00000008: .cfi_remember_state
-// 0x00000008: add esp, 52
-// 0x0000000b: .cfi_def_cfa_offset: 12
-// 0x0000000b: pop ebp
-// 0x0000000c: .cfi_def_cfa_offset: 8
-// 0x0000000c: .cfi_restore: r5
-// 0x0000000c: pop esi
-// 0x0000000d: .cfi_def_cfa_offset: 4
-// 0x0000000d: .cfi_restore: r6
-// 0x0000000d: ret
-// 0x0000000e: .cfi_restore_state
-// 0x0000000e: .cfi_def_cfa_offset: 64
+// 0x00000005: .cfi_remember_state
+// 0x00000005: add esp, 52
+// 0x00000008: .cfi_def_cfa_offset: 12
+// 0x00000008: pop ebp
+// 0x0000000a: .cfi_def_cfa_offset: 8
+// 0x0000000a: .cfi_restore: r5
+// 0x0000000a: pop esi
+// 0x0000000b: .cfi_def_cfa_offset: 4
+// 0x0000000b: .cfi_restore: r6
+// 0x0000000b: ret
+// 0x0000000c: .cfi_restore_state
+// 0x0000000c: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86_64[] = {
0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
- 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18,
0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
- 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x0A, 0x47, 0xDD, 0x47,
0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
0x41, 0x0B, 0x0E, 0x40,
};
@@ -121,34 +119,33 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000000d: .cfi_offset: r30 at cfa-32
// 0x0000000d: movsd [rsp + 24], xmm12
// 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: movq [rsp], rdi
-// 0x00000018: .cfi_remember_state
-// 0x00000018: movsd xmm12, [rsp + 24]
-// 0x0000001f: .cfi_restore: r29
-// 0x0000001f: movsd xmm13, [rsp + 32]
-// 0x00000026: .cfi_restore: r30
-// 0x00000026: addq rsp, 40
-// 0x0000002a: .cfi_def_cfa_offset: 24
-// 0x0000002a: pop rbx
-// 0x0000002b: .cfi_def_cfa_offset: 16
-// 0x0000002b: .cfi_restore: r3
-// 0x0000002b: pop rbp
-// 0x0000002c: .cfi_def_cfa_offset: 8
-// 0x0000002c: .cfi_restore: r6
-// 0x0000002c: ret
-// 0x0000002d: .cfi_restore_state
-// 0x0000002d: .cfi_def_cfa_offset: 64
+// 0x00000014: .cfi_remember_state
+// 0x00000014: movsd xmm12, [rsp + 24]
+// 0x0000001c: .cfi_restore: r29
+// 0x0000001c: movsd xmm13, [rsp + 32]
+// 0x00000022: .cfi_restore: r30
+// 0x00000022: addq rsp, 40
+// 0x00000026: .cfi_def_cfa_offset: 24
+// 0x00000026: pop rbx
+// 0x00000027: .cfi_def_cfa_offset: 16
+// 0x00000027: .cfi_restore: r3
+// 0x00000027: pop rbp
+// 0x00000028: .cfi_def_cfa_offset: 8
+// 0x00000028: .cfi_restore: r6
+// 0x00000028: ret
+// 0x00000029: .cfi_restore_state
+// 0x00000029: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
+ 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
0x34, 0x00, 0xB0, 0x8F, 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7,
0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x4C, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
+ 0x48, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -161,33 +158,33 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: lw r31, +60(r29)
-// 0x00000020: .cfi_restore: r31
-// 0x00000020: lw r17, +56(r29)
-// 0x00000024: .cfi_restore: r17
-// 0x00000024: lw r16, +52(r29)
-// 0x00000028: .cfi_restore: r16
-// 0x00000028: ldc1 f22, +40(r29)
-// 0x0000002c: ldc1 f20, +32(r29)
-// 0x00000030: jr r31
-// 0x00000034: addiu r29, r29, 64
-// 0x00000038: .cfi_def_cfa_offset: 0
-// 0x00000038: .cfi_restore_state
-// 0x00000038: .cfi_def_cfa_offset: 64
+// 0x00000018: .cfi_remember_state
+// 0x00000018: lw r31, +60(r29)
+// 0x0000001c: .cfi_restore: r31
+// 0x0000001c: lw r17, +56(r29)
+// 0x00000020: .cfi_restore: r17
+// 0x00000020: lw r16, +52(r29)
+// 0x00000024: .cfi_restore: r16
+// 0x00000024: ldc1 f22, +40(r29)
+// 0x00000028: ldc1 f20, +32(r29)
+// 0x0000002c: jr r31
+// 0x00000030: addiu r29, r29, 64
+// 0x00000034: .cfi_def_cfa_offset: 0
+// 0x00000034: .cfi_restore_state
+// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x18, 0x00, 0xBD, 0x67,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x18, 0x00, 0xBD, 0x67,
0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7, 0x10, 0x00, 0xB0, 0xDF,
0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF, 0x28, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
+
static constexpr uint8_t expected_cfi_kMips64[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x44, 0x0A, 0x44,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x0A, 0x44,
0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0, 0x44, 0xD1, 0x44, 0xDF,
0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -205,29 +202,28 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: .cfi_remember_state
-// 0x00000020: daddiu r29, r29, 24
-// 0x00000024: .cfi_def_cfa_offset: 40
-// 0x00000024: ldc1 f24, +0(r29)
-// 0x00000028: .cfi_restore: r56
-// 0x00000028: ldc1 f25, +8(r29)
-// 0x0000002c: .cfi_restore: r57
-// 0x0000002c: ld r16, +16(r29)
-// 0x00000030: .cfi_restore: r16
-// 0x00000030: ld r17, +24(r29)
-// 0x00000034: .cfi_restore: r17
-// 0x00000034: ld r31, +32(r29)
-// 0x00000038: .cfi_restore: r31
-// 0x00000038: daddiu r29, r29, 40
-// 0x0000003c: .cfi_def_cfa_offset: 0
-// 0x0000003c: jr r31
-// 0x00000040: nop
-// 0x00000044: .cfi_restore_state
-// 0x00000044: .cfi_def_cfa_offset: 64
+// 0x0000001c: .cfi_remember_state
+// 0x0000001c: daddiu r29, r29, 24
+// 0x00000020: .cfi_def_cfa_offset: 40
+// 0x00000020: ldc1 f24, +0(r29)
+// 0x00000024: .cfi_restore: r56
+// 0x00000024: ldc1 f25, +8(r29)
+// 0x00000028: .cfi_restore: r57
+// 0x00000028: ld r16, +16(r29)
+// 0x0000002c: .cfi_restore: r16
+// 0x0000002c: ld r17, +24(r29)
+// 0x00000030: .cfi_restore: r17
+// 0x00000030: ld r31, +32(r29)
+// 0x00000034: .cfi_restore: r31
+// 0x00000034: daddiu r29, r29, 40
+// 0x00000038: .cfi_def_cfa_offset: 0
+// 0x00000038: jr r31
+// 0x0000003c: nop
+// 0x00000040: .cfi_restore_state
+// 0x00000040: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x00, 0x28,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
@@ -243,7 +239,7 @@ static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
};
static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x88, 0x0A,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
0x42, 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B,
0x0E, 0x40,
};
@@ -258,9 +254,9 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: cmp r0, #0
-// 0x0000000c: beq +128 (0x00000090)
+// 0x00000008: cmp r0, #0
+// 0x0000000a: beq +128 (0x00000090)
+// 0x0000000c: ldr r0, [r0, #0]
// 0x0000000e: ldr r0, [r0, #0]
// 0x00000010: ldr r0, [r0, #0]
// 0x00000012: ldr r0, [r0, #0]
@@ -325,22 +321,21 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000088: ldr r0, [r0, #0]
// 0x0000008a: ldr r0, [r0, #0]
// 0x0000008c: ldr r0, [r0, #0]
-// 0x0000008e: ldr r0, [r0, #0]
-// 0x00000090: .cfi_remember_state
-// 0x00000090: add sp, sp, #44
-// 0x00000092: .cfi_def_cfa_offset: 20
-// 0x00000092: vpop.f32 {s16-s17}
-// 0x00000096: .cfi_def_cfa_offset: 12
-// 0x00000096: .cfi_restore_extended: r80
-// 0x00000096: .cfi_restore_extended: r81
-// 0x00000096: pop {r5, r6, pc}
-// 0x00000098: .cfi_restore_state
-// 0x00000098: .cfi_def_cfa_offset: 64
+// 0x0000008e: .cfi_remember_state
+// 0x0000008e: add sp, sp, #44
+// 0x00000090: .cfi_def_cfa_offset: 20
+// 0x00000090: vpop.f32 {s16-s17}
+// 0x00000094: .cfi_def_cfa_offset: 12
+// 0x00000094: .cfi_restore_extended: r80
+// 0x00000094: .cfi_restore_extended: r81
+// 0x00000094: pop {r5, r6, pc}
+// 0x00000096: .cfi_restore_state
+// 0x00000096: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
@@ -352,7 +347,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x54, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -365,41 +360,40 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: bne r0, r4, 0x00000040 ; +36
-// 0x00000020: addiu r29, r29, -4
-// 0x00000024: .cfi_def_cfa_offset: 68
-// 0x00000024: sw r31, +0(r29)
-// 0x00000028: bltzal r0, 0x0000002c ; +4
-// 0x0000002c: lui r1, 0x20000
-// 0x00000030: ori r1, r1, 24
-// 0x00000034: addu r1, r1, r31
-// 0x00000038: lw r31, +0(r29)
-// 0x0000003c: jr r1
-// 0x00000040: addiu r29, r29, 4
-// 0x00000044: .cfi_def_cfa_offset: 64
-// 0x00000044: nop
+// 0x00000018: bne r0, r4, 0x00000040 ; +36
+// 0x0000001c: addiu r29, r29, -4
+// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x00000020: sw r31, +0(r29)
+// 0x00000024: bltzal r0, 0x0000002c ; +4
+// 0x00000028: lui r1, 0x20000
+// 0x0000002c: ori r1, r1, 24
+// 0x00000030: addu r1, r1, r31
+// 0x00000034: lw r31, +0(r29)
+// 0x00000038: jr r1
+// 0x0000003c: addiu r29, r29, 4
+// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000040: nop
// ...
-// 0x00020044: nop
-// 0x00020048: .cfi_remember_state
-// 0x00020048: lw r31, +60(r29)
-// 0x0002004c: .cfi_restore: r31
-// 0x0002004c: lw r17, +56(r29)
-// 0x00020050: .cfi_restore: r17
-// 0x00020050: lw r16, +52(r29)
-// 0x00020054: .cfi_restore: r16
-// 0x00020054: ldc1 f22, +40(r29)
-// 0x00020058: ldc1 f20, +32(r29)
-// 0x0002005c: jr r31
-// 0x00020060: addiu r29, r29, 64
-// 0x00020064: .cfi_def_cfa_offset: 0
-// 0x00020064: .cfi_restore_state
-// 0x00020064: .cfi_def_cfa_offset: 64
+// 0x00020040: nop
+// 0x00020044: .cfi_remember_state
+// 0x00020044: lw r31, +60(r29)
+// 0x00020048: .cfi_restore: r31
+// 0x00020048: lw r17, +56(r29)
+// 0x0002004c: .cfi_restore: r17
+// 0x0002004c: lw r16, +52(r29)
+// 0x00020050: .cfi_restore: r16
+// 0x00020050: ldc1 f22, +40(r29)
+// 0x00020054: ldc1 f20, +32(r29)
+// 0x00020058: jr r31
+// 0x0002005c: addiu r29, r29, 64
+// 0x00020060: .cfi_def_cfa_offset: 0
+// 0x00020060: .cfi_restore_state
+// 0x00020060: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x02, 0x00, 0xA6, 0x60,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x02, 0x00, 0xA6, 0x60,
0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
};
static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
@@ -409,7 +403,7 @@ static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x14, 0x00,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x10, 0x00,
0x02, 0x00, 0x0A, 0x44, 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0,
0x44, 0xD1, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -427,29 +421,28 @@ static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: bnec r5, r6, 0x0000002c ; +12
-// 0x00000024: auipc r1, 2
-// 0x00000028: jic r1, 12 ; b 0x00020030 ; +131080
-// 0x0000002c: nop
+// 0x0000001c: bnec r5, r6, 0x0000002c ; +12
+// 0x00000020: auipc r1, 2
+// 0x00000024: jic r1, 12 ; b 0x00020030 ; +131080
+// 0x00000028: nop
// ...
-// 0x0002002c: nop
-// 0x00020030: .cfi_remember_state
-// 0x00020030: daddiu r29, r29, 24
-// 0x00020034: .cfi_def_cfa_offset: 40
-// 0x00020034: ldc1 f24, +0(r29)
-// 0x00020038: .cfi_restore: r56
-// 0x00020038: ldc1 f25, +8(r29)
-// 0x0002003c: .cfi_restore: r57
-// 0x0002003c: ld r16, +16(r29)
-// 0x00020040: .cfi_restore: r16
-// 0x00020040: ld r17, +24(r29)
-// 0x00020044: .cfi_restore: r17
-// 0x00020044: ld r31, +32(r29)
-// 0x00020048: .cfi_restore: r31
-// 0x00020048: daddiu r29, r29, 40
-// 0x0002004c: .cfi_def_cfa_offset: 0
-// 0x0002004c: jr r31
-// 0x00020050: nop
-// 0x00020054: .cfi_restore_state
-// 0x00020054: .cfi_def_cfa_offset: 64
+// 0x00020028: nop
+// 0x0002002c: .cfi_remember_state
+// 0x0002002c: daddiu r29, r29, 24
+// 0x00020030: .cfi_def_cfa_offset: 40
+// 0x00020030: ldc1 f24, +0(r29)
+// 0x00020034: .cfi_restore: r56
+// 0x00020034: ldc1 f25, +8(r29)
+// 0x00020038: .cfi_restore: r57
+// 0x00020038: ld r16, +16(r29)
+// 0x0002003c: .cfi_restore: r16
+// 0x0002003c: ld r17, +24(r29)
+// 0x00020040: .cfi_restore: r17
+// 0x00020040: ld r31, +32(r29)
+// 0x00020044: .cfi_restore: r31
+// 0x00020044: daddiu r29, r29, 40
+// 0x00020048: .cfi_def_cfa_offset: 0
+// 0x00020048: jr r31
+// 0x0002004c: nop
+// 0x00020050: .cfi_restore_state
+// 0x00020050: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6f8307ac2..4370a84bd2 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -319,7 +319,7 @@ class OptimizingCompiler FINAL : public Compiler {
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ VariableSizedHandleScope* handles) const;
void RunOptimizations(HOptimization* optimizations[],
size_t length,
@@ -358,7 +358,7 @@ class OptimizingCompiler FINAL : public Compiler {
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ VariableSizedHandleScope* handles) const;
void RunArchOptimizations(InstructionSet instruction_set,
HGraph* graph,
@@ -442,7 +442,7 @@ static HOptimization* BuildOptimization(
CodeGenerator* codegen,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
SideEffectsAnalysis* most_recent_side_effects,
HInductionVarAnalysis* most_recent_induction) {
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
@@ -524,7 +524,7 @@ static ArenaVector<HOptimization*> BuildOptimizations(
CodeGenerator* codegen,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
- StackHandleScopeCollection* handles) {
+ VariableSizedHandleScope* handles) {
// Few HOptimizations constructors require SideEffectsAnalysis or HInductionVarAnalysis
// instances. This method assumes that each of them expects the nearest instance preceding it
// in the pass name list.
@@ -570,7 +570,7 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const {
+ VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
@@ -707,7 +707,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const {
+ VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
@@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
{
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
// Do not hold `mutator_lock_` between optimizations.
ScopedThreadSuspension sts(soa.Self(), kNative);
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 2a23c92f1f..58d90176cd 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -90,7 +90,7 @@ inline HGraph* CreateCFG(ArenaAllocator* allocator,
{
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
HGraphBuilder builder(graph, *item, &handles, return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
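Several hunks in this change are a mechanical rename of StackHandleScopeCollection to VariableSizedHandleScope. The new name reflects a scope that grows on demand as handles are created rather than a fixed-capacity stack scope. A toy model of that idea (purely illustrative; ART's real implementation chains fixed-size scopes and is visible to the GC):

    #include <deque>

    // Sketch (assumption): toy model of a handle scope that grows as needed.
    template <typename T>
    class Handle {
     public:
      explicit Handle(void** slot) : slot_(slot) {}
      T* Get() const { return static_cast<T*>(*slot_); }
     private:
      void** slot_;
    };

    class VariableSizedHandleScopeSketch {
     public:
      template <typename T>
      Handle<T> NewHandle(T* object) {
        slots_.push_back(object);     // capacity is not fixed up front
        return Handle<T>(&slots_.back());
      }
     private:
      std::deque<void*> slots_;       // deque: growth never invalidates earlier slots
    };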
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 45a3ce411e..83698adba4 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -35,7 +35,7 @@ static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
}
}
-static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollection* handles,
+static inline ReferenceTypeInfo::TypeHandle GetRootHandle(VariableSizedHandleScope* handles,
ClassLinker::ClassRoot class_root,
ReferenceTypeInfo::TypeHandle* cache) {
if (!ReferenceTypeInfo::IsValidHandle(*cache)) {
@@ -109,7 +109,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
Handle<mirror::DexCache> hint_dex_cache,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 61428b2a45..4663471729 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -34,7 +34,7 @@ class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
Handle<mirror::DexCache> hint_dex_cache,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
bool is_first_run,
const char* name = kReferenceTypePropagationPassName);
@@ -56,7 +56,7 @@ class ReferenceTypePropagation : public HOptimization {
private:
class HandleCache {
public:
- explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
+ explicit HandleCache(VariableSizedHandleScope* handles) : handles_(handles) { }
template <typename T>
MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -74,7 +74,7 @@ class ReferenceTypePropagation : public HOptimization {
ReferenceTypeInfo::TypeHandle GetThrowableClassHandle();
private:
- StackHandleScopeCollection* handles_;
+ VariableSizedHandleScope* handles_;
ReferenceTypeInfo::TypeHandle object_class_handle_;
ReferenceTypeInfo::TypeHandle class_class_handle_;
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 75a4eac538..b061c871b0 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -35,7 +35,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
~ReferenceTypePropagationTest() { }
- void SetupPropagation(StackHandleScopeCollection* handles) {
+ void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
Handle<mirror::DexCache>(),
@@ -79,7 +79,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
TEST_F(ReferenceTypePropagationTest, ProperSetup) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
EXPECT_TRUE(propagation_ != nullptr);
@@ -88,7 +88,7 @@ TEST_F(ReferenceTypePropagationTest, ProperSetup) {
TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
// Two invalid types.
@@ -120,7 +120,7 @@ TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {
TEST_F(ReferenceTypePropagationTest, MergeValidTypes) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
// Same types.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index d7360adef8..45dac54115 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -49,7 +49,7 @@ class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
Handle<mirror::DexCache> dex_cache,
- StackHandleScopeCollection* handles)
+ VariableSizedHandleScope* handles)
: graph_(graph),
dex_cache_(dex_cache),
handles_(handles),
@@ -116,7 +116,7 @@ class SsaBuilder : public ValueObject {
HGraph* graph_;
Handle<mirror::DexCache> dex_cache_;
- StackHandleScopeCollection* const handles_;
+ VariableSizedHandleScope* const handles_;
// True if types of ambiguous ArrayGets have been resolved.
bool agets_fixed_;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 14d29c4f1a..8a9fd90c32 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -254,10 +254,10 @@ void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size
return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
}
-void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst ATTRIBUTE_UNUSED,
- ThreadOffset32 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
+void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+ ThreadOffset32 src,
+ size_t size) {
+ return Load(m_dst.AsArm(), tr, src.Int32Value(), size);
}
void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
@@ -558,6 +558,38 @@ void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t s
// TODO: think about using CBNZ here.
}
+std::unique_ptr<JNIMacroLabel> ArmVIXLJNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new ArmVIXLJNIMacroLabel());
+}
+
+void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ B(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+}
+
+void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ ___ Cbz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ ___ Cbnz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ break;
+ default:
+ LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ Bind(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+}
+
void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
ArmVIXLJNIMacroAssembler::ArmException* exception) {
___ Bind(exception->Entry());
@@ -588,9 +620,14 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size) << dest;
} else if (dest.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dest;
CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
- ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+
+ if (size == 1u) {
+ ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
+ } else {
+ CHECK_EQ(4u, size) << dest;
+ ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size) << dest;
___ Ldr(dest.AsVIXLRegisterPairLow(), MemOperand(base, offset));
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 9fc683dd4f..f3baf1f062 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -187,6 +187,15 @@ class ArmVIXLJNIMacroAssembler FINAL
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
@@ -219,6 +228,16 @@ class ArmVIXLJNIMacroAssembler FINAL
friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
};
+class ArmVIXLJNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
+ vixl32::Label,
+ kArm> {
+ public:
+ vixl32::Label* AsArm() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace arm
} // namespace art
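ArmVIXLJNIMacroLabel above shows the shape every backend follows: wrap the native assembler's label type in a JNIMacroLabelCommon instantiation and expose a typed accessor. A sketch of what a hypothetical additional backend would declare (FooAssemblerLabel and kFoo are placeholders, not real ART types):

class FooJNIMacroLabel FINAL
    : public JNIMacroLabelCommon<FooJNIMacroLabel,
                                 FooAssemblerLabel,  // the backend's native label type
                                 kFoo> {             // the backend's InstructionSet value
 public:
  FooAssemblerLabel* AsFoo() {
    return AsPlatformLabel();
  }
};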
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index dfdcd11893..9cd6884cbe 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -262,9 +262,12 @@ void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
} else if (dest.IsXRegister()) {
CHECK_NE(dest.AsXRegister(), SP) << dest;
- if (size == 4u) {
+
+ if (size == 1u) {
+ ___ Ldrb(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+ } else if (size == 4u) {
___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
- } else {
+ } else {
CHECK_EQ(8u, size) << dest;
___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
}
@@ -627,6 +630,38 @@ void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t sta
___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
+std::unique_ptr<JNIMacroLabel> Arm64JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new Arm64JNIMacroLabel());
+}
+
+void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ B(Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
+void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ ___ Cbz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ ___ Cbnz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+ break;
+ default:
+ LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+}
+
+void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception *exception) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index b9f6854b01..264e99adab 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -168,6 +168,15 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
class Arm64Exception {
public:
@@ -222,6 +231,16 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
};
+class Arm64JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
+ vixl::aarch64::Label,
+ kArm64> {
+ public:
+ vixl::aarch64::Label* AsArm64() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace arm64
} // namespace art
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 0119ae9bfb..59a1a48e20 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -35,6 +35,12 @@ class ArenaAllocator;
class DebugFrameOpCodeWriterForAssembler;
class InstructionSetFeatures;
class MemoryRegion;
+class JNIMacroLabel;
+
+enum class JNIMacroUnaryCondition {
+ kZero,
+ kNotZero
+};
template <PointerSize kPointerSize>
class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
@@ -193,6 +199,15 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
// and branch to an ExceptionSlowPath if it is.
virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+ // Create a new label that can be used with Jump/Bind calls.
+ virtual std::unique_ptr<JNIMacroLabel> CreateLabel() = 0;
+ // Emit an unconditional jump to the label.
+ virtual void Jump(JNIMacroLabel* label) = 0;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ virtual void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) = 0;
+ // Code at this offset will serve as the target for the Jump call.
+ virtual void Bind(JNIMacroLabel* label) = 0;
+
virtual ~JNIMacroAssembler() {}
/**
@@ -205,6 +220,28 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
explicit JNIMacroAssembler() {}
};
+// A "Label" class used with the JNIMacroAssembler
+// allowing one to use branches (jumping from one place to another).
+//
+// This is just an interface, so every platform must provide
+// its own implementation of it.
+//
+// It is only safe to use a label created
+// via JNIMacroAssembler::CreateLabel with that same macro assembler.
+class JNIMacroLabel {
+ public:
+ virtual ~JNIMacroLabel() = 0;
+
+ const InstructionSet isa_;
+ protected:
+ explicit JNIMacroLabel(InstructionSet isa) : isa_(isa) {}
+};
+
+inline JNIMacroLabel::~JNIMacroLabel() {
+ // Compulsory definition for a pure virtual destructor
+ // to avoid linking errors.
+}
+
template <typename T, PointerSize kPointerSize>
class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
public:
@@ -230,6 +267,30 @@ class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
T asm_;
};
+template <typename Self, typename PlatformLabel, InstructionSet kIsa>
+class JNIMacroLabelCommon : public JNIMacroLabel {
+ public:
+ static Self* Cast(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ CHECK_EQ(kIsa, label->isa_);
+
+ return reinterpret_cast<Self*>(label);
+ }
+
+ protected:
+ PlatformLabel* AsPlatformLabel() {
+ return &label_;
+ }
+
+ JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
+ }
+
+ virtual ~JNIMacroLabelCommon() OVERRIDE {}
+
+ private:
+ PlatformLabel label_;
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
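With CreateLabel/Jump/Bind declared on the base interface, a client can emit forward branches without knowing which backend is in use. A minimal sketch, assuming `jni_asm` and `scratch` already exist; the unary conditions map to Cbz/Cbnz on ARM and ARM64 and to test+jcc on x86 and x86-64 in the per-backend implementations in this change:

template <PointerSize kPointerSize>
void EmitSkipIfZero(JNIMacroAssembler<kPointerSize>* jni_asm, ManagedRegister scratch) {
  std::unique_ptr<JNIMacroLabel> skip = jni_asm->CreateLabel();
  // Branch over the block below when `scratch` holds zero.
  jni_asm->Jump(skip.get(), JNIMacroUnaryCondition::kZero, scratch);
  // ... emit the code that should only run when `scratch` is non-zero ...
  jni_asm->Bind(skip.get());
}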
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index e1255f7f23..b932fb82bc 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -179,6 +179,8 @@ class MipsExceptionSlowPath {
class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
+ using JNIBase = JNIMacroAssembler<PointerSize::k32>;
+
explicit MipsAssembler(ArenaAllocator* arena,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
: Assembler(arena),
@@ -723,6 +725,34 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
+ // Don't warn about a different virtual Bind/Jump in the base class.
+ using JNIBase::Bind;
+ using JNIBase::Jump;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
+ JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
+ ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+
// Create a new literal with a given value.
// NOTE: Force the template parameter to be explicitly specified.
template <typename T>
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 6277b5d66d..238cb9d765 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -335,6 +335,8 @@ class Mips64ExceptionSlowPath {
class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
+ using JNIBase = JNIMacroAssembler<PointerSize::k64>;
+
explicit Mips64Assembler(ArenaAllocator* arena)
: Assembler(arena),
overwriting_(false),
@@ -574,6 +576,35 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
void Bind(Mips64Label* label);
+
+ // Don't warn about a different virtual Bind/Jump in the base class.
+ using JNIBase::Bind;
+ using JNIBase::Jump;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
+ JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
+ ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+
void Bc(Mips64Label* label);
void Jialc(Mips64Label* label, GpuRegister indirect_reg);
void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 77af885646..cfdf80ba50 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -215,8 +215,12 @@ void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ if (size == 1u) {
+ __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
+ } else {
+ CHECK_EQ(4u, size);
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
__ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
@@ -519,6 +523,48 @@ void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t sta
__ j(kNotEqual, slow->Entry());
}
+std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ art::x86::Condition x86_cond;
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ x86_cond = art::x86::kZero;
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ x86_cond = art::x86::kNotZero;
+ break;
+ default:
+ LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+
+ // TEST reg, reg
+ // Jcc <Offset>
+ __ testl(test.AsX86().AsCpuRegister(), test.AsX86().AsCpuRegister());
+ __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());
+
+
+ // X86 also has JCXZ and JECXZ, but they are not worth implementing here
+ // because we are unlikely to generate code that pairs ECX with a kZero check.
+}
+
+void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
#undef __
void X86ExceptionSlowPath::Emit(Assembler *sasm) {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 015584cbc1..8ffda6425e 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -30,6 +30,8 @@
namespace art {
namespace x86 {
+class X86JNIMacroLabel;
+
class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
@@ -152,10 +154,29 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
};
+class X86JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<X86JNIMacroLabel,
+ art::Label,
+ kX86> {
+ public:
+ art::Label* AsX86() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace x86
} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 3e687a7758..ec86254cfc 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -260,8 +260,12 @@ void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ if (size == 1u) {
+ __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
+ } else {
+ CHECK_EQ(4u, size);
+ __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
__ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
@@ -585,6 +589,44 @@ void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t
__ j(kNotEqual, slow->Entry());
}
+std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
+}
+
+void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
+void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ art::x86_64::Condition x86_64_cond;
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ x86_64_cond = art::x86_64::kZero;
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ x86_64_cond = art::x86_64::kNotZero;
+ break;
+ default:
+ LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+
+ // TEST reg, reg
+ // Jcc <Offset>
+ __ testq(test.AsX86_64().AsCpuRegister(), test.AsX86_64().AsCpuRegister());
+ __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
+void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
#undef __
void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 9107f3c422..aa058f7454 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -180,10 +180,29 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
};
+class X86_64JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
+ art::Label,
+ kX86_64> {
+ public:
+ art::Label* AsX86_64() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace x86_64
} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 292aff43e9..e26fa7ff03 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -339,6 +339,11 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError("");
UsageError(" --no-generate-mini-debug-info: Do not generate backtrace info.");
UsageError("");
+ UsageError(" --generate-build-id: Generate GNU-compatible linker build ID ELF section with");
+ UsageError(" SHA-1 of the file content (and thus stable across identical builds)");
+ UsageError("");
+ UsageError(" --no-generate-build-id: Do not generate the build ID ELF section.");
+ UsageError("");
UsageError(" --debuggable: Produce code debuggable with Java debugger.");
UsageError("");
UsageError(" --runtime-arg <argument>: used to specify various arguments for the runtime,");
@@ -988,7 +993,18 @@ class Dex2Oat FINAL {
}
}
- char_backing_storage_.reserve((dex_locations_.size() - 1) * 2);
+ std::string base_symbol_oat;
+ if (!oat_unstripped_.empty()) {
+ base_symbol_oat = oat_unstripped_[0];
+ size_t last_symbol_oat_slash = base_symbol_oat.rfind('/');
+ if (last_symbol_oat_slash == std::string::npos) {
+ Usage("--multi-image used with unusable symbol filename %s", base_symbol_oat.c_str());
+ }
+ base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
+ }
+
+ const size_t num_expanded_files = 2 + (base_symbol_oat.empty() ? 0 : 1);
+ char_backing_storage_.reserve((dex_locations_.size() - 1) * num_expanded_files);
// Now create the other names. Use a counted loop to skip the first one.
for (size_t i = 1; i < dex_locations_.size(); ++i) {
@@ -1000,6 +1016,11 @@ class Dex2Oat FINAL {
std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
char_backing_storage_.push_back(base_oat + oat_name);
oat_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+
+ if (!base_symbol_oat.empty()) {
+ char_backing_storage_.push_back(base_symbol_oat + oat_name);
+ oat_unstripped_.push_back((char_backing_storage_.end() - 1)->c_str());
+ }
}
}
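The new --multi-image handling derives a common directory for the unstripped (symbol) oat files from the first unstripped oat path and then places every additional oat name in that directory. A standalone sketch of the derivation with made-up paths (the file names are illustrative only):

#include <iostream>
#include <string>

int main() {
  // oat_unstripped_[0], i.e. the first unstripped oat path (made-up value).
  std::string base_symbol_oat = "/symbols/system/framework/boot.oat";
  size_t last_slash = base_symbol_oat.rfind('/');
  if (last_slash != std::string::npos) {
    base_symbol_oat = base_symbol_oat.substr(0, last_slash + 1);  // keep the directory only
  }
  // A second dex location then gets a matching unstripped file in that directory.
  std::string oat_name = "boot-framework.oat";  // stand-in for a CreateMultiImageName result
  std::cout << base_symbol_oat + oat_name << std::endl;  // /symbols/system/framework/boot-framework.oat
  return 0;
}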
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f75841415e..2fcc0cfc89 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2864,10 +2864,17 @@ class IMTDumper {
std::cerr << " " << iface->GetDescriptor(&iface_name) << std::endl;
for (ArtMethod& iface_method : iface->GetVirtualMethods(pointer_size)) {
- uint32_t base_hash = ImTable::GetBaseImtHash(&iface_method);
+ uint32_t class_hash, name_hash, signature_hash;
+ ImTable::GetImtHashComponents(&iface_method, &class_hash, &name_hash, &signature_hash);
uint32_t imt_slot = ImTable::GetImtIndex(&iface_method);
- std::cerr << " " << PrettyMethod(&iface_method, true) << " slot=" << std::dec
- << imt_slot << " base_hash=0x" << std::hex << base_hash << std::endl;
+ std::cerr << " " << PrettyMethod(&iface_method, true)
+ << " slot=" << imt_slot
+ << std::hex
+ << " class_hash=0x" << class_hash
+ << " name_hash=0x" << name_hash
+ << " signature_hash=0x" << signature_hash
+ << std::dec
+ << std::endl;
}
}
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 31f2490c21..6945eb0802 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -536,6 +536,7 @@ art_cc_test {
"gc/task_processor_test.cc",
"gtest_test.cc",
"handle_scope_test.cc",
+ "imtable_test.cc",
"indenter_test.cc",
"indirect_reference_table_test.cc",
"instrumentation_test.cc",
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 8a51dc2c04..5ec989831e 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -274,7 +274,7 @@ class ScopedCheck {
AbortF("field operation on NULL object: %p", java_object);
return false;
}
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
@@ -782,7 +782,7 @@ class ScopedCheck {
}
}
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("%s is an invalid %s: %p (%p)",
what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
@@ -939,7 +939,7 @@ class ScopedCheck {
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(jc);
if (c == nullptr) {
*msg += "NULL";
- } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+ } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c.Ptr())) {
StringAppendF(msg, "INVALID POINTER:%p", jc);
} else if (!c->IsClass()) {
*msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
@@ -1108,7 +1108,7 @@ class ScopedCheck {
}
ObjPtr<mirror::Array> a = soa.Decode<mirror::Array>(java_array);
- if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
+ if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a.Ptr()))) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
@@ -1145,7 +1145,7 @@ class ScopedCheck {
}
ArtField* f = soa.DecodeField(fid);
// TODO: Better check here.
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass())) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass().Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("invalid jfieldID: %p", fid);
return nullptr;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index fa971c4c2b..dba9b8fb48 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -200,7 +200,7 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referr
inline mirror::Object* ClassLinker::AllocObject(Thread* self) {
return GetClassRoot(kJavaLangObject)->Alloc<true, false>(
self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
template <class T>
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 48d31a4c3e..14cbf24feb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2218,7 +2218,7 @@ mirror::DexCache* ClassLinker::AllocDexCache(mirror::String** out_location,
const DexFile& dex_file) {
StackHandleScope<1> hs(self);
DCHECK(out_location != nullptr);
- auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
+ auto dex_cache(hs.NewHandle(ObjPtr<mirror::DexCache>::DownCast(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
if (dex_cache.Get() == nullptr) {
self->AssertPendingOOMException();
@@ -4749,7 +4749,7 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
// First we initialize all of iface's super-interfaces recursively.
for (size_t i = 0; i < num_direct_ifaces; i++) {
- mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ ObjPtr<mirror::Class> super_iface = mirror::Class::GetDirectInterface(self, iface, i);
if (!super_iface->HasBeenRecursivelyInitialized()) {
// Recursive step
handle_super_iface.Assign(super_iface);
@@ -6493,7 +6493,7 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class>
size_t ifcount = super_ifcount + num_interfaces;
// Check that every class being implemented is an interface.
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces
+ ObjPtr<mirror::Class> interface = have_interfaces
? interfaces->GetWithoutChecks(i)
: mirror::Class::GetDirectInterface(self, klass, i);
DCHECK(interface != nullptr);
@@ -6532,9 +6532,9 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class>
ScopedAssertNoThreadSuspension nts("Copying mirror::Class*'s for FillIfTable");
std::vector<mirror::Class*> to_add;
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+ ObjPtr<mirror::Class> interface = have_interfaces ? interfaces->Get(i) :
mirror::Class::GetDirectInterface(self, klass, i);
- to_add.push_back(interface);
+ to_add.push_back(interface.Ptr());
}
new_ifcount = FillIfTable(iftable.Get(), super_ifcount, std::move(to_add));
@@ -8298,7 +8298,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
mirror::Class::FindField(self, hs.NewHandle(h_path_class_loader->GetClass()), "parent",
"Ljava/lang/ClassLoader;");
DCHECK(parent_field != nullptr);
- mirror::Object* boot_cl =
+ ObjPtr<mirror::Object> boot_cl =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self);
parent_field->SetObject<false>(h_path_class_loader.Get(), boot_cl);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e514112382..6279717acb 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -211,13 +211,13 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_TRUE(array->ShouldHaveEmbeddedVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != nullptr);
- mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
+ ObjPtr<mirror::Class> direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
EXPECT_TRUE(direct_interface0 != nullptr);
EXPECT_STREQ(direct_interface0->GetDescriptor(&temp), "Ljava/lang/Cloneable;");
- mirror::Class* direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
+ ObjPtr<mirror::Class> direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
EXPECT_STREQ(direct_interface1->GetDescriptor(&temp), "Ljava/io/Serializable;");
mirror::Class* array_ptr = array->GetComponentType();
- EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
PointerSize pointer_size = class_linker_->GetImagePointerSize();
mirror::Class* JavaLangObject =
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 7fa8cf9326..1baa8f78a9 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -30,12 +30,13 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "obj_ptr-inl.h"
#include "thread.h"
#include "verifier/method_verifier.h"
namespace art {
-static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
+static void AddReferrerLocation(std::ostream& os, ObjPtr<mirror::Class> referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (referrer != nullptr) {
std::string location(referrer->GetLocation());
@@ -47,7 +48,9 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
}
static void ThrowException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
+ ObjPtr<mirror::Class> referrer,
+ const char* fmt,
+ va_list* args = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
@@ -63,7 +66,9 @@ static void ThrowException(const char* exception_descriptor,
}
static void ThrowWrappedException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
+ ObjPtr<mirror::Class> referrer,
+ const char* fmt,
+ va_list* args = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
@@ -109,7 +114,8 @@ void ThrowArrayIndexOutOfBoundsException(int index, int length) {
// ArrayStoreException
-void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
+void ThrowArrayStoreException(ObjPtr<mirror::Class> element_class,
+ ObjPtr<mirror::Class> array_class) {
ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
@@ -118,7 +124,7 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array
// ClassCastException
-void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
+void ThrowClassCastException(ObjPtr<mirror::Class> dest_type, ObjPtr<mirror::Class> src_type) {
ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
@@ -131,13 +137,13 @@ void ThrowClassCastException(const char* msg) {
// ClassCircularityError
-void ThrowClassCircularityError(mirror::Class* c) {
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c) {
std::ostringstream msg;
msg << PrettyDescriptor(c);
ThrowException("Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
}
-void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) {
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/ClassCircularityError;", c, fmt, &args);
@@ -146,7 +152,7 @@ void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) {
// ClassFormatError
-void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowClassFormatError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/ClassFormatError;", referrer, fmt, &args);
@@ -155,14 +161,15 @@ void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) {
+void ThrowIllegalAccessErrorClass(ObjPtr<mirror::Class> referrer, ObjPtr<mirror::Class> accessed) {
std::ostringstream msg;
msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "'";
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+void ThrowIllegalAccessErrorClassForMethodDispatch(ObjPtr<mirror::Class> referrer,
+ ObjPtr<mirror::Class> accessed,
ArtMethod* called,
InvokeType type) {
std::ostringstream msg;
@@ -172,14 +179,14 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) {
+void ThrowIllegalAccessErrorMethod(ObjPtr<mirror::Class> referrer, ArtMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) {
+void ThrowIllegalAccessErrorField(ObjPtr<mirror::Class> referrer, ArtField* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -195,7 +202,7 @@ void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
msg.str().c_str());
}
-void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
@@ -228,8 +235,8 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
}
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
- mirror::Class* target_class,
- mirror::Object* this_object,
+ ObjPtr<mirror::Class> target_class,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
@@ -244,7 +251,7 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
}
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
@@ -269,7 +276,7 @@ void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_st
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
@@ -303,14 +310,14 @@ void ThrowWrappedIOException(const char* fmt, ...) {
// LinkageError
-void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/LinkageError;", referrer, fmt, &args);
va_end(args);
}
-void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowWrappedLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowWrappedException("Ljava/lang/LinkageError;", referrer, fmt, &args);
@@ -330,7 +337,7 @@ void ThrowNegativeArraySizeException(const char* msg) {
// NoSuchFieldError
-void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
+void ThrowNoSuchFieldError(const StringPiece& scope, ObjPtr<mirror::Class> c,
const StringPiece& type, const StringPiece& name) {
std::ostringstream msg;
std::string temp;
@@ -339,7 +346,7 @@ void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
ThrowException("Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
-void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) {
+void ThrowNoSuchFieldException(ObjPtr<mirror::Class> c, const StringPiece& name) {
std::ostringstream msg;
std::string temp;
msg << "No field " << name << " in class " << c->GetDescriptor(&temp);
@@ -348,7 +355,7 @@ void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) {
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+void ThrowNoSuchMethodError(InvokeType type, ObjPtr<mirror::Class> c, const StringPiece& name,
const Signature& signature) {
std::ostringstream msg;
std::string temp;
@@ -378,7 +385,7 @@ static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type) {
- mirror::DexCache* dex_cache =
+ ObjPtr<mirror::DexCache> dex_cache =
Thread::Current()->GetCurrentMethod(nullptr)->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
@@ -386,7 +393,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type) {
- mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
+ ObjPtr<mirror::DexCache> dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
ThrowNullPointerExceptionForMethodAccessImpl(method->GetDexMethodIndex(),
dex_file, type);
@@ -784,7 +791,7 @@ void ThrowStringIndexOutOfBoundsException(int index, int length) {
// VerifyError
-void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/VerifyError;", referrer, fmt, &args);
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 945dc2daba..2a602634fb 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -19,6 +19,7 @@
#include "base/mutex.h"
#include "invoke_type.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
@@ -50,20 +51,21 @@ void ThrowArrayIndexOutOfBoundsException(int index, int length)
// ArrayStoreException
-void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
+void ThrowArrayStoreException(ObjPtr<mirror::Class> element_class,
+ ObjPtr<mirror::Class> array_class)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
-void ThrowClassCircularityError(mirror::Class* c)
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...)
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c, const char* fmt, ...)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
-void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
+void ThrowClassCastException(ObjPtr<mirror::Class> dest_type, ObjPtr<mirror::Class> src_type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const char* msg)
@@ -71,30 +73,31 @@ void ThrowClassCastException(const char* msg)
// ClassFormatError
-void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowClassFormatError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
+void ThrowIllegalAccessErrorClass(ObjPtr<mirror::Class> referrer, ObjPtr<mirror::Class> accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+void ThrowIllegalAccessErrorClassForMethodDispatch(ObjPtr<mirror::Class> referrer,
+ ObjPtr<mirror::Class> accessed,
ArtMethod* called,
InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
+void ThrowIllegalAccessErrorMethod(ObjPtr<mirror::Class> referrer, ArtMethod* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
+void ThrowIllegalAccessErrorField(ObjPtr<mirror::Class> referrer, ArtField* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -110,26 +113,29 @@ void ThrowIllegalArgumentException(const char* msg)
// IncompatibleClassChangeError
-void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- ArtMethod* method, ArtMethod* referrer)
+void ThrowIncompatibleClassChangeError(InvokeType expected_type,
+ InvokeType found_type,
+ ArtMethod* method,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
- mirror::Class* target_class,
- mirror::Object* this_object,
+ ObjPtr<mirror::Class> target_class,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
+void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field,
+ bool is_static,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -146,11 +152,11 @@ void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__p
// LinkageError
-void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowWrappedLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -165,16 +171,20 @@ void ThrowNegativeArraySizeException(const char* msg)
// NoSuchFieldError
-void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
- const StringPiece& type, const StringPiece& name)
+void ThrowNoSuchFieldError(const StringPiece& scope,
+ ObjPtr<mirror::Class> c,
+ const StringPiece& type,
+ const StringPiece& name)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
+void ThrowNoSuchFieldException(ObjPtr<mirror::Class> c, const StringPiece& name)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+void ThrowNoSuchMethodError(InvokeType type,
+ ObjPtr<mirror::Class> c,
+ const StringPiece& name,
const Signature& signature)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -215,7 +225,7 @@ void ThrowStringIndexOutOfBoundsException(int index, int length)
// VerifyError
-void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
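These signature changes are mechanical at the call sites: a raw mirror::Class* converts implicitly to ObjPtr<mirror::Class>, and .Ptr() unwraps it again where an API still wants the raw pointer, as the check_jni.cc and class_linker.cc hunks earlier show. A minimal sketch, assuming `klass` is valid under the mutator lock:

void ReportCircularity(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> wrapped = klass;  // implicit conversion from the raw pointer
  ThrowClassCircularityError(wrapped);    // matches the new ObjPtr<> overload above
  mirror::Class* raw = wrapped.Ptr();     // unwrap where a raw pointer is still required
  DCHECK(raw == klass);
}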
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7006f70687..3977e4926a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
+#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -892,15 +893,16 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class
std::vector<uint64_t>* counts) {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false);
- std::vector<mirror::Class*> classes;
+ VariableSizedHandleScope hs(Thread::Current());
+ std::vector<Handle<mirror::Class>> classes;
counts->clear();
for (size_t i = 0; i < class_ids.size(); ++i) {
JDWP::JdwpError error;
- mirror::Class* c = DecodeClass(class_ids[i], &error);
+ ObjPtr<mirror::Class> c = DecodeClass(class_ids[i], &error);
if (c == nullptr) {
return error;
}
- classes.push_back(c);
+ classes.push_back(hs.NewHandle(c));
counts->push_back(0);
}
heap->CountInstances(classes, false, &(*counts)[0]);
@@ -913,14 +915,15 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
// We only want reachable instances, so do a GC.
heap->CollectGarbage(false);
JDWP::JdwpError error;
- mirror::Class* c = DecodeClass(class_id, &error);
+ ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
if (c == nullptr) {
return error;
}
- std::vector<mirror::Object*> raw_instances;
- Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
+ VariableSizedHandleScope hs(Thread::Current());
+ std::vector<Handle<mirror::Object>> raw_instances;
+ Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(c), max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
- instances->push_back(gRegistry->Add(raw_instances[i]));
+ instances->push_back(gRegistry->Add(raw_instances[i].Get()));
}
return JDWP::ERR_NONE;
}
@@ -930,14 +933,15 @@ JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_c
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false);
JDWP::JdwpError error;
- mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
+ ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
if (o == nullptr) {
return JDWP::ERR_INVALID_OBJECT;
}
- std::vector<mirror::Object*> raw_instances;
- heap->GetReferringObjects(o, max_count, raw_instances);
+ VariableSizedHandleScope hs(Thread::Current());
+ std::vector<Handle<mirror::Object>> raw_instances;
+ heap->GetReferringObjects(hs, hs.NewHandle(o), max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
- referring_objects->push_back(gRegistry->Add(raw_instances[i]));
+ referring_objects->push_back(gRegistry->Add(raw_instances[i].Get()));
}
return JDWP::ERR_NONE;
}
@@ -1286,7 +1290,7 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_
return error;
}
Thread* self = Thread::Current();
- mirror::Object* new_object;
+ ObjPtr<mirror::Object> new_object;
if (c->IsStringClass()) {
// Special case for java.lang.String.
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -1301,7 +1305,7 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_
*new_object_id = 0;
return JDWP::ERR_OUT_OF_MEMORY;
}
- *new_object_id = gRegistry->Add(new_object);
+ *new_object_id = gRegistry->Add(new_object.Ptr());
return JDWP::ERR_NONE;
}
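The rewritten Dbg::GetInstances and Dbg::GetReferringObjects show the new enumeration shape: results come back as Handles rooted in a VariableSizedHandleScope rather than as raw pointers. A sketch of the same call pattern outside the debugger, assuming `klass` and `max_count` come from the caller and ConsumeInstance is a hypothetical consumer:

void ConsumeInstance(mirror::Object* obj);  // hypothetical, declared elsewhere

void CollectInstances(ObjPtr<mirror::Class> klass, int32_t max_count)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Object>> instances;
  // Results are handles rooted in `hs`, so they stay valid if a GC moves the objects.
  Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(klass), max_count, instances);
  for (const Handle<mirror::Object>& instance : instances) {
    ConsumeInstance(instance.Get());
  }
}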
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index f0d3909bff..576c4aa849 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -22,7 +22,7 @@
#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "mirror/field.h"
#include "mirror/method.h"
#include "reflection.h"
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index e37db7dd92..8077c21bdc 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -231,10 +231,10 @@ inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
// CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
return klass->Alloc</*kInstrumented*/true>(
self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
DCHECK(klass != nullptr);
- return klass->Alloc<kInstrumented>(self, allocator_type);
+ return klass->Alloc<kInstrumented>(self, allocator_type).Ptr();
}
// Given the context of a calling Method and a resolved class, create an instance.
@@ -254,10 +254,10 @@ inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
// Pass in false since the object cannot be finalizable.
// CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
// instrumented.
- return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator());
+ return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
}
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, allocator_type);
+ return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
}
// Given the context of a calling Method and an initialized class, create an instance.
@@ -268,7 +268,7 @@ inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, allocator_type);
+ return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 7c7e2da740..20fa0d8e6b 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -144,7 +144,7 @@ static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result,
HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&o));
CheckReferenceResult(h_obj, self);
}
- VerifyObject(o.Ptr());
+ VerifyObject(o);
return o.Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 553c092f69..1cd641b962 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -94,6 +94,7 @@ TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
CHECK_FRAME_SIZE(kArm);
CHECK_FRAME_SIZE(kArm64);
CHECK_FRAME_SIZE(kMips);
+ CHECK_FRAME_SIZE(kMips64);
CHECK_FRAME_SIZE(kX86);
CHECK_FRAME_SIZE(kX86_64);
}
@@ -104,6 +105,7 @@ TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
EXPECT_EQ(GetInstructionSetPointerSize(kArm), GetConstExprPointerSize(kArm));
EXPECT_EQ(GetInstructionSetPointerSize(kArm64), GetConstExprPointerSize(kArm64));
EXPECT_EQ(GetInstructionSetPointerSize(kMips), GetConstExprPointerSize(kMips));
+ EXPECT_EQ(GetInstructionSetPointerSize(kMips64), GetConstExprPointerSize(kMips64));
EXPECT_EQ(GetInstructionSetPointerSize(kX86), GetConstExprPointerSize(kX86));
EXPECT_EQ(GetInstructionSetPointerSize(kX86_64), GetConstExprPointerSize(kX86_64));
}
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
new file mode 100644
index 0000000000..f60bc0c834
--- /dev/null
+++ b/runtime/gc/allocation_listener.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ALLOCATION_LISTENER_H_
+#define ART_RUNTIME_GC_ALLOCATION_LISTENER_H_
+
+#include <list>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "obj_ptr.h"
+#include "object_callbacks.h"
+#include "gc_root.h"
+
+namespace art {
+
+namespace mirror {
+ class Object;
+}
+
+class Thread;
+
+namespace gc {
+
+class AllocationListener {
+ public:
+ virtual ~AllocationListener() {}
+
+ virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ALLOCATION_LISTENER_H_
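
A minimal sketch of implementing the new interface, assuming it is compiled inside the ART runtime tree; the class name and the counters are illustrative and not part of this change.

#include <atomic>
#include <cstdint>

#include "gc/allocation_listener.h"
#include "obj_ptr.h"

namespace art {
namespace gc {

// Tallies every allocation it observes. ObjectAllocated() runs on the
// allocating thread with the mutator lock shared-held, so keep it cheap and
// do not do anything that can suspend the thread.
class CountingAllocationListener : public AllocationListener {
 public:
  // Overrides AllocationListener::ObjectAllocated.
  void ObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
                       ObjPtr<mirror::Object>* obj ATTRIBUTE_UNUSED,
                       size_t byte_count) REQUIRES_SHARED(Locks::mutator_lock_) {
    allocations_.fetch_add(1u, std::memory_order_relaxed);
    bytes_.fetch_add(byte_count, std::memory_order_relaxed);
  }

  uint64_t AllocatedBytes() const { return bytes_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint64_t> allocations_{0};
  std::atomic<uint64_t> bytes_{0};
};

}  // namespace gc
}  // namespace art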
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 13ebb272c5..d921900933 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/stl_util.h"
+#include "obj_ptr-inl.h"
#include "stack.h"
#ifdef ART_TARGET_ANDROID
@@ -263,7 +264,7 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
}
void AllocRecordObjectMap::RecordAllocation(Thread* self,
- mirror::Object** obj,
+ ObjPtr<mirror::Object>* obj,
size_t byte_count) {
// Get stack trace outside of lock in case there are allocations during the stack walk.
// b/27858645.
@@ -305,7 +306,7 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
trace.SetTid(self->GetTid());
// Add the record.
- Put(*obj, AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
+ Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
DCHECK_LE(Size(), alloc_record_max_);
}
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index f1f013b3b5..c8b2b89702 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -21,6 +21,7 @@
#include <memory>
#include "base/mutex.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "gc_root.h"
@@ -210,7 +211,7 @@ class AllocRecordObjectMap {
// Caller needs to check that it is enabled before calling since we read the stack trace before
// checking the enabled boolean.
void RecordAllocation(Thread* self,
- mirror::Object** obj,
+ ObjPtr<mirror::Object>* obj,
size_t byte_count)
REQUIRES(!Locks::alloc_tracker_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2e971729f0..76a478ef1b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -296,7 +296,6 @@ class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
- Runtime::Current()->GetHeap()->DumpObject(LOG_STREAM(INFO), obj);
LOG(FATAL) << ref << " found in from space";
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 83789cc733..05ce9c7952 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -19,6 +19,7 @@
#include "heap.h"
+#include "allocation_listener.h"
#include "base/time_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
@@ -40,7 +41,7 @@ namespace gc {
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor) {
@@ -51,16 +52,19 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
CHECK_EQ(self->GetState(), kRunnable);
self->AssertThreadSuspensionIsAllowable();
self->AssertNoPendingException();
+ // Make sure to preserve klass.
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
self->PoisonObjectPointers();
}
// Need to check that we aren't the large object allocator since the large object allocation code
// path includes this function. If we didn't check we would have an infinite loop.
- mirror::Object* obj;
+ ObjPtr<mirror::Object> obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
- return obj;
+ return obj.Ptr();
} else {
// There should be an OOM exception, since we are retrying, clear it.
self->ClearException();
@@ -84,7 +88,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -92,14 +96,15 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
- (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
- LIKELY(obj != nullptr)) {
+ } else if (
+ !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+ (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
+ LIKELY(obj != nullptr)) {
DCHECK(!is_running_on_memory_tool_);
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -140,7 +145,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -184,6 +189,12 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
DCHECK(allocation_records_ != nullptr);
allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
}
+ AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
+ if (l != nullptr) {
+ // Same as above. We assume that a listener that was once stored will never be deleted.
+ // Otherwise we'd have to perform this under a lock.
+ l->ObjectAllocated(self, &obj, bytes_allocated);
+ }
} else {
DCHECK(!IsAllocTrackingEnabled());
}
@@ -206,25 +217,25 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
VerifyObject(obj);
self->VerifyStack();
- return obj;
+ return obj.Ptr();
}
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
if (kUseThreadLocalAllocationStack) {
- if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
+ if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
}
- } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
+ } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
PushOnAllocationStackWithInternalGC(self, obj);
}
}
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
- mirror::Class** klass,
+ ObjPtr<mirror::Class>* klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor) {
// Save and restore the class in case it moves.
@@ -398,7 +409,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
return ret;
}
-inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
+inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
// We need to have a zygote space or else our newly allocated large object can end up in the
// Zygote resulting in it being prematurely freed.
// We can only do this for primitive objects since large objects will not be within the card table
@@ -428,7 +439,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
inline void Heap::CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
- mirror::Object** obj) {
+ ObjPtr<mirror::Object>* obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
RequestConcurrentGCAndSaveObject(self, false, obj);
}
@@ -440,6 +451,16 @@ inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
card_table_->MarkCard(dst.Ptr());
}
+inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
+ int start_offset ATTRIBUTE_UNUSED,
+ size_t length ATTRIBUTE_UNUSED) {
+ card_table_->MarkCard(dst.Ptr());
+}
+
+inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
+ card_table_->MarkCard(obj.Ptr());
+}
+
} // namespace gc
} // namespace art
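
Both new write barriers collapse to card_table_->MarkCard(dst.Ptr()): the card table only records that some reference field inside a small fixed-size region was written, which is why start_offset and length can be ignored. A standalone analogue of that dirtying step follows; the class and the constants are illustrative, not ART's accounting::CardTable. The GC later revisits only the dirty cards, which is the point of the barrier.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kCardShiftDemo = 7;      // 128-byte cards (illustrative).
constexpr uint8_t kCardDirtyDemo = 0x70;  // Illustrative "dirty" marker value.

class MiniCardTable {
 public:
  MiniCardTable(uintptr_t heap_begin, size_t heap_size)
      : heap_begin_(heap_begin), cards_((heap_size >> kCardShiftDemo) + 1, 0) {}

  // Every write barrier collapses to this: dirty the single card covering the
  // written-to object, regardless of which field or how many elements changed.
  void MarkCard(const void* addr) {
    cards_[(reinterpret_cast<uintptr_t>(addr) - heap_begin_) >> kCardShiftDemo] = kCardDirtyDemo;
  }

 private:
  const uintptr_t heap_begin_;
  std::vector<uint8_t> cards_;
};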
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 01ad8d0366..bf5af8ee7e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -21,6 +21,7 @@
#include <unwind.h> // For GC verification.
#include <vector>
+#include "allocation_listener.h"
#include "art_field-inl.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
@@ -46,6 +47,7 @@
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
+#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
@@ -759,83 +761,6 @@ void Heap::DisableMovingGc() {
}
}
-std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
- if (!IsValidContinuousSpaceObjectAddress(klass)) {
- return StringPrintf("<non heap address klass %p>", klass);
- }
- mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
- if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
- std::string result("[");
- result += SafeGetClassDescriptor(component_type);
- return result;
- } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
- return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
- } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
- return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
- } else {
- mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
- if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
- return StringPrintf("<non heap address dex_cache %p>", dex_cache);
- }
- const DexFile* dex_file = dex_cache->GetDexFile();
- uint16_t class_def_idx = klass->GetDexClassDefIndex();
- if (class_def_idx == DexFile::kDexNoIndex16) {
- return "<class def not found>";
- }
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
- return dex_file->GetTypeDescriptor(type_id);
- }
-}
-
-std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
- if (obj == nullptr) {
- return "null";
- }
- mirror::Class* klass = obj->GetClass<kVerifyNone>();
- if (klass == nullptr) {
- return "(class=null)";
- }
- std::string result(SafeGetClassDescriptor(klass));
- if (obj->IsClass()) {
- result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
- }
- return result;
-}
-
-void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
- if (obj == nullptr) {
- stream << "(obj=null)";
- return;
- }
- if (IsAligned<kObjectAlignment>(obj)) {
- space::Space* space = nullptr;
- // Don't use find space since it only finds spaces which actually contain objects instead of
- // spaces which may contain objects (e.g. cleared bump pointer spaces).
- for (const auto& cur_space : continuous_spaces_) {
- if (cur_space->HasAddress(obj)) {
- space = cur_space;
- break;
- }
- }
- // Unprotect all the spaces.
- for (const auto& con_space : continuous_spaces_) {
- mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
- }
- stream << "Object " << obj;
- if (space != nullptr) {
- stream << " in space " << *space;
- }
- mirror::Class* klass = obj->GetClass<kVerifyNone>();
- stream << "\nclass=" << klass;
- if (klass != nullptr) {
- stream << " type= " << SafePrettyTypeOf(obj);
- }
- // Re-protect the address we faulted on.
- mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
- }
-}
-
bool Heap::IsCompilingBoot() const {
if (!Runtime::Current()->IsAotCompiler()) {
return false;
@@ -1011,10 +936,14 @@ void Heap::VisitObjects(ObjectCallback callback, void* arg) {
}
DecrementDisableMovingGC(self);
} else {
+ // Since concurrent moving GC has thread suspension, also poison ObjPtr in the normal case to
+ // catch bugs.
+ self->PoisonObjectPointers();
// GCs can move objects, so don't allow this.
ScopedAssertNoThreadSuspension ants("Visiting objects");
DCHECK(region_space_ == nullptr);
VisitObjectsInternal(callback, arg);
+ self->PoisonObjectPointers();
}
}
@@ -1287,6 +1216,16 @@ void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
}
}
+ALWAYS_INLINE
+static inline AllocationListener* GetAndOverwriteAllocationListener(
+ Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
+ AllocationListener* old;
+ do {
+ old = storage->LoadSequentiallyConsistent();
+ } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
+ return old;
+}
+
Heap::~Heap() {
VLOG(heap) << "Starting ~Heap()";
STLDeleteElements(&garbage_collectors_);
@@ -1307,36 +1246,49 @@ Heap::~Heap() {
<< " total=" << seen_backtrace_count_.LoadRelaxed() +
unique_backtrace_count_.LoadRelaxed();
}
+ // Delete any still registered allocation listener.
+ AllocationListener* l = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
+ delete l;
+
VLOG(heap) << "Finished ~Heap()";
}
-space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
- bool fail_ok) const {
+
+space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
for (const auto& space : continuous_spaces_) {
- if (space->Contains(obj)) {
+ if (space->Contains(addr)) {
return space;
}
}
+ return nullptr;
+}
+
+space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
+ bool fail_ok) const {
+ space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
+ if (space != nullptr) {
+ return space;
+ }
if (!fail_ok) {
- LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+ LOG(FATAL) << "object " << obj << " not inside any spaces!";
}
return nullptr;
}
-space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
+space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
bool fail_ok) const {
for (const auto& space : discontinuous_spaces_) {
- if (space->Contains(obj)) {
+ if (space->Contains(obj.Ptr())) {
return space;
}
}
if (!fail_ok) {
- LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+ LOG(FATAL) << "object " << obj << " not inside any spaces!";
}
return nullptr;
}
-space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
+space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
if (result != nullptr) {
return result;
@@ -1344,6 +1296,21 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
+space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
+ for (const auto& space : continuous_spaces_) {
+ if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+ return space;
+ }
+ }
+ for (const auto& space : discontinuous_spaces_) {
+ if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+ return space;
+ }
+ }
+ return nullptr;
+}
+
+
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
// If we're in a stack overflow, do not create a new exception. It would require running the
// constructor, which will of course still be in a stack overflow.
@@ -1408,6 +1375,8 @@ void Heap::Trim(Thread* self) {
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
ScopedTrace trace("Deflating monitors");
+ // Avoid race conditions on the lock word for CC.
+ ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
ScopedSuspendAll ssa(__FUNCTION__);
uint64_t start_time = NanoTime();
size_t count = runtime->GetMonitorList()->DeflateMonitors();
@@ -1508,62 +1477,49 @@ void Heap::TrimSpaces(Thread* self) {
<< static_cast<int>(100 * managed_utilization) << "%.";
}
-bool Heap::IsValidObjectAddress(ObjPtr<mirror::Object> obj) const {
- // Note: we deliberately don't take the lock here, and mustn't test anything that would require
- // taking the lock.
- if (obj == nullptr) {
+bool Heap::IsValidObjectAddress(const void* addr) const {
+ if (addr == nullptr) {
return true;
}
- return IsAligned<kObjectAlignment>(obj.Ptr()) &&
- FindSpaceFromObject(obj.Ptr(), true) != nullptr;
+ return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
}
-bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
- return FindContinuousSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
+ return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
}
-bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
- if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
+ bool search_allocation_stack,
+ bool search_live_stack,
+ bool sorted) {
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
return false;
}
- for (const auto& space : continuous_spaces_) {
- if (space->HasAddress(obj)) {
- return true;
- }
- }
- return false;
-}
-
-bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
- bool search_live_stack, bool sorted) {
- if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
- return false;
- }
- if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
+ if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
if (obj == klass) {
// This case happens for java.lang.Class.
return true;
}
return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
- } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
+ } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
// If we are in the allocated region of the temp space, then we are probably live (e.g. during
// a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
- return temp_space_->Contains(obj);
+ return temp_space_->Contains(obj.Ptr());
}
- if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
+ if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
return true;
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
space::DiscontinuousSpace* d_space = nullptr;
if (c_space != nullptr) {
- if (c_space->GetLiveBitmap()->Test(obj)) {
+ if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
if (d_space != nullptr) {
- if (d_space->GetLiveBitmap()->Test(obj)) {
+ if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
}
@@ -1575,20 +1531,20 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
}
if (search_allocation_stack) {
if (sorted) {
- if (allocation_stack_->ContainsSorted(obj)) {
+ if (allocation_stack_->ContainsSorted(obj.Ptr())) {
return true;
}
- } else if (allocation_stack_->Contains(obj)) {
+ } else if (allocation_stack_->Contains(obj.Ptr())) {
return true;
}
}
if (search_live_stack) {
if (sorted) {
- if (live_stack_->ContainsSorted(obj)) {
+ if (live_stack_->ContainsSorted(obj.Ptr())) {
return true;
}
- } else if (live_stack_->Contains(obj)) {
+ } else if (live_stack_->Contains(obj.Ptr())) {
return true;
}
}
@@ -1596,12 +1552,12 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
// We need to check the bitmaps again since there is a race where we mark something as live and
// then clear the stack containing it.
if (c_space != nullptr) {
- if (c_space->GetLiveBitmap()->Test(obj)) {
+ if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
+ if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
}
@@ -1631,7 +1587,7 @@ void Heap::DumpSpaces(std::ostream& stream) const {
}
}
-void Heap::VerifyObjectBody(mirror::Object* obj) {
+void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
if (verify_object_mode_ == kVerifyObjectModeDisabled) {
return;
}
@@ -1640,7 +1596,7 @@ void Heap::VerifyObjectBody(mirror::Object* obj) {
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
- CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
+ CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
CHECK(c != nullptr) << "Null class in object " << obj;
CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
@@ -1719,14 +1675,13 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- mirror::Class** klass) {
+ ObjPtr<mirror::Class>* klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
- klass = nullptr; // Invalidate for safety.
+ HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1929,7 +1884,7 @@ uint64_t Heap::GetBytesAllocatedEver() const {
class InstanceCounter {
public:
- InstanceCounter(const std::vector<mirror::Class*>& classes,
+ InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1941,7 +1896,7 @@ class InstanceCounter {
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
- mirror::Class* klass = instance_counter->classes_[i];
+ ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
if (instance_counter->use_is_assignable_from_) {
if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
++instance_counter->counts_[i];
@@ -1953,13 +1908,14 @@ class InstanceCounter {
}
private:
- const std::vector<mirror::Class*>& classes_;
+ const std::vector<Handle<mirror::Class>>& classes_;
bool use_is_assignable_from_;
uint64_t* const counts_;
DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};
-void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
+void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
+ bool use_is_assignable_from,
uint64_t* counts) {
InstanceCounter counter(classes, use_is_assignable_from, counts);
VisitObjects(InstanceCounter::Callback, &counter);
@@ -1967,44 +1923,55 @@ void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_i
class InstanceCollector {
public:
- InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ InstanceCollector(VariableSizedHandleScope& scope,
+ Handle<mirror::Class> c,
+ int32_t max_count,
+ std::vector<Handle<mirror::Object>>& instances)
REQUIRES_SHARED(Locks::mutator_lock_)
- : class_(c), max_count_(max_count), instances_(instances) {
- }
+ : scope_(scope),
+ class_(c),
+ max_count_(max_count),
+ instances_(instances) {}
+
static void Callback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
- if (obj->GetClass() == instance_collector->class_) {
+ if (obj->GetClass() == instance_collector->class_.Get()) {
if (instance_collector->max_count_ == 0 ||
instance_collector->instances_.size() < instance_collector->max_count_) {
- instance_collector->instances_.push_back(obj);
+ instance_collector->instances_.push_back(instance_collector->scope_.NewHandle(obj));
}
}
}
private:
- const mirror::Class* const class_;
+ VariableSizedHandleScope& scope_;
+ Handle<mirror::Class> const class_;
const uint32_t max_count_;
- std::vector<mirror::Object*>& instances_;
+ std::vector<Handle<mirror::Object>>& instances_;
DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};
-void Heap::GetInstances(mirror::Class* c,
+void Heap::GetInstances(VariableSizedHandleScope& scope,
+ Handle<mirror::Class> c,
int32_t max_count,
- std::vector<mirror::Object*>& instances) {
- InstanceCollector collector(c, max_count, instances);
+ std::vector<Handle<mirror::Object>>& instances) {
+ InstanceCollector collector(scope, c, max_count, instances);
VisitObjects(&InstanceCollector::Callback, &collector);
}
class ReferringObjectsFinder {
public:
- ReferringObjectsFinder(mirror::Object* object,
+ ReferringObjectsFinder(VariableSizedHandleScope& scope,
+ Handle<mirror::Object> object,
int32_t max_count,
- std::vector<mirror::Object*>& referring_objects)
+ std::vector<Handle<mirror::Object>>& referring_objects)
REQUIRES_SHARED(Locks::mutator_lock_)
- : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
- }
+ : scope_(scope),
+ object_(object),
+ max_count_(max_count),
+ referring_objects_(referring_objects) {}
static void Callback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2014,16 +1981,18 @@ class ReferringObjectsFinder {
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
o->VisitReferences(*this, VoidFunctor());
}
// For Object::VisitReferences.
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
- if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
- referring_objects_.push_back(obj);
+ if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
+ referring_objects_.push_back(scope_.NewHandle(obj));
}
}
@@ -2032,15 +2001,18 @@ class ReferringObjectsFinder {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
private:
- const mirror::Object* const object_;
+ VariableSizedHandleScope& scope_;
+ Handle<mirror::Object> const object_;
const uint32_t max_count_;
- std::vector<mirror::Object*>& referring_objects_;
+ std::vector<Handle<mirror::Object>>& referring_objects_;
DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};
-void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
- std::vector<mirror::Object*>& referring_objects) {
- ReferringObjectsFinder finder(o, max_count, referring_objects);
+void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
+ Handle<mirror::Object> o,
+ int32_t max_count,
+ std::vector<Handle<mirror::Object>>& referring_objects) {
+ ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
VisitObjects(&ReferringObjectsFinder::Callback, &finder);
}
@@ -3098,41 +3070,42 @@ class VerifyObjectVisitor {
const bool verify_referent_;
};
-void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
// Slow path, the allocation stack push back must have already failed.
- DCHECK(!allocation_stack_->AtomicPushBack(*obj));
+ DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
do {
// TODO: Add handle VerifyObject.
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
// Push our object into the reserve region of the allocation stack. This is only required due
// to heap verification requiring that roots are live (either in the live bitmap or in the
// allocation stack).
- CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+ CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- } while (!allocation_stack_->AtomicPushBack(*obj));
+ } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
}
-void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
+ ObjPtr<mirror::Object>* obj) {
// Slow path, the allocation stack push back must have already failed.
- DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
+ DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
StackReference<mirror::Object>* start_address;
StackReference<mirror::Object>* end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
&end_address)) {
// TODO: Add handle VerifyObject.
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
// Push our object into the reserve region of the allocation stack. This is only required due
// to heap verification requiring that roots are live (either in the live bitmap or in the
// allocation stack).
- CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+ CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
// Push into the reserve allocation stack.
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
- CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
+ CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
}
// Must do this with mutators suspended since we are directly accessing the allocation stacks.
@@ -3722,19 +3695,21 @@ void Heap::ClearGrowthLimit() {
}
}
-void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
+void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
ScopedObjectAccess soa(self);
ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
jvalue args[1];
args[0].l = arg.get();
InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
// Restore object in case it gets moved.
- *object = soa.Decode<mirror::Object>(arg.get()).Ptr();
+ *object = soa.Decode<mirror::Object>(arg.get());
}
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
+ bool force_full,
+ ObjPtr<mirror::Object>* obj) {
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
RequestConcurrentGC(self, force_full);
}
@@ -4011,7 +3986,7 @@ void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}
-void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
+void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
(c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
CHECK_GE(byte_count, sizeof(mirror::Object));
@@ -4137,7 +4112,7 @@ static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
return state.GetFrameCount();
}
-void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
+void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
!runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
@@ -4176,9 +4151,9 @@ void Heap::DisableGCForShutdown() {
gc_disabled_for_shutdown_ = true;
}
-bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
+bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
for (gc::space::ImageSpace* space : boot_image_spaces_) {
- if (space->HasAddress(obj)) {
+ if (space->HasAddress(obj.Ptr())) {
return true;
}
}
@@ -4223,5 +4198,22 @@ void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
}
}
+void Heap::SetAllocationListener(AllocationListener* l) {
+ AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
+
+ if (old == nullptr) {
+ Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ }
+}
+
+void Heap::RemoveAllocationListener() {
+ AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
+
+ if (old != nullptr) {
+ Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+ }
+}
+
+
} // namespace gc
} // namespace art
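
A usage sketch for the listener plumbing above, assuming a listener class like the CountingAllocationListener sketched earlier (the two helper functions are hypothetical). SetAllocationListener() also switches to the instrumented allocation entrypoints when the first listener is installed, RemoveAllocationListener() switches back, and a listener still registered at shutdown is deleted by ~Heap().

void StartAllocationProfiling() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // The heap reads this pointer without holding a lock, so the listener must
  // stay valid until it is removed (or until heap teardown deletes it).
  heap->SetAllocationListener(new gc::CountingAllocationListener());
}

void StopAllocationProfiling() {
  // Unregisters the listener and uninstruments the alloc entrypoints; the
  // listener object itself is deliberately not freed here (see heap.h).
  Runtime::Current()->GetHeap()->RemoveAllocationListener();
}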
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 678edff9c1..6d37140e81 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -34,6 +34,7 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
+#include "handle.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -49,6 +50,7 @@ class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
+class VariableSizedHandleScope;
namespace mirror {
class Class;
@@ -57,6 +59,7 @@ namespace mirror {
namespace gc {
+class AllocationListener;
class AllocRecordObjectMap;
class ReferenceProcessor;
class TaskProcessor;
@@ -193,36 +196,48 @@ class Heap {
// Allocates and initializes storage for an object instance.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_) {
- return AllocObjectWithAllocator<kInstrumented, true>(
- self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
+ return AllocObjectWithAllocator<kInstrumented, true>(self,
+ klass,
+ num_bytes,
+ GetCurrentAllocator(),
+ pre_fence_visitor);
}
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_) {
- return AllocObjectWithAllocator<kInstrumented, true>(
- self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
+ return AllocObjectWithAllocator<kInstrumented, true>(self,
+ klass,
+ num_bytes,
+ GetCurrentNonMovingAllocator(),
+ pre_fence_visitor);
}
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_);
AllocatorType GetCurrentAllocator() const {
@@ -240,7 +255,7 @@ class Heap {
void VisitObjectsPaused(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
+ void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
@@ -262,7 +277,7 @@ class Heap {
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
// TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
// proper lock ordering for it.
- void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
// Check sanity of all live references.
void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
@@ -275,16 +290,16 @@ class Heap {
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsValidObjectAddress(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
- bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
+ bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- bool IsLiveObjectLocked(mirror::Object* obj,
+ bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
bool search_allocation_stack = true,
bool search_live_stack = true,
bool sorted = false)
@@ -320,19 +335,25 @@ class Heap {
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
- void CountInstances(const std::vector<mirror::Class*>& classes,
+ void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+
// Implements JDWP RT_Instances.
- void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ void GetInstances(VariableSizedHandleScope& scope,
+ Handle<mirror::Class> c,
+ int32_t max_count,
+ std::vector<Handle<mirror::Object>>& instances)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+
// Implements JDWP OR_ReferringObjects.
- void GetReferringObjects(mirror::Object* o,
+ void GetReferringObjects(VariableSizedHandleScope& scope,
+ Handle<mirror::Object> o,
int32_t max_count,
- std::vector<mirror::Object*>& referring_objects)
+ std::vector<Handle<mirror::Object>>& referring_objects)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -444,16 +465,14 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
// Write barrier for array operations that update many field positions
- ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
- int start_offset ATTRIBUTE_UNUSED,
+ ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
+ int start_offset,
// TODO: element_count or byte_count?
- size_t length ATTRIBUTE_UNUSED) {
- card_table_->MarkCard(dst);
- }
+ size_t length)
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
- card_table_->MarkCard(obj);
- }
+ ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
+ REQUIRES_SHARED(Locks::mutator_lock_);
accounting::CardTable* GetCardTable() const {
return card_table_.get();
@@ -463,7 +482,7 @@ class Heap {
return rb_table_.get();
}
- void AddFinalizerReference(Thread* self, mirror::Object** object);
+ void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
// Returns the number of bytes currently allocated.
size_t GetBytesAllocated() const {
@@ -526,12 +545,20 @@ class Heap {
// get the space that corresponds to an object's address. Current implementation searches all
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
- space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
+ space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
REQUIRES_SHARED(Locks::mutator_lock_);
- space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
+
+ space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
bool fail_ok) const
REQUIRES_SHARED(Locks::mutator_lock_);
- space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
+
+ space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -597,7 +624,7 @@ class Heap {
return boot_image_spaces_;
}
- bool ObjectIsInBootImageSpace(mirror::Object* obj) const
+ bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsInBootImageOatFile(const void* p) const
@@ -649,12 +676,6 @@ class Heap {
void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
- // Dump object should only be used by the signal handler.
- void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
- // Safe version of pretty type of which check to make sure objects are heap addresses.
- std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
- std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
-
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os)
REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -784,6 +805,12 @@ class Heap {
HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
+ // Install an allocation listener.
+ void SetAllocationListener(AllocationListener* l);
+ // Remove an allocation listener. Note: the listener must not be deleted, as for performance
+ // reasons, we assume it stays valid when we read it (so that we don't require a lock).
+ void RemoveAllocationListener();
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -830,11 +857,11 @@ class Heap {
collector_type == kCollectorTypeMC ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
- bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
+ bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
- mirror::Object** obj)
+ ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
@@ -845,7 +872,7 @@ class Heap {
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self,
- mirror::Class** klass,
+ ObjPtr<mirror::Class>* klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -860,14 +887,14 @@ class Heap {
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- mirror::Class** klass)
+ ObjPtr<mirror::Class>* klass)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
mirror::Object* AllocateInto(Thread* self,
space::AllocSpace* space,
- mirror::Class* c,
+ ObjPtr<mirror::Class> c,
size_t bytes)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -892,10 +919,6 @@ class Heap {
template <bool kGrow>
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
- // Returns true if the address passed in is within the address range of a continuous space.
- bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -907,7 +930,7 @@ class Heap {
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
REQUIRES(!*pending_task_lock_);
- void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
+ void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_);
bool IsGCRequestPending() const;
@@ -979,13 +1002,13 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
// Push an object onto the allocation stack.
- void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+ void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
- void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
+ void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
- void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
+ void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -1016,7 +1039,7 @@ class Heap {
void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
// GC stress mode attempts to do one GC per unique backtrace.
- void CheckGcStressMode(Thread* self, mirror::Object** obj)
+ void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
@@ -1352,6 +1375,9 @@ class Heap {
// Boot image spaces.
std::vector<space::ImageSpace*> boot_image_spaces_;
+ // An installed allocation listener.
+ Atomic<AllocationListener*> alloc_listener_;
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
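
The JDWP-facing queries now take a VariableSizedHandleScope and fill vectors of Handles instead of raw mirror::Object*, so the results stay rooted (and get updated by a moving GC) across suspension points. A hedged caller sketch, assuming ART runtime headers; DumpInstancesOf is a hypothetical helper, not part of this change.

void DumpInstancesOf(Thread* self, ObjPtr<mirror::Class> klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  VariableSizedHandleScope hs(self);
  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
  std::vector<Handle<mirror::Object>> instances;
  // max_count == 0 means "no limit" in InstanceCollector above.
  Runtime::Current()->GetHeap()->GetInstances(hs, h_klass, /*max_count*/ 0, instances);
  for (const Handle<mirror::Object>& h : instances) {
    // Each handle is rooted in `hs`, so it remains valid until `hs` goes out
    // of scope, even if the GC moves the underlying object.
    LOG(INFO) << "instance: " << h.Get();
  }
}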
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index cceb0072a9..b212d095cb 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -28,24 +28,30 @@
namespace art {
template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
- : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
- DCHECK_EQ(self, Thread::Current());
+inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link,
+ mirror::Object* fill_value)
+ : HandleScope(link, kNumReferences) {
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
- static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
- // TODO: Figure out how to use a compile assert.
- CHECK_EQ(&storage_[0], GetReferences());
+ static_assert(kNumReferences >= 1, "FixedSizeHandleScope must contain at least 1 reference");
+ DCHECK_EQ(&storage_[0], GetReferences()); // TODO: Figure out how to use a compile assert.
for (size_t i = 0; i < kNumReferences; ++i) {
SetReference(i, fill_value);
}
+}
+
+template<size_t kNumReferences>
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
+ : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope(), fill_value),
+ self_(self) {
+ DCHECK_EQ(self, Thread::Current());
self_->PushHandleScope(this);
}
template<size_t kNumReferences>
inline StackHandleScope<kNumReferences>::~StackHandleScope() {
- HandleScope* top_handle_scope = self_->PopHandleScope();
+ BaseHandleScope* top_handle_scope = self_->PopHandleScope();
DCHECK_EQ(top_handle_scope, this);
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(self_);
@@ -66,7 +72,7 @@ inline size_t HandleScope::SizeOf(PointerSize pointer_size, uint32_t num_referen
}
inline mirror::Object* HandleScope::GetReference(size_t i) const {
- DCHECK_LT(i, number_of_references_);
+ DCHECK_LT(i, NumberOfReferences());
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
@@ -74,12 +80,12 @@ inline mirror::Object* HandleScope::GetReference(size_t i) const {
}
inline Handle<mirror::Object> HandleScope::GetHandle(size_t i) {
- DCHECK_LT(i, number_of_references_);
+ DCHECK_LT(i, NumberOfReferences());
return Handle<mirror::Object>(&GetReferences()[i]);
}
inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
- DCHECK_LT(i, number_of_references_);
+ DCHECK_LT(i, NumberOfReferences());
return MutableHandle<mirror::Object>(&GetReferences()[i]);
}
@@ -87,7 +93,7 @@ inline void HandleScope::SetReference(size_t i, mirror::Object* object) {
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
- DCHECK_LT(i, number_of_references_);
+ DCHECK_LT(i, NumberOfReferences());
GetReferences()[i].Assign(object);
}
@@ -95,13 +101,13 @@ inline bool HandleScope::Contains(StackReference<mirror::Object>* handle_scope_e
// A HandleScope should always contain something. One created by the
// jni_compiler should have a jobject/jclass as a native method is
// passed in a this pointer or a class
- DCHECK_GT(number_of_references_, 0U);
+ DCHECK_GT(NumberOfReferences(), 0U);
return &GetReferences()[0] <= handle_scope_entry &&
handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
}
template<size_t kNumReferences> template<class T>
-inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
+inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
SetReference(pos_, object);
MutableHandle<T> h(GetHandle<T>(pos_));
pos_++;
@@ -109,24 +115,24 @@ inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
}
template<size_t kNumReferences> template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScope<kNumReferences>::NewHandle(
+inline MutableHandle<MirrorType> FixedSizeHandleScope<kNumReferences>::NewHandle(
ObjPtr<MirrorType, kPoison> object) {
return NewHandle(object.Ptr());
}
template<size_t kNumReferences> template<class T>
-inline HandleWrapper<T> StackHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
+inline HandleWrapper<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
return HandleWrapper<T>(object, NewHandle(*object));
}
template<size_t kNumReferences> template<class T>
-inline HandleWrapperObjPtr<T> StackHandleScope<kNumReferences>::NewHandleWrapper(
+inline HandleWrapperObjPtr<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(
ObjPtr<T>* object) {
return HandleWrapperObjPtr<T>(object, NewHandle(*object));
}
template<size_t kNumReferences>
-inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
@@ -135,12 +141,111 @@ inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Obj
GetReferences()[i].Assign(object);
}
+// Number of references contained within this handle scope.
+inline uint32_t BaseHandleScope::NumberOfReferences() const {
+ return LIKELY(!IsVariableSized())
+ ? AsHandleScope()->NumberOfReferences()
+ : AsVariableSized()->NumberOfReferences();
+}
+
+inline bool BaseHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+ return LIKELY(!IsVariableSized())
+ ? AsHandleScope()->Contains(handle_scope_entry)
+ : AsVariableSized()->Contains(handle_scope_entry);
+}
+
+template <typename Visitor>
+inline void BaseHandleScope::VisitRoots(Visitor& visitor) {
+ if (LIKELY(!IsVariableSized())) {
+ AsHandleScope()->VisitRoots(visitor);
+ } else {
+ AsVariableSized()->VisitRoots(visitor);
+ }
+}
+
+inline VariableSizedHandleScope* BaseHandleScope::AsVariableSized() {
+ DCHECK(IsVariableSized());
+ return down_cast<VariableSizedHandleScope*>(this);
+}
+
+inline HandleScope* BaseHandleScope::AsHandleScope() {
+ DCHECK(!IsVariableSized());
+ return down_cast<HandleScope*>(this);
+}
+
+inline const VariableSizedHandleScope* BaseHandleScope::AsVariableSized() const {
+ DCHECK(IsVariableSized());
+ return down_cast<const VariableSizedHandleScope*>(this);
+}
+
+inline const HandleScope* BaseHandleScope::AsHandleScope() const {
+ DCHECK(!IsVariableSized());
+ return down_cast<const HandleScope*>(this);
+}
+
+template<class T>
+MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
+ if (current_scope_->RemainingSlots() == 0) {
+ current_scope_ = new LocalScopeType(current_scope_);
+ }
+ return current_scope_->NewHandle(object);
+}
+
template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScopeCollection::NewHandle(
+inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(
ObjPtr<MirrorType, kPoison> ptr) {
return NewHandle(ptr.Ptr());
}
+inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
+ : BaseHandleScope(self->GetTopHandleScope()),
+ self_(self) {
+ current_scope_ = new LocalScopeType(/*link*/ nullptr);
+ self_->PushHandleScope(this);
+}
+
+inline VariableSizedHandleScope::~VariableSizedHandleScope() {
+ BaseHandleScope* top_handle_scope = self_->PopHandleScope();
+ DCHECK_EQ(top_handle_scope, this);
+ while (current_scope_ != nullptr) {
+ LocalScopeType* next = reinterpret_cast<LocalScopeType*>(current_scope_->GetLink());
+ delete current_scope_;
+ current_scope_ = next;
+ }
+}
+
+inline uint32_t VariableSizedHandleScope::NumberOfReferences() const {
+ uint32_t sum = 0;
+ const LocalScopeType* cur = current_scope_;
+ while (cur != nullptr) {
+ sum += cur->NumberOfReferences();
+ cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+ }
+ return sum;
+}
+
+inline bool VariableSizedHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry)
+ const {
+ const LocalScopeType* cur = current_scope_;
+ while (cur != nullptr) {
+ if (cur->Contains(handle_scope_entry)) {
+ return true;
+ }
+ cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+ }
+ return false;
+}
+
+template <typename Visitor>
+inline void VariableSizedHandleScope::VisitRoots(Visitor& visitor) {
+ LocalScopeType* cur = current_scope_;
+ while (cur != nullptr) {
+ cur->VisitRoots(visitor);
+ cur = reinterpret_cast<LocalScopeType*>(cur->GetLink());
+ }
+}
+
+
} // namespace art
#endif // ART_RUNTIME_HANDLE_SCOPE_INL_H_
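For orientation, the growth strategy implemented above can be reduced to a small standalone sketch: a singly linked list of fixed-size scopes where a fresh scope is prepended once the current one runs out of slots. The names below are illustrative placeholders, not the ART types.

    #include <cstddef>

    template <size_t kSlots>
    struct FixedScope {
      void* slots[kSlots];         // Stand-in for StackReference<mirror::Object>.
      size_t pos = 0;
      FixedScope* link = nullptr;  // Previous scope in the list.

      size_t RemainingSlots() const { return kSlots - pos; }
      void** NewSlot() { return &slots[pos++]; }
    };

    template <size_t kSlots>
    struct VariableScope {
      FixedScope<kSlots>* current = new FixedScope<kSlots>();

      void** NewSlot() {
        if (current->RemainingSlots() == 0) {
          FixedScope<kSlots>* fresh = new FixedScope<kSlots>();
          fresh->link = current;   // Prepend: the newest scope becomes the head.
          current = fresh;
        }
        return current->NewSlot();
      }

      ~VariableScope() {
        // Walk the list and free every scope, mirroring the destructor above.
        while (current != nullptr) {
          FixedScope<kSlots>* prev = current->link;
          delete current;
          current = prev;
        }
      }
    };

With kSlots = 4 this matches kNumReferencesPerScope in the patch; NumberOfReferences() and Contains() simply walk the same list.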
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index fc729a547b..8a0aba6121 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -29,26 +29,69 @@
namespace art {
+class HandleScope;
template<class MirrorType, bool kPoison> class ObjPtr;
+class Thread;
+class VariableSizedHandleScope;
namespace mirror {
class Object;
}
-class Thread;
+// Basic handle scope, tracked by a list. May be variable sized.
+class PACKED(4) BaseHandleScope {
+ public:
+ bool IsVariableSized() const {
+ return number_of_references_ == kNumReferencesVariableSized;
+ }
+
+ // Number of references contained within this handle scope.
+ ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+ ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Link to previous BaseHandleScope or null.
+ BaseHandleScope* GetLink() const {
+ return link_;
+ }
+
+ ALWAYS_INLINE VariableSizedHandleScope* AsVariableSized();
+ ALWAYS_INLINE HandleScope* AsHandleScope();
+ ALWAYS_INLINE const VariableSizedHandleScope* AsVariableSized() const;
+ ALWAYS_INLINE const HandleScope* AsHandleScope() const;
+
+ protected:
+ BaseHandleScope(BaseHandleScope* link, uint32_t num_references)
+ : link_(link),
+ number_of_references_(num_references) {}
+
+ // Variable sized constructor.
+ BaseHandleScope(BaseHandleScope* link)
+ : link_(link),
+ number_of_references_(kNumReferencesVariableSized) {}
+
+ static constexpr int32_t kNumReferencesVariableSized = -1;
+
+ // Linked list of handle scopes. The root is held by a Thread.
+ BaseHandleScope* const link_;
+
+ // Number of handlerized references. -1 for variable sized handle scopes.
+ const int32_t number_of_references_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BaseHandleScope);
+};
// HandleScopes are scoped objects containing a number of Handles. They are used to allocate
// handles, for these handles (and the objects contained within them) to be visible/roots for the
// GC. It is most common to stack allocate HandleScopes using StackHandleScope.
-class PACKED(4) HandleScope {
+class PACKED(4) HandleScope : public BaseHandleScope {
public:
~HandleScope() {}
- // Number of references contained within this handle scope.
- uint32_t NumberOfReferences() const {
- return number_of_references_;
- }
-
// We have versions with and without explicit pointer size of the following. The first two are
// used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
// takes the pointer size explicitly so that at compile time we can cross-compile correctly.
@@ -59,11 +102,6 @@ class PACKED(4) HandleScope {
// Returns the size of a HandleScope containing num_references handles.
static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
- // Link to previous HandleScope or null.
- HandleScope* GetLink() const {
- return link_;
- }
-
ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -93,11 +131,26 @@ class PACKED(4) HandleScope {
}
// Placement new creation.
- static HandleScope* Create(void* storage, HandleScope* link, uint32_t num_references)
+ static HandleScope* Create(void* storage, BaseHandleScope* link, uint32_t num_references)
WARN_UNUSED {
return new (storage) HandleScope(link, num_references);
}
+ // Number of references contained within this handle scope.
+ ALWAYS_INLINE uint32_t NumberOfReferences() const {
+ DCHECK_GE(number_of_references_, 0);
+ return static_cast<uint32_t>(number_of_references_);
+ }
+
+ template <typename Visitor>
+ void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+ // GetReference returns a pointer to the stack reference within the handle scope. If this
+ // needs to be updated, it will be done by the root visitor.
+ visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
+ }
+ }
+
protected:
// Return backing storage used for references.
ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
@@ -105,20 +158,11 @@ class PACKED(4) HandleScope {
return reinterpret_cast<StackReference<mirror::Object>*>(address);
}
- explicit HandleScope(size_t number_of_references) :
- link_(nullptr), number_of_references_(number_of_references) {
- }
+ explicit HandleScope(size_t number_of_references) : HandleScope(nullptr, number_of_references) {}
// Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
- HandleScope(HandleScope* link, uint32_t num_references) :
- link_(link), number_of_references_(num_references) {
- }
-
- // Link-list of handle scopes. The root is held by a Thread.
- HandleScope* const link_;
-
- // Number of handlerized references.
- const uint32_t number_of_references_;
+ HandleScope(BaseHandleScope* link, uint32_t num_references)
+ : BaseHandleScope(link, num_references) {}
// Storage for references.
// StackReference<mirror::Object> references_[number_of_references_]
@@ -165,14 +209,10 @@ class HandleWrapperObjPtr : public MutableHandle<T> {
ObjPtr<T>* const obj_;
};
-
-// Scoped handle storage of a fixed size that is usually stack allocated.
+// Fixed size handle scope that is not necessarily linked in the thread.
template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public HandleScope {
+class PACKED(4) FixedSizeHandleScope : public HandleScope {
public:
- explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
- ALWAYS_INLINE ~StackHandleScope();
-
template<class T>
ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -191,11 +231,15 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
REQUIRES_SHARED(Locks::mutator_lock_);
- Thread* Self() const {
- return self_;
+ size_t RemainingSlots() const {
+ return kNumReferences - pos_;
}
private:
+ explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link,
+ mirror::Object* fill_value = nullptr);
+ ALWAYS_INLINE ~FixedSizeHandleScope() {}
+
template<class T>
ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, kNumReferences);
@@ -205,66 +249,65 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
// Reference storage needs to be first as expected by the HandleScope layout.
StackReference<mirror::Object> storage_[kNumReferences];
- // The thread that the stack handle scope is a linked list upon. The stack handle scope will
- // push and pop itself from this thread.
- Thread* const self_;
-
// Position new handles will be created.
- size_t pos_;
+ size_t pos_ = 0;
template<size_t kNumRefs> friend class StackHandleScope;
+ friend class VariableSizedHandleScope;
};
-// Utility class to manage a collection (stack) of StackHandleScope. All the managed
-// scope handle have the same fixed sized.
-// Calls to NewHandle will create a new handle inside the top StackHandleScope.
-// When the handle scope becomes full a new one is created and push on top of the
-// previous.
-//
-// NB:
-// - it is not safe to use the *same* StackHandleScopeCollection intermix with
-// other StackHandleScopes.
-// - this is a an easy way around implementing a full ZoneHandleScope to manage an
-// arbitrary number of handles.
-class StackHandleScopeCollection {
+// Scoped handle storage of a fixed size that is stack allocated.
+template<size_t kNumReferences>
+class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
public:
- explicit StackHandleScopeCollection(Thread* const self) :
- self_(self),
- current_scope_num_refs_(0) {
- }
+ explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+ ALWAYS_INLINE ~StackHandleScope();
- ~StackHandleScopeCollection() {
- while (!scopes_.empty()) {
- delete scopes_.top();
- scopes_.pop();
- }
+ Thread* Self() const {
+ return self_;
}
+ private:
+ // The thread that the stack handle scope is a linked list upon. The stack handle scope will
+ // push and pop itself from this thread.
+ Thread* const self_;
+};
+
+// Utility class to manage a variable sized handle scope by having a list of fixed size handle
+// scopes.
+// Calls to NewHandle will create a new handle inside the current FixedSizeHandleScope.
+// When the current handle scope becomes full a new one is created and put at the front of the
+// list.
+class VariableSizedHandleScope : public BaseHandleScope {
+ public:
+ explicit VariableSizedHandleScope(Thread* const self);
+ ~VariableSizedHandleScope();
+
template<class T>
- MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
- StackHandleScope<kNumReferencesPerScope>* scope =
- new StackHandleScope<kNumReferencesPerScope>(self_);
- scopes_.push(scope);
- current_scope_num_refs_ = 0;
- }
- current_scope_num_refs_++;
- return scopes_.top()->NewHandle(object);
- }
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<class MirrorType, bool kPoison>
MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType, kPoison> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Number of references contained within this handle scope.
+ ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+ ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+ template <typename Visitor>
+ void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
static constexpr size_t kNumReferencesPerScope = 4;
Thread* const self_;
- std::stack<StackHandleScope<kNumReferencesPerScope>*> scopes_;
- size_t current_scope_num_refs_;
+ // Linked list of fixed size handle scopes.
+ using LocalScopeType = FixedSizeHandleScope<kNumReferencesPerScope>;
+ LocalScopeType* current_scope_;
- DISALLOW_COPY_AND_ASSIGN(StackHandleScopeCollection);
+ DISALLOW_COPY_AND_ASSIGN(VariableSizedHandleScope);
};
} // namespace art
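The header relies on a single sentinel to tell the two shapes apart: number_of_references_ holds the real count for a fixed-size scope and -1 for a variable-sized one, and every dispatch goes through that check rather than a vtable. A minimal sketch of the same idea, with placeholder names rather than the ART classes:

    #include <cstdint>

    class Base {
     public:
      bool IsVariableSized() const { return number_of_references_ == kVariableSized; }

     protected:
      explicit Base(int32_t num_refs) : number_of_references_(num_refs) {}
      static constexpr int32_t kVariableSized = -1;
      const int32_t number_of_references_;  // -1 means "ask the variable-sized subclass".
    };

    class Fixed : public Base {
     public:
      explicit Fixed(uint32_t n) : Base(static_cast<int32_t>(n)) {}
      uint32_t Count() const { return static_cast<uint32_t>(number_of_references_); }
    };

    class Variable : public Base {
     public:
      Variable() : Base(kVariableSized) {}
      uint32_t Count() const { return dynamic_count_; }
      uint32_t dynamic_count_ = 0;
    };

    // Dispatch the way BaseHandleScope::NumberOfReferences() does, without virtual calls.
    inline uint32_t Count(const Base* scope) {
      return scope->IsVariableSized()
          ? static_cast<const Variable*>(scope)->Count()
          : static_cast<const Fixed*>(scope)->Count();
    }

Keeping the hierarchy vtable-free presumably matters here because the handle scope layout and member offsets are consumed directly by generated code, as the offset accessors above suggest.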
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index c269a37f8d..92063c4ba8 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -15,6 +15,7 @@
*/
#include "base/enums.h"
+#include "common_runtime_test.h"
#include "gtest/gtest.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -22,51 +23,85 @@
namespace art {
-// Handle scope with a fixed size which is allocated on the stack.
-template<size_t kNumReferences>
-class NoThreadStackHandleScope : public HandleScope {
- public:
- explicit NoThreadStackHandleScope(HandleScope* link) : HandleScope(link, kNumReferences) {
- }
- ~NoThreadStackHandleScope() {
- }
-
- private:
- // references_storage_ needs to be first so that it matches the address of references_
- StackReference<mirror::Object> references_storage_[kNumReferences];
-};
+class HandleScopeTest : public CommonRuntimeTest {};
// Test the offsets computed for members of HandleScope. Because of cross-compiling
// it is impossible the use OFFSETOF_MEMBER, so we do some reasonable computations ourselves. This
// test checks whether we do the right thing.
-TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
+TEST_F(HandleScopeTest, Offsets) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
// As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
// here. So do the inverse: set some data, and access it through pointers created from the offsets.
- NoThreadStackHandleScope<0x9ABC> test_table(reinterpret_cast<HandleScope*>(0x5678));
- test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
+ StackHandleScope<0x1> hs0(soa.Self());
+ static const size_t kNumReferences = 0x9ABC;
+ StackHandleScope<kNumReferences> test_table(soa.Self());
+ ObjPtr<mirror::Class> c = class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ test_table.SetReference(0, c.Ptr());
uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
{
- uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
+ BaseHandleScope** link_ptr = reinterpret_cast<BaseHandleScope**>(table_base_ptr +
HandleScope::LinkOffset(kRuntimePointerSize));
- EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
+ EXPECT_EQ(*link_ptr, &hs0);
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
- EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
+ EXPECT_EQ(*num_ptr, static_cast<size_t>(kNumReferences));
}
{
- // Assume sizeof(StackReference<mirror::Object>) == sizeof(uint32_t)
- // TODO: How can we make this assumption-less but still access directly and fully?
- EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
-
- uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
+ auto* ref_ptr = reinterpret_cast<StackReference<mirror::Object>*>(table_base_ptr +
HandleScope::ReferencesOffset(kRuntimePointerSize));
- EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
+ EXPECT_OBJ_PTR_EQ(ref_ptr->AsMirrorPtr(), c);
+ }
+}
+
+class CollectVisitor {
+ public:
+ void VisitRootIfNonNull(StackReference<mirror::Object>* ref) {
+ if (!ref->IsNull()) {
+ visited.insert(ref);
+ }
+ ++total_visited;
+ }
+
+ std::set<StackReference<mirror::Object>*> visited;
+ size_t total_visited = 0; // including null.
+};
+
+// Test functionality of variable sized handle scopes.
+TEST_F(HandleScopeTest, VariableSized) {
+ ScopedObjectAccess soa(Thread::Current());
+ VariableSizedHandleScope hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> c =
+ hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+ // Test nested scopes.
+ StackHandleScope<1> inner(soa.Self());
+ inner.NewHandle(c->AllocObject(soa.Self()));
+ // Add a bunch of handles and make sure callbacks work.
+ static const size_t kNumHandles = 100;
+ std::vector<Handle<mirror::Object>> handles;
+ for (size_t i = 0; i < kNumHandles; ++i) {
+ BaseHandleScope* base = &hs;
+ ObjPtr<mirror::Object> o = c->AllocObject(soa.Self());
+ handles.push_back(hs.NewHandle(o));
+ EXPECT_OBJ_PTR_EQ(o, handles.back().Get());
+ EXPECT_TRUE(hs.Contains(handles.back().GetReference()));
+ EXPECT_TRUE(base->Contains(handles.back().GetReference()));
+ EXPECT_EQ(hs.NumberOfReferences(), base->NumberOfReferences());
+ }
+ CollectVisitor visitor;
+ BaseHandleScope* base = &hs;
+ base->VisitRoots(visitor);
+ EXPECT_LE(visitor.visited.size(), base->NumberOfReferences());
+ EXPECT_EQ(visitor.total_visited, base->NumberOfReferences());
+ for (StackReference<mirror::Object>* ref : visitor.visited) {
+ EXPECT_TRUE(base->Contains(ref));
}
}
diff --git a/runtime/imtable-inl.h b/runtime/imtable-inl.h
index 0cb9b5e4dc..cb85fa6e56 100644
--- a/runtime/imtable-inl.h
+++ b/runtime/imtable-inl.h
@@ -20,15 +20,82 @@
#include "imtable.h"
#include "art_method-inl.h"
+#include "dex_file.h"
+#include "utf.h"
namespace art {
-inline uint32_t ImTable::GetBaseImtHash(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
- return method->GetDexMethodIndex();
+static constexpr bool kImTableHashUseName = true;
+static constexpr bool kImTableHashUseCoefficients = true;
+
+// Magic configuration that minimizes some common runtime calls.
+static constexpr uint32_t kImTableHashCoefficientClass = 427;
+static constexpr uint32_t kImTableHashCoefficientName = 16;
+static constexpr uint32_t kImTableHashCoefficientSignature = 14;
+
+inline void ImTable::GetImtHashComponents(ArtMethod* method,
+ uint32_t* class_hash,
+ uint32_t* name_hash,
+ uint32_t* signature_hash) {
+ if (kImTableHashUseName) {
+ if (method->IsProxyMethod()) {
+ *class_hash = 0;
+ *name_hash = 0;
+ *signature_hash = 0;
+ return;
+ }
+
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+
+ // Class descriptor for the class component.
+ *class_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodDeclaringClassDescriptor(method_id));
+
+ // Method name for the method component.
+ *name_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodName(method_id));
+
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+
+ // Read the proto for the signature component.
+ uint32_t tmp = ComputeModifiedUtf8Hash(
+ dex_file->GetTypeDescriptor(dex_file->GetTypeId(proto_id.return_type_idx_)));
+
+ // Mix in the argument types.
+ // Note: we could consider just using the shorty. This would be faster, at the price of
+ // potential collisions.
+ const DexFile::TypeList* param_types = dex_file->GetProtoParameters(proto_id);
+ if (param_types != nullptr) {
+ for (size_t i = 0; i != param_types->Size(); ++i) {
+ const DexFile::TypeItem& type = param_types->GetTypeItem(i);
+ tmp = 31 * tmp + ComputeModifiedUtf8Hash(
+ dex_file->GetTypeDescriptor(dex_file->GetTypeId(type.type_idx_)));
+ }
+ }
+
+ *signature_hash = tmp;
+ return;
+ } else {
+ *class_hash = method->GetDexMethodIndex();
+ *name_hash = 0;
+ *signature_hash = 0;
+ return;
+ }
}
inline uint32_t ImTable::GetImtIndex(ArtMethod* method) {
- return GetBaseImtHash(method) % ImTable::kSize;
+ uint32_t class_hash, name_hash, signature_hash;
+ GetImtHashComponents(method, &class_hash, &name_hash, &signature_hash);
+
+ uint32_t mixed_hash;
+ if (!kImTableHashUseCoefficients) {
+ mixed_hash = class_hash + name_hash + signature_hash;
+ } else {
+ mixed_hash = kImTableHashCoefficientClass * class_hash +
+ kImTableHashCoefficientName * name_hash +
+ kImTableHashCoefficientSignature * signature_hash;
+ }
+
+ return mixed_hash % ImTable::kSize;
}
} // namespace art
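The slot selection above boils down to mixing three precomputed component hashes with fixed coefficients and reducing modulo the table size. A standalone sketch of just the mixing step (the component hashes come from ComputeModifiedUtf8Hash over the declaring-class descriptor, method name, and signature strings; the table size below is a placeholder, not necessarily ImTable::kSize):

    #include <cstdint>

    // Coefficients as introduced in this patch.
    constexpr uint32_t kCoeffClass = 427;
    constexpr uint32_t kCoeffName = 16;
    constexpr uint32_t kCoeffSignature = 14;
    constexpr uint32_t kTableSize = 43;  // Placeholder for ImTable::kSize.

    inline uint32_t ImtSlot(uint32_t class_hash, uint32_t name_hash, uint32_t signature_hash) {
      // Unsigned arithmetic wraps on overflow, which is acceptable for a hash mix.
      uint32_t mixed = kCoeffClass * class_hash +
                       kCoeffName * name_hash +
                       kCoeffSignature * signature_hash;
      return mixed % kTableSize;
    }

Because the components are derived from names and descriptors rather than dex method indices, a method keeps its slot even when the surrounding dex file changes, which is what the new imtable_test.cc below asserts.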
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 6df890d14b..b7066bd521 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -23,6 +23,7 @@
#include "base/enums.h"
#include "base/macros.h"
+#include "base/mutex.h"
namespace art {
@@ -74,18 +75,17 @@ class ImTable {
return kSize * static_cast<size_t>(pointer_size);
}
- // Converts a method to the base hash used in GetImtIndex.
- ALWAYS_INLINE static inline uint32_t GetBaseImtHash(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE static inline uint32_t GetBaseImtHash(const DexFile* dex_file, uint32_t method_idx)
+ // Converts a method to the base hash components used in GetImtIndex.
+ ALWAYS_INLINE static inline void GetImtHashComponents(ArtMethod* method,
+ uint32_t* class_hash,
+ uint32_t* name_hash,
+ uint32_t* signature_hash)
REQUIRES_SHARED(Locks::mutator_lock_);
// The (complete) hashing scheme to map an ArtMethod to a slot in the Interface Method Table
// (IMT).
ALWAYS_INLINE static inline uint32_t GetImtIndex(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE static inline uint32_t GetImtIndex(const DexFile* dex_file, uint32_t method_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/imtable_test.cc b/runtime/imtable_test.cc
new file mode 100644
index 0000000000..8cbe2916ec
--- /dev/null
+++ b/runtime/imtable_test.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "imtable-inl.h"
+
+#include <memory>
+#include <string>
+
+#include "jni.h"
+
+#include "base/mutex.h"
+#include "class_linker.h"
+#include "common_runtime_test.h"
+#include "mirror/accessible_object.h"
+#include "mirror/class.h"
+#include "mirror/class_loader.h"
+#include "handle_scope-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+class ImTableTest : public CommonRuntimeTest {
+ public:
+ std::pair<mirror::Class*, mirror::Class*> LoadClasses(const std::string& class_name)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ jobject jclass_loader_a = LoadDex("IMTA");
+ CHECK(jclass_loader_a != nullptr);
+ jobject jclass_loader_b = LoadDex("IMTB");
+ CHECK(jclass_loader_b != nullptr);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+
+ StackHandleScope<3> hs(self);
+ MutableHandle<mirror::ClassLoader> h_class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
+
+ // A.
+ h_class_loader.Assign(
+ ObjPtr<mirror::ClassLoader>::DownCast(self->DecodeJObject(jclass_loader_a)));
+ Handle<mirror::Class> h_class_a(
+ hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), h_class_loader)));
+ if (h_class_a.Get() == nullptr) {
+ LOG(ERROR) << self->GetException()->Dump();
+ CHECK(false) << "h_class_a == nullptr";
+ }
+
+ // B.
+ h_class_loader.Assign(
+ ObjPtr<mirror::ClassLoader>::DownCast(self->DecodeJObject(jclass_loader_b)));
+ Handle<mirror::Class> h_class_b(
+ hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), h_class_loader)));
+ if (h_class_b.Get() == nullptr) {
+ LOG(ERROR) << self->GetException()->Dump();
+ CHECK(false) << "h_class_b == nullptr";
+ }
+
+ return std::make_pair(h_class_a.Get(), h_class_b.Get());
+ }
+
+ std::pair<ArtMethod*, ArtMethod*> LoadMethods(const std::string& class_name,
+ const std::string& method_name)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::pair<mirror::Class*, mirror::Class*> classes = LoadClasses(class_name);
+
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+
+ ArtMethod* method_a =
+ classes.first->FindDeclaredVirtualMethodByName(method_name, pointer_size);
+ ArtMethod* method_b =
+ classes.second->FindDeclaredVirtualMethodByName(method_name, pointer_size);
+
+ return std::make_pair(method_a, method_b);
+ }
+};
+
+TEST_F(ImTableTest, NewMethodBefore) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ std::pair<ArtMethod*, ArtMethod*> methods = LoadMethods("LInterfaces$A;", "foo");
+ CHECK_EQ(ImTable::GetImtIndex(methods.first), ImTable::GetImtIndex(methods.second));
+}
+
+TEST_F(ImTableTest, NewClassBefore) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ std::pair<ArtMethod*, ArtMethod*> methods = LoadMethods("LInterfaces$Z;", "foo");
+ CHECK_EQ(ImTable::GetImtIndex(methods.first), ImTable::GetImtIndex(methods.second));
+}
+
+} // namespace art
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e05f8f307c..e357fa66a4 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -90,7 +90,7 @@ inline ObjPtr<mirror::Object> IndirectReferenceTable::Get(IndirectRef iref) cons
}
uint32_t idx = ExtractIndex(iref);
ObjPtr<mirror::Object> obj = table_[idx].GetReference()->Read<kReadBarrierOption>();
- VerifyObject(obj.Ptr());
+ VerifyObject(obj);
return obj;
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d59bb39ccf..6109ec6758 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -99,7 +99,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, ObjPtr<mirror::Object>
size_t topIndex = segment_state_.parts.topIndex;
CHECK(obj != nullptr);
- VerifyObject(obj.Ptr());
+ VerifyObject(obj);
DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 0380f3ee7c..169911077e 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -53,15 +53,16 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
IndirectReferenceTable irt(kTableInitial, kTableMax, kGlobal);
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<4> hs(soa.Self());
ASSERT_TRUE(c != nullptr);
- mirror::Object* obj0 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj0 != nullptr);
- mirror::Object* obj1 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj1 != nullptr);
- mirror::Object* obj2 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj2 != nullptr);
- mirror::Object* obj3 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj3 != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+ Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj1.Get() != nullptr);
+ Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj2.Get() != nullptr);
+ Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj3.Get() != nullptr);
const uint32_t cookie = IRT_FIRST_SEGMENT;
@@ -71,19 +72,19 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
EXPECT_FALSE(irt.Remove(cookie, iref0)) << "unexpectedly successful removal";
// Add three, check, remove in the order in which they were added.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
- IndirectRef iref1 = irt.Add(cookie, obj1);
+ IndirectRef iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 2, 2);
- IndirectRef iref2 = irt.Add(cookie, obj2);
+ IndirectRef iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
- EXPECT_OBJ_PTR_EQ(obj0, irt.Get(iref0));
- EXPECT_OBJ_PTR_EQ(obj1, irt.Get(iref1));
- EXPECT_OBJ_PTR_EQ(obj2, irt.Get(iref2));
+ EXPECT_OBJ_PTR_EQ(obj0.Get(), irt.Get(iref0));
+ EXPECT_OBJ_PTR_EQ(obj1.Get(), irt.Get(iref1));
+ EXPECT_OBJ_PTR_EQ(obj2.Get(), irt.Get(iref2));
EXPECT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 2, 2);
@@ -99,11 +100,11 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
EXPECT_TRUE(irt.Get(iref0) == nullptr);
// Add three, remove in the opposite order.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
@@ -119,11 +120,11 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add three, remove middle / middle / bottom / top. (Second attempt
// to remove middle should fail.)
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
@@ -148,20 +149,20 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add four entries. Remove #1, add new entry, verify that table size
// is still 4 (i.e. holes are getting filled). Remove #1 and #3, verify
// that we delete one and don't hole-compact the other.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
- IndirectRef iref3 = irt.Add(cookie, obj3);
+ IndirectRef iref3 = irt.Add(cookie, obj3.Get());
EXPECT_TRUE(iref3 != nullptr);
CheckDump(&irt, 4, 4);
ASSERT_TRUE(irt.Remove(cookie, iref1));
CheckDump(&irt, 3, 3);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
ASSERT_EQ(4U, irt.Capacity()) << "hole not filled";
@@ -184,12 +185,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add an entry, remove it, add a new entry, and try to use the original
// iref. They have the same slot number but are for different objects.
// With the extended checks in place, this should fail.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 0, 0);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_FALSE(irt.Remove(cookie, iref0)) << "mismatched del succeeded";
@@ -200,12 +201,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Same as above, but with the same object. A more rigorous checker
// (e.g. with slot serialization) will catch this.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 0, 0);
- iref1 = irt.Add(cookie, obj0);
+ iref1 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 1, 1);
if (iref0 != iref1) {
@@ -220,7 +221,7 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ASSERT_TRUE(irt.Get(nullptr) == nullptr);
// Stale lookup.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
@@ -231,12 +232,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// These ones fit...
IndirectRef manyRefs[kTableInitial];
for (size_t i = 0; i < kTableInitial; i++) {
- manyRefs[i] = irt.Add(cookie, obj0);
+ manyRefs[i] = irt.Add(cookie, obj0.Get());
ASSERT_TRUE(manyRefs[i] != nullptr) << "Failed adding " << i;
CheckDump(&irt, i + 1, 1);
}
// ...this one causes overflow.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
ASSERT_TRUE(iref0 != nullptr);
ASSERT_EQ(kTableInitial + 1, irt.Capacity());
CheckDump(&irt, kTableInitial + 1, 1);
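The mechanical change running through this test illustrates why the handle-scope work matters: a raw mirror::Object* held across a point where a collection may run can go stale once the collector moves the object, whereas a Handle obtained from a scope stays visible as a GC root and is updated in place. The pattern, as a fragment that assumes an attached thread (soa), a resolved class c, and the irt/cookie set up exactly as in the test above:

    // Fragment, not a complete test.
    StackHandleScope<2> hs(soa.Self());
    Handle<mirror::Object> obj_a = hs.NewHandle(c->AllocObject(soa.Self()));
    Handle<mirror::Object> obj_b = hs.NewHandle(c->AllocObject(soa.Self()));
    // obj_a.Get() / obj_b.Get() always return the objects' current addresses,
    // even if a collection ran between the two allocations.
    IndirectRef ref = irt.Add(cookie, obj_a.Get());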
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d283a50234..0d3af93fd1 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -22,6 +22,7 @@
#include "interpreter_common.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
+#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 6cff1da357..295cdec9b9 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -20,6 +20,7 @@
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
+#include "jvalue-inl.h"
#include "safe_math.h"
namespace art {
diff --git a/runtime/interpreter/mterp/arm/op_unused_fa.S b/runtime/interpreter/mterp/arm/op_unused_fa.S
deleted file mode 100644
index 10948dc06c..0000000000
--- a/runtime/interpreter/mterp/arm/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fb.S b/runtime/interpreter/mterp/arm/op_unused_fb.S
deleted file mode 100644
index 10948dc06c..0000000000
--- a/runtime/interpreter/mterp/arm/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fa.S b/runtime/interpreter/mterp/arm64/op_unused_fa.S
deleted file mode 100644
index 204eceff7e..0000000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fb.S b/runtime/interpreter/mterp/arm64/op_unused_fb.S
deleted file mode 100644
index 204eceff7e..0000000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
index b6caf1195e..6d9774c223 100644
--- a/runtime/interpreter/mterp/config_arm
+++ b/runtime/interpreter/mterp/config_arm
@@ -286,8 +286,8 @@ op-start arm
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index c5e06c7236..9f32695664 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -284,8 +284,8 @@ op-start arm64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index 515cb0b591..708a22b6a4 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -286,8 +286,8 @@ op-start mips
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index aafd248821..7643a4829e 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -286,8 +286,8 @@ op-start mips64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index 64d8ee8369..f454786682 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -290,8 +290,8 @@ op-start x86
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index 7c357db974..dbfd3d18fc 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -290,8 +290,8 @@ op-start x86_64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- # op op_unused_fa FALLBACK
- # op op_unused_fb FALLBACK
+ op op_invoke_polymorphic FALLBACK
+ op op_invoke_polymorphic_range FALLBACK
# op op_unused_fc FALLBACK
# op op_unused_fd FALLBACK
# op op_unused_fe FALLBACK
diff --git a/runtime/interpreter/mterp/mips/op_unused_fa.S b/runtime/interpreter/mterp/mips/op_unused_fa.S
deleted file mode 100644
index 99ef3cf308..0000000000
--- a/runtime/interpreter/mterp/mips/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fb.S b/runtime/interpreter/mterp/mips/op_unused_fb.S
deleted file mode 100644
index 99ef3cf308..0000000000
--- a/runtime/interpreter/mterp/mips/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fa.S b/runtime/interpreter/mterp/mips64/op_unused_fa.S
deleted file mode 100644
index 29463d73fc..0000000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fb.S b/runtime/interpreter/mterp/mips64/op_unused_fb.S
deleted file mode 100644
index 29463d73fc..0000000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index c33df6d4db..78a90af54f 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -7305,24 +7305,16 @@ constvalop_long_to_double:
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: arm/op_unused_fa.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: arm/op_unused_fb.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
@@ -11734,7 +11726,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11751,7 +11743,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index c7303b9c3f..dafcc3ef6a 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6862,24 +6862,16 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: arm64/op_unused_fa.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: arm64/op_unused_fb.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
@@ -11519,7 +11511,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11536,7 +11528,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index fef7dc6816..aadbf20454 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -7624,25 +7624,15 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: mips/op_unused_fa.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: mips/op_unused_fb.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
@@ -12537,7 +12527,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12555,7 +12545,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 88e972ffb3..143aeb034c 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -7080,26 +7080,16 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: mips64/op_unused_fa.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
b MterpFallback
-
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: mips64/op_unused_fb.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
b MterpFallback
-
/* ------------------------------ */
.balign 128
.L_op_unused_fc: /* 0xfc */
@@ -11962,7 +11952,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11981,7 +11971,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 29ee2489c7..d676fdab96 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -6278,23 +6278,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: x86/op_unused_fa.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: x86/op_unused_fb.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
@@ -12370,7 +12362,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12394,7 +12386,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index bc1abcc2f2..df88499a62 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -6043,23 +6043,15 @@ movswl %ax, %eax
/* ------------------------------ */
.balign 128
-.L_op_unused_fa: /* 0xfa */
-/* File: x86_64/op_unused_fa.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic: /* 0xfa */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fb: /* 0xfb */
-/* File: x86_64/op_unused_fb.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_polymorphic_range: /* 0xfb */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
@@ -11635,7 +11627,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fa: /* 0xfa */
+.L_ALT_op_invoke_polymorphic: /* 0xfa */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11657,7 +11649,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fb: /* 0xfb */
+.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/x86/op_unused_fa.S b/runtime/interpreter/mterp/x86/op_unused_fa.S
deleted file mode 100644
index 31d98c1f39..0000000000
--- a/runtime/interpreter/mterp/x86/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_fb.S b/runtime/interpreter/mterp/x86/op_unused_fb.S
deleted file mode 100644
index 31d98c1f39..0000000000
--- a/runtime/interpreter/mterp/x86/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fa.S b/runtime/interpreter/mterp/x86_64/op_unused_fa.S
deleted file mode 100644
index 280615f08b..0000000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fa.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fb.S b/runtime/interpreter/mterp/x86_64/op_unused_fb.S
deleted file mode 100644
index 280615f08b..0000000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fb.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 845fc60b12..e12a6997e6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -38,6 +38,7 @@
#include "gc/reference_processor.h"
#include "handle_scope-inl.h"
#include "interpreter/interpreter_common.h"
+#include "jvalue-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class.h"
#include "mirror/field-inl.h"
@@ -340,7 +341,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- mirror::Method* method;
+ ObjPtr<mirror::Method> method;
if (transaction) {
if (pointer_size == PointerSize::k64) {
method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
@@ -374,7 +375,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredConstructor(
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- mirror::Constructor* constructor;
+ ObjPtr<mirror::Constructor> constructor;
if (transaction) {
if (pointer_size == PointerSize::k64) {
constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f0a7c16146..6828124464 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -88,13 +88,19 @@ static std::string NormalizeJniClassDescriptor(const char* name) {
return result;
}
-static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
- const char* name, const char* sig, const char* kind)
+static void ThrowNoSuchMethodError(ScopedObjectAccess& soa,
+ ObjPtr<mirror::Class> c,
+ const char* name,
+ const char* sig,
+ const char* kind)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
- kind, c->GetDescriptor(&temp), name, sig);
+ kind,
+ c->GetDescriptor(&temp),
+ name,
+ sig);
}
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa,
@@ -148,7 +154,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
}
}
if (method == nullptr || method->IsStatic() != is_static) {
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, is_static ? "static" : "non-static");
+ ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
return nullptr;
}
return soa.EncodeMethod(method);
@@ -629,7 +635,7 @@ class JNI {
WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
- mirror::Object* result = c->AllocObject(soa.Self());
+ ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
}
@@ -656,7 +662,7 @@ class JNI {
WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
- mirror::Object* result = c->AllocObject(soa.Self());
+ ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
}
@@ -2245,14 +2251,14 @@ class JNI {
<< "Failed to register native method "
<< PrettyDescriptor(c) << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, "static or non-static");
+ ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
<< "Failed to register non-native method "
<< PrettyDescriptor(c) << "." << name << sig
<< " as native";
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, "native");
+ ThrowNoSuchMethodError(soa, c, name, sig, "native");
return JNI_ERR;
}
@@ -2481,7 +2487,7 @@ class JNI {
// Sanity check: If elements is not the same as the java array's data, it better not be a
// heap address. TODO: This might be slow to check, may be worth keeping track of which
// copies we make?
- if (heap->IsNonDiscontinuousSpaceHeapAddress(reinterpret_cast<mirror::Object*>(elements))) {
+ if (heap->IsNonDiscontinuousSpaceHeapAddress(elements)) {
soa.Vm()->JniAbortF("ReleaseArrayElements",
"invalid element pointer %p, array elements are %p",
reinterpret_cast<void*>(elements), array_data);
diff --git a/runtime/jvalue-inl.h b/runtime/jvalue-inl.h
new file mode 100644
index 0000000000..b33686c6c5
--- /dev/null
+++ b/runtime/jvalue-inl.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JVALUE_INL_H_
+#define ART_RUNTIME_JVALUE_INL_H_
+
+#include "jvalue.h"
+
+#include "obj_ptr.h"
+
+namespace art {
+
+inline void JValue::SetL(ObjPtr<mirror::Object> new_l) {
+ l = new_l.Ptr();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_JVALUE_INL_H_
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 7b91b0b2b6..52a0f23361 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -18,9 +18,12 @@
#define ART_RUNTIME_JVALUE_H_
#include "base/macros.h"
+#include "base/mutex.h"
#include <stdint.h>
+#include "obj_ptr.h"
+
namespace art {
namespace mirror {
class Object;
@@ -52,8 +55,10 @@ union PACKED(4) JValue {
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
- mirror::Object* GetL() const { return l; }
- void SetL(mirror::Object* new_l) { l = new_l; }
+ mirror::Object* GetL() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return l;
+ }
+ void SetL(ObjPtr<mirror::Object> new_l) REQUIRES_SHARED(Locks::mutator_lock_);
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 9d7f98fe62..7cbcac8030 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,10 +100,10 @@ class SetLengthVisitor {
explicit SetLengthVisitor(int32_t length) : length_(length) {
}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
- Array* array = down_cast<Array*>(obj);
+ ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
// DCHECK(array->IsArrayInstance());
array->SetLength(length_);
}
@@ -124,10 +124,10 @@ class SetLengthToUsableSizeVisitor {
component_size_shift_(component_size_shift) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
- Array* array = down_cast<Array*>(obj);
+ ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
// DCHECK(array->IsArrayInstance());
int32_t length = (usable_size - header_size_) >> component_size_shift_;
DCHECK_GE(length, minimum_length_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cc088b8aa8..62c583b026 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -306,14 +306,14 @@ inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, Pointer
SetEmbeddedVTableEntryUnchecked(i, method, pointer_size);
}
-inline bool Class::Implements(Class* klass) {
+inline bool Class::Implements(ObjPtr<Class> klass) {
DCHECK(klass != nullptr);
DCHECK(klass->IsInterface()) << PrettyClass(this);
// All interfaces implemented directly and by our superclass, and
// recursively all super-interfaces of those interfaces, are listed
// in iftable_, so we can just do a linear scan through that.
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; i++) {
if (iftable->GetInterface(i) == klass) {
return true;
@@ -353,7 +353,7 @@ inline bool Class::IsAssignableFromArray(ObjPtr<Class> src) {
if (!IsArrayClass()) {
// If "this" is not also an array, it must be Object.
// src's super should be java_lang_Object, since it is an array.
- Class* java_lang_Object = src->GetSuperClass();
+ ObjPtr<Class> java_lang_Object = src->GetSuperClass();
DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
return this == java_lang_Object;
@@ -384,7 +384,7 @@ inline bool Class::ResolvedFieldAccessTest(ObjPtr<Class> access_to,
DCHECK(dex_access_to != nullptr);
if (UNLIKELY(!this->CanAccess(dex_access_to))) {
if (throw_on_failure) {
- ThrowIllegalAccessErrorClass(this, dex_access_to.Ptr());
+ ThrowIllegalAccessErrorClass(this, dex_access_to);
}
return false;
}
@@ -451,15 +451,20 @@ inline bool Class::CheckResolvedFieldAccess(ObjPtr<Class> access_to,
return ResolvedFieldAccessTest<true, true>(access_to, field, field_idx, nullptr);
}
-inline bool Class::CanAccessResolvedMethod(Class* access_to, ArtMethod* method,
- DexCache* dex_cache, uint32_t method_idx) {
+inline bool Class::CanAccessResolvedMethod(ObjPtr<Class> access_to,
+ ArtMethod* method,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t method_idx) {
return ResolvedMethodAccessTest<false, false, kStatic>(access_to, method, method_idx, dex_cache);
}
template <InvokeType throw_invoke_type>
-inline bool Class::CheckResolvedMethodAccess(Class* access_to, ArtMethod* method,
+inline bool Class::CheckResolvedMethodAccess(ObjPtr<Class> access_to,
+ ArtMethod* method,
uint32_t method_idx) {
- return ResolvedMethodAccessTest<true, true, throw_invoke_type>(access_to, method, method_idx,
+ return ResolvedMethodAccessTest<true, true, throw_invoke_type>(access_to,
+ method,
+ method_idx,
nullptr);
}
@@ -478,13 +483,13 @@ inline bool Class::IsSubClass(ObjPtr<Class> klass) {
inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method,
PointerSize pointer_size) {
- Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
DCHECK(!method->IsCopied());
// TODO cache to improve lookup speed
const int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; i++) {
if (iftable->GetInterface(i) == declaring_class) {
return iftable->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
@@ -526,14 +531,14 @@ inline IfTable* Class::GetIfTable() {
}
inline int32_t Class::GetIfTableCount() {
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
if (iftable == nullptr) {
return 0;
}
return iftable->Count();
}
-inline void Class::SetIfTable(IfTable* new_iftable) {
+inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable);
}
@@ -544,20 +549,20 @@ inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtr() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
- Class* super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
return (super_class != nullptr)
? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
- sizeof(mirror::HeapReference<mirror::Object>)))
+ kHeapReferenceSize))
: ClassOffset();
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
DCHECK(IsResolved());
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ uint32_t base = sizeof(Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
// Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(
+ base = Class::ComputeClassSize(
true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
@@ -566,10 +571,10 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointe
inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(
PointerSize pointer_size) {
DCHECK(IsLoaded());
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ uint32_t base = sizeof(Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable()) {
// Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
+ base = Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
@@ -700,16 +705,20 @@ inline void Class::CheckObjectAlloc() {
}
template<bool kIsInstrumented, bool kCheckAddFinalizer>
-inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
+inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
CheckObjectAlloc();
gc::Heap* heap = Runtime::Current()->GetHeap();
const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
if (!kCheckAddFinalizer) {
DCHECK(!IsFinalizable());
}
- mirror::Object* obj =
- heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
- allocator_type, VoidFunctor());
+ // Note that the this pointer may be invalidated after the allocation.
+ ObjPtr<Object> obj =
+ heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
+ this,
+ this->object_size_,
+ allocator_type,
+ VoidFunctor());
if (add_finalizer && LIKELY(obj != nullptr)) {
heap->AddFinalizerReference(self, &obj);
if (UNLIKELY(self->IsExceptionPending())) {
@@ -717,14 +726,14 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
obj = nullptr;
}
}
- return obj;
+ return obj.Ptr();
}
-inline Object* Class::AllocObject(Thread* self) {
+inline ObjPtr<Object> Class::AllocObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
-inline Object* Class::AllocNonMovableObject(Thread* self) {
+inline ObjPtr<Object> Class::AllocNonMovableObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
}
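The comment in Alloc about the this pointer being invalidated is the reason the result is now carried in an ObjPtr: the allocation may suspend the thread, and a moving collector may relocate objects, so raw mirror pointers held across the call go stale. Callers that need an object across such a point wrap it in a Handle, as CopyOf does later in this patch. A hedged sketch of that pattern (names are illustrative, assuming a Thread* self and an ObjPtr<mirror::Class> klass obtained under the mutator lock):

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_klass = hs.NewHandle(klass);      // survives a moving GC
  ObjPtr<mirror::Object> obj = h_klass->AllocObject(self);  // may suspend and move objects
  if (obj == nullptr) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  // Any pre-allocation raw pointer to the class must be treated as stale here;
  // re-read it through h_klass instead.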
@@ -746,7 +755,7 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
}
// Space used by reference statics.
- size += num_ref_static_fields * sizeof(HeapReference<Object>);
+ size += num_ref_static_fields * kHeapReferenceSize;
if (!IsAligned<8>(size) && num_64bit_static_fields > 0) {
uint32_t gap = 8 - (size & 0x7);
size += gap; // will be padded
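The switch to kHeapReferenceSize is a renaming of sizeof(HeapReference<Object>); the padding arithmetic that follows is unchanged. A worked example, assuming the usual 4-byte compressed heap reference (illustrative numbers, not from this change):

  uint32_t size = 100;                // running size after the embedded tables
  size += 2 * 4;                      // two reference statics at kHeapReferenceSize -> 108
  if (!IsAligned<8>(size)) {          // 108 & 0x7 == 4, so 64-bit statics need a gap
    uint32_t gap = 8 - (size & 0x7);  // gap == 4
    size += gap;                      // 64-bit statics start at 112
  }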
@@ -777,8 +786,8 @@ template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
- VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+inline void Class::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass.Ptr(), visitor);
// Right after a class is allocated, but not yet loaded
// (kStatusNotReady, see ClassLinker::LoadClass()), GC may find it
// and scan it. IsTemp() may call Class::GetAccessFlags() but may
@@ -806,7 +815,7 @@ inline bool Class::IsReferenceClass() const {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Class::IsClassClass() {
- Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
+ ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
template GetClass<kVerifyFlags, kReadBarrierOption>();
return this == java_lang_Class;
}
@@ -879,12 +888,11 @@ inline void Class::SetSlowPath(bool enabled) {
SetFieldBoolean<false, false>(GetSlowPathFlagOffset(), enabled);
}
-inline void Class::InitializeClassVisitor::operator()(
- mirror::Object* obj, size_t usable_size) const {
+inline void Class::InitializeClassVisitor::operator()(ObjPtr<Object> obj,
+ size_t usable_size) const {
DCHECK_LE(class_size_, usable_size);
// Avoid AsClass as object is not yet in live bitmap or allocation stack.
- mirror::Class* klass = down_cast<mirror::Class*>(obj);
- // DCHECK(klass->IsClass());
+ ObjPtr<Class> klass = ObjPtr<Class>::DownCast(obj);
klass->SetClassSize(class_size_);
klass->SetPrimitiveType(Primitive::kPrimNot); // Default to not being primitive.
klass->SetDexClassDefIndex(DexFile::kDexNoIndex16); // Default to no valid class def index.
@@ -916,7 +924,7 @@ inline uint32_t Class::NumDirectInterfaces() {
} else if (IsArrayClass()) {
return 2;
} else if (IsProxyClass()) {
- mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
+ ObjectArray<Class>* interfaces = GetInterfaces();
return interfaces != nullptr ? interfaces->GetLength() : 0;
} else {
const DexFile::TypeList* interfaces = GetInterfaceTypeList();
@@ -937,7 +945,7 @@ inline StringDexCacheType* Class::GetDexCacheStrings() {
}
template<ReadBarrierOption kReadBarrierOption, class Visitor>
-void mirror::Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
+void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
@@ -1066,7 +1074,7 @@ inline uint32_t Class::NumStaticFields() {
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Class::FixupNativePointers(mirror::Class* dest,
+inline void Class::FixupNativePointers(Class* dest,
PointerSize pointer_size,
const Visitor& visitor) {
// Update the field arrays.
@@ -1135,6 +1143,14 @@ inline bool Class::CanAccessMember(ObjPtr<Class> access_to, uint32_t member_flag
return this->IsInSamePackage(access_to);
}
+inline bool Class::CannotBeAssignedFromOtherTypes() {
+ if (!IsArrayClass()) {
+ return IsFinal();
+ }
+ ObjPtr<Class> component = GetComponentType();
+ return component->IsPrimitive() || component->CannotBeAssignedFromOtherTypes();
+}
+
} // namespace mirror
} // namespace art
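The new out-of-line CannotBeAssignedFromOtherTypes above recurses through array component types, matching the comment it replaces in class.h: a final (or primitive) component makes the whole array type closed to assignment from other types. A hedged illustration, where string_array_class and object_array_class stand for hypothetical ObjPtr<Class> values:

  DCHECK(string_array_class->CannotBeAssignedFromOtherTypes());   // String is final
  DCHECK(!object_array_class->CannotBeAssignedFromOtherTypes());  // an Object[] may be assigned a String[]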
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 40742d2731..f93f72ff8b 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -40,12 +40,12 @@ namespace mirror {
GcRoot<Class> Class::java_lang_Class_;
-void Class::SetClassClass(Class* java_lang_Class) {
+void Class::SetClassClass(ObjPtr<Class> java_lang_Class) {
CHECK(java_lang_Class_.IsNull())
<< java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
- java_lang_Class->SetClassFlags(mirror::kClassFlagClass);
+ java_lang_Class->SetClassFlags(kClassFlagClass);
java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
@@ -58,7 +58,7 @@ void Class::VisitRoots(RootVisitor* visitor) {
java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-inline void Class::SetVerifyError(mirror::Object* error) {
+inline void Class::SetVerifyError(ObjPtr<Object> error) {
CHECK(error != nullptr) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
@@ -140,7 +140,7 @@ void Class::SetStatus(Handle<Class> h_this, Status new_status, Thread* self) {
}
}
-void Class::SetDexCache(DexCache* new_dex_cache) {
+void Class::SetDexCache(ObjPtr<DexCache> new_dex_cache) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
SetDexCacheStrings(new_dex_cache != nullptr ? new_dex_cache->GetStrings() : nullptr);
}
@@ -209,8 +209,8 @@ void Class::DumpClass(std::ostream& os, int flags) {
Thread* const self = Thread::Current();
StackHandleScope<2> hs(self);
- Handle<mirror::Class> h_this(hs.NewHandle(this));
- Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass()));
+ Handle<Class> h_this(hs.NewHandle(this));
+ Handle<Class> h_super(hs.NewHandle(GetSuperClass()));
auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
std::string temp;
@@ -231,12 +231,12 @@ void Class::DumpClass(std::ostream& os, int flags) {
if (num_direct_interfaces > 0) {
os << " interfaces (" << num_direct_interfaces << "):\n";
for (size_t i = 0; i < num_direct_interfaces; ++i) {
- Class* interface = GetDirectInterface(self, h_this, i);
+ ObjPtr<Class> interface = GetDirectInterface(self, h_this, i);
if (interface == nullptr) {
os << StringPrintf(" %2zd: nullptr!\n", i);
} else {
- const ClassLoader* cl = interface->GetClassLoader();
- os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl);
+ ObjPtr<ClassLoader> cl = interface->GetClassLoader();
+ os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl.Ptr());
}
}
}
@@ -283,7 +283,7 @@ void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
// Sanity check that the number of bits set in the reference offset bitmap
// agrees with the number of references
uint32_t count = 0;
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
count += c->NumReferenceInstanceFieldsDuringLinking();
}
// +1 for the Class in Object.
@@ -338,7 +338,7 @@ bool Class::IsThrowableClass() {
return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
}
-void Class::SetClassLoader(ClassLoader* new_class_loader) {
+void Class::SetClassLoader(ObjPtr<ClassLoader> new_class_loader) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
} else {
@@ -356,7 +356,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -376,7 +376,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -386,7 +386,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
// Check the current class before checking the interfaces.
@@ -396,7 +396,7 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(
dex_cache, dex_method_idx, pointer_size);
@@ -429,7 +429,7 @@ ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
@@ -445,7 +445,7 @@ ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
ArtMethod* Class::FindDirectMethod(const StringPiece& name,
const StringPiece& signature,
PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -457,7 +457,7 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name,
ArtMethod* Class::FindDirectMethod(const StringPiece& name,
const Signature& signature,
PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -466,9 +466,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
@@ -516,7 +517,7 @@ ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
@@ -540,9 +541,10 @@ ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const StringPiece& signature, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -551,9 +553,10 @@ ArtMethod* Class::FindVirtualMethod(
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const Signature& signature, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -562,9 +565,10 @@ ArtMethod* Class::FindVirtualMethod(
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
@@ -591,8 +595,8 @@ ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerS
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
- MutableHandle<mirror::IfTable> iftable(hs.NewHandle(GetIfTable()));
- MutableHandle<mirror::Class> iface(hs.NewHandle<mirror::Class>(nullptr));
+ MutableHandle<IfTable> iftable(hs.NewHandle(GetIfTable()));
+ MutableHandle<Class> iface(hs.NewHandle<Class>(nullptr));
size_t iftable_count = GetIfTableCount();
// Find the method. We don't need to check for conflicts because they would have been in the
// copied virtuals of this interface. Order matters, traverse in reverse topological order; most
@@ -696,7 +700,7 @@ ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const String
return FindFieldByNameAndType(GetIFieldsPtr(), name, type);
}
-ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindDeclaredInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
if (GetDexCache() == dex_cache) {
for (ArtField& field : GetIFields()) {
if (field.GetDexFieldIndex() == dex_field_idx) {
@@ -710,7 +714,7 @@ ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t d
ArtField* Class::FindInstanceField(const StringPiece& name, const StringPiece& type) {
// Is the field in this class, or any of its superclasses?
// Interfaces are not relevant because they can't contain instance fields.
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
ArtField* f = c->FindDeclaredInstanceField(name, type);
if (f != nullptr) {
return f;
@@ -719,10 +723,10 @@ ArtField* Class::FindInstanceField(const StringPiece& name, const StringPiece& t
return nullptr;
}
-ArtField* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
// Is the field in this class, or any of its superclasses?
// Interfaces are not relevant because they can't contain instance fields.
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
ArtField* f = c->FindDeclaredInstanceField(dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -736,7 +740,7 @@ ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPi
return FindFieldByNameAndType(GetSFieldsPtr(), name, type);
}
-ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindDeclaredStaticField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
if (dex_cache == GetDexCache()) {
for (ArtField& field : GetSFields()) {
if (field.GetDexFieldIndex() == dex_field_idx) {
@@ -747,11 +751,13 @@ ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex
return nullptr;
}
-ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ArtField* Class::FindStaticField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type) {
// Is the field in this class (or its interfaces), or any of its
// superclasses (or their interfaces)?
- for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(name, type);
if (f != nullptr) {
@@ -759,11 +765,11 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const String
}
// Wrap k in case it moves during GetDirectInterface.

StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
+ HandleWrapperObjPtr<Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
- Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
+ Handle<Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -774,10 +780,10 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const String
}
ArtField* Class::FindStaticField(Thread* self,
- Class* klass,
- const DexCache* dex_cache,
+ ObjPtr<Class> klass,
+ ObjPtr<DexCache> dex_cache,
uint32_t dex_field_idx) {
- for (Class* k = klass; k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass; k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx);
if (f != nullptr) {
@@ -787,10 +793,10 @@ ArtField* Class::FindStaticField(Thread* self,
// from here, it takes a Handle as an argument, so we need to wrap `k`.
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_k(hs.NewHandle(k));
+ Handle<Class> h_k(hs.NewHandle(k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- mirror::Class* interface = GetDirectInterface(self, h_k, i);
+ ObjPtr<Class> interface = GetDirectInterface(self, h_k, i);
f = FindStaticField(self, interface, dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -800,10 +806,12 @@ ArtField* Class::FindStaticField(Thread* self,
return nullptr;
}
-ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ArtField* Class::FindField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type) {
// Find a field using the JLS field resolution order
- for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredInstanceField(name, type);
if (f != nullptr) {
@@ -815,10 +823,10 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece&
}
// Is this field in any of this class' interfaces?
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
+ HandleWrapperObjPtr<Class> h_k(hs.NewHandleWrapper(&k));
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
- Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
+ Handle<Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = interface->FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -874,8 +882,9 @@ uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_;
}
-mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
- uint32_t idx) {
+ObjPtr<Class> Class::GetDirectInterface(Thread* self,
+ Handle<Class> klass,
+ uint32_t idx) {
DCHECK(klass.Get() != nullptr);
DCHECK(!klass->IsPrimitive());
if (klass->IsArrayClass()) {
@@ -887,12 +896,12 @@ mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> kla
return class_linker->FindSystemClass(self, "Ljava/io/Serializable;");
}
} else if (klass->IsProxyClass()) {
- mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfaces();
+ ObjPtr<ObjectArray<Class>> interfaces = klass.Get()->GetInterfaces();
DCHECK(interfaces != nullptr);
return interfaces->Get(idx);
} else {
uint16_t type_idx = klass->GetDirectInterfaceTypeIdx(idx);
- mirror::Class* interface = klass->GetDexCache()->GetResolvedType(type_idx);
+ ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx);
if (interface == nullptr) {
interface = Runtime::Current()->GetClassLinker()->ResolveType(klass->GetDexFile(), type_idx,
klass.Get());
@@ -902,13 +911,13 @@ mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> kla
}
}
-mirror::Class* Class::GetCommonSuperClass(Handle<Class> klass) {
+ObjPtr<Class> Class::GetCommonSuperClass(Handle<Class> klass) {
DCHECK(klass.Get() != nullptr);
DCHECK(!klass->IsInterface());
DCHECK(!IsInterface());
- mirror::Class* common_super_class = this;
+ ObjPtr<Class> common_super_class = this;
while (!common_super_class->IsAssignableFrom(klass.Get())) {
- mirror::Class* old_common = common_super_class;
+ ObjPtr<Class> old_common = common_super_class;
common_super_class = old_common->GetSuperClass();
DCHECK(common_super_class != nullptr) << PrettyClass(old_common);
}
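GetCommonSuperClass simply climbs the superclass chain of this until it reaches a class assignable from klass; since java.lang.Object is assignable from everything, the DCHECK above cannot fire for a well-formed hierarchy. A hedged usage sketch, with h_string_class a hypothetical Handle<Class> for java.lang.String and integer_class an ObjPtr<Class> for java.lang.Integer:

  ObjPtr<Class> common = integer_class->GetCommonSuperClass(h_string_class);
  DCHECK(common->GetSuperClass() == nullptr);  // unrelated concrete classes meet at java.lang.Object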
@@ -926,7 +935,7 @@ const char* Class::GetSourceFile() {
}
std::string Class::GetLocation() {
- mirror::DexCache* dex_cache = GetDexCache();
+ ObjPtr<DexCache> dex_cache = GetDexCache();
if (dex_cache != nullptr && !IsProxyClass()) {
return dex_cache->GetLocation()->ToModifiedUtf8();
}
@@ -959,28 +968,28 @@ void Class::PopulateEmbeddedVTable(PointerSize pointer_size) {
class ReadBarrierOnNativeRootsVisitor {
public:
- void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
+ void operator()(ObjPtr<Object> obj ATTRIBUTE_UNUSED,
MemberOffset offset ATTRIBUTE_UNUSED,
bool is_static ATTRIBUTE_UNUSED) const {}
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRootIfNonNull(CompressedReference<Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRoot(CompressedReference<Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Object* old_ref = root->AsMirrorPtr();
- mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
+ ObjPtr<Object> old_ref = root->AsMirrorPtr();
+ ObjPtr<Object> new_ref = ReadBarrier::BarrierForRoot(root);
if (old_ref != new_ref) {
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
auto* atomic_root =
- reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
+ reinterpret_cast<Atomic<CompressedReference<Object>>*>(root);
atomic_root->CompareExchangeStrongSequentiallyConsistent(
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(old_ref),
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(new_ref));
+ CompressedReference<Object>::FromMirrorPtr(old_ref.Ptr()),
+ CompressedReference<Object>::FromMirrorPtr(new_ref.Ptr()));
}
}
};
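The visitor's comment notes that the CompareExchange may lose to a concurrent mutator, which is fine: whichever thread wins installs a reference that is at least as up to date, so no retry loop is needed. A generic sketch of that idempotent compare-and-swap pattern, using plain std::atomic rather than ART's Atomic/CompressedReference wrappers (illustrative only):

  #include <atomic>

  void UpdateRootOnce(std::atomic<void*>& root, void* old_ref, void* new_ref) {
    if (old_ref != new_ref) {
      // A failed exchange means another thread already wrote an up-to-date
      // reference; losing the race requires no further action.
      root.compare_exchange_strong(old_ref, new_ref);
    }
  }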
@@ -989,7 +998,7 @@ class ReadBarrierOnNativeRootsVisitor {
class CopyClassVisitor {
public:
CopyClassVisitor(Thread* self,
- Handle<mirror::Class>* orig,
+ Handle<Class>* orig,
size_t new_length,
size_t copy_bytes,
ImTable* imt,
@@ -998,24 +1007,24 @@ class CopyClassVisitor {
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
}
- void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
- mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
- mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
+ Object::CopyObject(h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
+ Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
h_new_class_obj->SetImt(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
// Visit all of the references to make sure there are no from-space references in the native
// roots.
- static_cast<mirror::Object*>(h_new_class_obj.Get())->VisitReferences(
+ ObjPtr<Object>(h_new_class_obj.Get())->VisitReferences(
ReadBarrierOnNativeRootsVisitor(), VoidFunctor());
}
private:
Thread* const self_;
- Handle<mirror::Class>* const orig_;
+ Handle<Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
ImTable* imt_;
@@ -1027,12 +1036,12 @@ Class* Class::CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_this(hs.NewHandle(this));
+ Handle<Class> h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
// The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
// to skip copying the tail part that we will overwrite here.
CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size);
- mirror::Object* new_class = kMovingClasses ?
+ ObjPtr<Object> new_class = kMovingClasses ?
heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) :
heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
@@ -1049,7 +1058,7 @@ bool Class::ProxyDescriptorEquals(const char* match) {
// TODO: Move this to java_lang_Class.cc?
ArtMethod* Class::GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size) {
+ Thread* self, Handle<ObjectArray<Class>> args, PointerSize pointer_size) {
for (auto& m : GetDirectMethods(pointer_size)) {
// Skip <clinit> which is a static constructor, as well as non constructors.
if (m.IsStatic() || !m.IsConstructor()) {
@@ -1068,7 +1077,7 @@ ArtMethod* Class::GetDeclaredConstructor(
uint32_t Class::Depth() {
uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
depth++;
}
return depth;
@@ -1081,10 +1090,11 @@ uint32_t Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
}
template <PointerSize kPointerSize, bool kTransactionActive>
-mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args) {
+ObjPtr<Method> Class::GetDeclaredMethodInternal(
+ Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args) {
// Covariant return types permit the class to define multiple
// methods with the same name and parameter types. Prefer to
// return a non-synthetic method in such situations. We may
@@ -1099,12 +1109,12 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
return nullptr;
}
auto h_args = hs.NewHandle(args);
- Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+ Handle<Class> h_klass = hs.NewHandle(klass);
ArtMethod* result = nullptr;
for (auto& m : h_klass->GetDeclaredVirtualMethods(kPointerSize)) {
auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(self);
+ ObjPtr<String> np_name = np_method->GetNameAsString(self);
if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
if (UNLIKELY(self->IsExceptionPending())) {
return nullptr;
@@ -1113,7 +1123,7 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
}
auto modifiers = m.GetAccessFlags();
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
if ((modifiers & kAccMiranda) == 0) {
result = &m; // Remember as potential result if it's not a miranda method.
@@ -1127,7 +1137,7 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
}
auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(self);
+ ObjPtr<String> np_name = np_method->GetNameAsString(self);
if (np_name == nullptr) {
self->AssertPendingException();
return nullptr;
@@ -1139,76 +1149,76 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
continue;
}
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
// Direct methods cannot be miranda methods, so this potential result must be synthetic.
result = &m;
}
}
return result != nullptr
- ? mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
+ ? Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template <PointerSize kPointerSize, bool kTransactionActive>
-mirror::Constructor* Class::GetDeclaredConstructorInternal(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args) {
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args) {
StackHandleScope<1> hs(self);
ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), kPointerSize);
return result != nullptr
- ? mirror::Constructor::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
+ ? Constructor::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
-// mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
+// Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
int32_t Class::GetInnerClassFlags(Handle<Class> h_this, int32_t default_value) {
if (h_this->IsProxyClass() || h_this->GetDexCache() == nullptr) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a0d6f37672..12ce014a53 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -56,7 +56,7 @@ class IfTable;
class Method;
template <typename T> struct PACKED(8) DexCachePair;
-using StringDexCachePair = DexCachePair<mirror::String>;
+using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
// C++ mirror of java.lang.Class
@@ -337,18 +337,7 @@ class MANAGED Class FINAL : public Object {
// For array classes, where all the classes are final due to there being no sub-classes, an
// Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
// types as the component is final.
- bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsArrayClass()) {
- return IsFinal();
- } else {
- Class* component = GetComponentType();
- if (component->IsPrimitive()) {
- return true;
- } else {
- return component->CannotBeAssignedFromOtherTypes();
- }
- }
- }
+ bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
@@ -473,7 +462,7 @@ class MANAGED Class FINAL : public Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Class* GetComponentType() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetComponentType(Class* new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetComponentType(ObjPtr<Class> new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -508,7 +497,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Class* const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && !component_type->IsPrimitive();
}
@@ -528,12 +517,12 @@ class MANAGED Class FINAL : public Object {
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
- ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
+ ALWAYS_INLINE ObjPtr<Object> Alloc(Thread* self, gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- Object* AllocObject(Thread* self)
+ ObjPtr<Object> AllocObject(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- Object* AllocNonMovableObject(Thread* self)
+ ObjPtr<Object> AllocNonMovableObject(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -638,11 +627,14 @@ class MANAGED Class FINAL : public Object {
// Can this class access a resolved method?
// Note that access to methods's class is checked and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
- bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
- DexCache* dex_cache, uint32_t method_idx)
+ bool CanAccessResolvedMethod(ObjPtr<Class> access_to,
+ ArtMethod* resolved_method,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
- bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
+ bool CheckResolvedMethodAccess(ObjPtr<Class> access_to,
+ ArtMethod* resolved_method,
uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -661,11 +653,12 @@ class MANAGED Class FINAL : public Object {
// Get first common super class. It will never return null.
// `This` and `klass` must be classes.
- Class* GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<Class> GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(Class* new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
- Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+ ObjPtr<Class> old_super_class =
+ GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
DCHECK(new_super_class != nullptr);
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
@@ -681,7 +674,7 @@ class MANAGED Class FINAL : public Object {
ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -699,7 +692,7 @@ class MANAGED Class FINAL : public Object {
DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDexCache(ObjPtr<DexCache> new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -753,15 +746,16 @@ class MANAGED Class FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
- static Method* GetDeclaredMethodInternal(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args)
+ static ObjPtr<Method> GetDeclaredMethodInternal(Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args)
REQUIRES_SHARED(Locks::mutator_lock_);
+
template <PointerSize kPointerSize, bool kTransactionActive>
- static Constructor* GetDeclaredConstructorInternal(Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args)
+ static ObjPtr<Constructor> GetDeclaredConstructorInternal(Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -894,69 +888,86 @@ class MANAGED Class FINAL : public Object {
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindInterfaceMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindInterfaceMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDirectMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
+ ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
+ ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindVirtualMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -976,7 +987,8 @@ class MANAGED Class FINAL : public Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE IfTable* GetIfTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(ObjPtr<IfTable> new_iftable)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
LengthPrefixedArray<ArtField>* GetIFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1077,32 +1089,34 @@ class MANAGED Class FINAL : public Object {
// Finds the given instance field in this class or a superclass, only searches classes that
// have the same dex cache.
- ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindDeclaredInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
- static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ static ArtField* FindStaticField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or superclass, only searches classes that
// have the same dex cache.
static ArtField* FindStaticField(Thread* self,
- Class* klass,
- const DexCache* dex_cache,
+ ObjPtr<Class> klass,
+ ObjPtr<DexCache> dex_cache,
uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindDeclaredStaticField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
pid_t GetClinitThreadId() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1148,7 +1162,7 @@ class MANAGED Class FINAL : public Object {
}
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClassClass(ObjPtr<Class> java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1178,8 +1192,9 @@ class MANAGED Class FINAL : public Object {
uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
- uint32_t idx)
+ static ObjPtr<Class> GetDirectInterface(Thread* self,
+ Handle<Class> klass,
+ uint32_t idx)
REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1194,7 +1209,9 @@ class MANAGED Class FINAL : public Object {
void AssertInitializedOrInitializingInThread(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
+ Class* CopyOf(Thread* self,
+ int32_t new_length,
+ ImTable* imt,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -1218,8 +1235,9 @@ class MANAGED Class FINAL : public Object {
}
// May cause thread suspension due to EqualParameters.
- ArtMethod* GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
+ ArtMethod* GetDeclaredConstructor(Thread* self,
+ Handle<ObjectArray<Class>> args,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
@@ -1232,7 +1250,7 @@ class MANAGED Class FINAL : public Object {
explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {
}
- void operator()(mirror::Object* obj, size_t usable_size) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size) const
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -1277,14 +1295,14 @@ class MANAGED Class FINAL : public Object {
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
+ void FixupNativePointers(Class* dest, PointerSize pointer_size, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetVerifyError(ObjPtr<Object> klass) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
@@ -1300,7 +1318,7 @@ class MANAGED Class FINAL : public Object {
ObjPtr<DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool Implements(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsArrayAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1333,7 +1351,7 @@ class MANAGED Class FINAL : public Object {
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// 'Class' Object Fields
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 445f23fa08..940e82446d 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -48,7 +48,7 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, b
self->ClearException();
}
}
- auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self)));
+ auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self).Ptr()));
if (UNLIKELY(ret.Get() == nullptr)) {
self->AssertPendingOOMException();
return nullptr;
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 71bac7e3d6..7ddadda06b 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -54,12 +54,12 @@ void Method::ResetArrayClass() {
template <PointerSize kPointerSize, bool kTransactionActive>
Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
- auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
+ ObjPtr<Method> ret = ObjPtr<Method>::DownCast(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<Executable*>(ret)->
+ ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret;
+ return ret.Ptr();
}
template Method* Method::CreateFromArtMethod<PointerSize::k32, false>(Thread* self,
@@ -106,12 +106,12 @@ void Constructor::VisitRoots(RootVisitor* visitor) {
template <PointerSize kPointerSize, bool kTransactionActive>
Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
- auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
+ ObjPtr<Constructor> ret = ObjPtr<Constructor>::DownCast(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<Executable*>(ret)->
+ ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret;
+ return ret.Ptr();
}
template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, false>(
diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc
index ba6ea5e4ff..0b52931b76 100644
--- a/runtime/mirror/method_type.cc
+++ b/runtime/mirror/method_type.cc
@@ -29,7 +29,7 @@ mirror::MethodType* MethodType::Create(Thread* const self,
Handle<ObjectArray<Class>> param_types) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> mt(
- hs.NewHandle(static_cast<MethodType*>(StaticClass()->AllocObject(self))));
+ hs.NewHandle(ObjPtr<MethodType>::DownCast(StaticClass()->AllocObject(self))));
// TODO: Do we ever create a MethodType during a transaction ? There doesn't
// seem like a good reason to do a polymorphic invoke that results in the
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 8e497431ba..daee727769 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -43,10 +43,10 @@ Atomic<uint32_t> Object::hash_code_seed(987654321U + std::time(nullptr));
class CopyReferenceFieldsWithReadBarrierVisitor {
public:
- explicit CopyReferenceFieldsWithReadBarrierVisitor(Object* dest_obj)
+ explicit CopyReferenceFieldsWithReadBarrierVisitor(ObjPtr<Object> dest_obj)
: dest_obj_(dest_obj) {}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
Object* ref = obj->GetFieldObject<Object>(offset);
@@ -55,7 +55,7 @@ class CopyReferenceFieldsWithReadBarrierVisitor {
dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent which isn't visited in
// Object::VisitReferences().
@@ -69,31 +69,32 @@ class CopyReferenceFieldsWithReadBarrierVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
private:
- Object* const dest_obj_;
+ ObjPtr<Object> const dest_obj_;
};
-Object* Object::CopyObject(Thread* self,
- mirror::Object* dest,
- mirror::Object* src,
+Object* Object::CopyObject(ObjPtr<mirror::Object> dest,
+ ObjPtr<mirror::Object> src,
size_t num_bytes) {
// Copy instance data. Don't assume memcpy copies by words (b/32012820).
{
const size_t offset = sizeof(Object);
- uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src) + offset;
- uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest) + offset;
+ uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src.Ptr()) + offset;
+ uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest.Ptr()) + offset;
num_bytes -= offset;
DCHECK_ALIGNED(src_bytes, sizeof(uintptr_t));
DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t));
// Use word sized copies to begin.
while (num_bytes >= sizeof(uintptr_t)) {
- *reinterpret_cast<uintptr_t*>(dst_bytes) = *reinterpret_cast<uintptr_t*>(src_bytes);
+ reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->StoreRelaxed(
+ reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->LoadRelaxed());
src_bytes += sizeof(uintptr_t);
dst_bytes += sizeof(uintptr_t);
num_bytes -= sizeof(uintptr_t);
}
// Copy possible 32 bit word.
if (sizeof(uintptr_t) != sizeof(uint32_t) && num_bytes >= sizeof(uint32_t)) {
- *reinterpret_cast<uint32_t*>(dst_bytes) = *reinterpret_cast<uint32_t*>(src_bytes);
+ reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->StoreRelaxed(
+ reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->LoadRelaxed());
src_bytes += sizeof(uint32_t);
dst_bytes += sizeof(uint32_t);
num_bytes -= sizeof(uint32_t);
@@ -101,7 +102,8 @@ Object* Object::CopyObject(Thread* self,
// Copy remaining bytes, avoid going past the end of num_bytes since there may be a redzone
// there.
while (num_bytes > 0) {
- *reinterpret_cast<uint8_t*>(dst_bytes) = *reinterpret_cast<uint8_t*>(src_bytes);
+ reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->StoreRelaxed(
+ reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->LoadRelaxed());
src_bytes += sizeof(uint8_t);
dst_bytes += sizeof(uint8_t);
num_bytes -= sizeof(uint8_t);
@@ -125,26 +127,21 @@ Object* Object::CopyObject(Thread* self,
} else {
heap->WriteBarrierEveryFieldOf(dest);
}
- if (c->IsFinalizable()) {
- heap->AddFinalizerReference(self, &dest);
- }
- return dest;
+ return dest.Ptr();
}
// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
public:
- CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes)
- : self_(self), orig_(orig), num_bytes_(num_bytes) {
- }
+ CopyObjectVisitor(Handle<Object>* orig, size_t num_bytes)
+ : orig_(orig), num_bytes_(num_bytes) {}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
+ Object::CopyObject(obj, orig_->Get(), num_bytes_);
}
private:
- Thread* const self_;
Handle<Object>* const orig_;
const size_t num_bytes_;
DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
@@ -158,14 +155,17 @@ Object* Object::Clone(Thread* self) {
size_t num_bytes = SizeOf();
StackHandleScope<1> hs(self);
Handle<Object> this_object(hs.NewHandle(this));
- Object* copy;
- CopyObjectVisitor visitor(self, &this_object, num_bytes);
+ ObjPtr<Object> copy;
+ CopyObjectVisitor visitor(&this_object, num_bytes);
if (heap->IsMovableObject(this)) {
copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
} else {
copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
}
- return copy;
+ if (this_object->GetClass()->IsFinalizable()) {
+ heap->AddFinalizerReference(self, &copy);
+ }
+ return copy.Ptr();
}
uint32_t Object::GenerateIdentityHashCode() {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 9ddf99500e..84aa96cbd7 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -609,10 +609,11 @@ class MANAGED LOCKABLE Object {
}
}
- // A utility function that copies an object in a read barrier and
- // write barrier-aware way. This is internally used by Clone() and
- // Class::CopyOf().
- static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+ // A utility function that copies an object in a read barrier and write barrier-aware way.
+ // This is internally used by Clone() and Class::CopyOf(). If the object is finalizable,
+ // it is the caller's job to call Heap::AddFinalizerReference.
+ static Object* CopyObject(ObjPtr<mirror::Object> dest,
+ ObjPtr<mirror::Object> src,
size_t num_bytes)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index c3c523187d..d5bc2564bd 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -131,28 +131,24 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* s
// Perform the memmove using int memmove then perform the write barrier.
static_assert(sizeof(HeapReference<T>) == sizeof(uint32_t),
"art::mirror::HeapReference<T> and uint32_t have different sizes.");
- IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
- IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
- if (kUseReadBarrier) {
- // TODO: Optimize this later?
- const bool copy_forward = (src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count);
- if (copy_forward) {
- // Forward copy.
- for (int i = 0; i < count; ++i) {
- // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
- Object* obj = src->GetWithoutChecks(src_pos + i);
- SetWithoutChecks<false>(dst_pos + i, obj);
- }
- } else {
- // Backward copy.
- for (int i = count - 1; i >= 0; --i) {
- // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
- Object* obj = src->GetWithoutChecks(src_pos + i);
- SetWithoutChecks<false>(dst_pos + i, obj);
- }
+ // TODO: Optimize this later?
+ // We can't use memmove since it does not handle read barriers and may copy byte by byte.
+ // See b/32012820.
+ const bool copy_forward = (src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count);
+ if (copy_forward) {
+ // Forward copy.
+ for (int i = 0; i < count; ++i) {
+ // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+ Object* obj = src->GetWithoutChecks(src_pos + i);
+ SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
}
} else {
- dstAsIntArray->Memmove(dst_pos, srcAsIntArray, src_pos, count);
+ // Backward copy.
+ for (int i = count - 1; i >= 0; --i) {
+ // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+ Object* obj = src->GetWithoutChecks(src_pos + i);
+ SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+ }
}
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
@@ -175,17 +171,13 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* sr
// Perform the memmove using int memcpy then perform the write barrier.
static_assert(sizeof(HeapReference<T>) == sizeof(uint32_t),
"art::mirror::HeapReference<T> and uint32_t have different sizes.");
- IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
- IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
- if (kUseReadBarrier) {
- // TODO: Optimize this later?
- for (int i = 0; i < count; ++i) {
- // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
- T* obj = src->GetWithoutChecks(src_pos + i);
- SetWithoutChecks<false>(dst_pos + i, obj);
- }
- } else {
- dstAsIntArray->Memcpy(dst_pos, srcAsIntArray, src_pos, count);
+ // TODO: Optimize this later?
+ // We can't use memcpy since it does not handle read barriers and may copy byte by byte.
+ // See b/32012820.
+ for (int i = 0; i < count; ++i) {
+ // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+ T* obj = src->GetWithoutChecks(src_pos + i);
+ SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
}
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 062afd31ee..60e2bf81e6 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -139,10 +139,10 @@ TEST_F(ObjectTest, AllocObjectArray) {
ASSERT_TRUE(oa->GetClass() != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
ASSERT_EQ(2U, klass->NumDirectInterfaces());
- EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
- mirror::Class::GetDirectInterface(soa.Self(), klass, 0));
- EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;"),
- mirror::Class::GetDirectInterface(soa.Self(), klass, 1));
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
+ mirror::Class::GetDirectInterface(soa.Self(), klass, 0));
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;"),
+ mirror::Class::GetDirectInterface(soa.Self(), klass, 1));
}
TEST_F(ObjectTest, AllocArray) {
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index 96f6a53396..e2050cc144 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -42,8 +42,8 @@ void StackTraceElement::ResetClass() {
StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number) {
- StackTraceElement* trace =
- down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
+ ObjPtr<StackTraceElement> trace =
+ ObjPtr<StackTraceElement>::DownCast(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
trace->Init<true>(declaring_class, method_name, file_name, line_number);
@@ -51,7 +51,7 @@ StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declari
trace->Init<false>(declaring_class, method_name, file_name, line_number);
}
}
- return trace;
+ return trace.Ptr();
}
template<bool kTransactionActive>
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index aea6ff1579..cf902af0c0 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -43,10 +43,10 @@ class SetStringCountVisitor {
explicit SetStringCountVisitor(int32_t count) : count_(count) {
}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
- String* string = down_cast<String*>(obj);
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
string->SetCount(count_);
DCHECK(!string->IsCompressed() || kUseStringCompression);
}
@@ -63,10 +63,10 @@ class SetStringCountAndBytesVisitor {
: count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
- String* string = down_cast<String*>(obj);
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
string->SetCount(count_);
DCHECK(!string->IsCompressed() || kUseStringCompression);
int32_t length = String::GetLengthFromCount(count_);
@@ -99,10 +99,10 @@ class SetStringCountAndValueVisitorFromCharArray {
count_(count), src_array_(src_array), offset_(offset) {
}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
- String* string = down_cast<String*>(obj);
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
string->SetCount(count_);
const uint16_t* const src = src_array_->GetData() + offset_;
const int32_t length = String::GetLengthFromCount(count_);
@@ -131,10 +131,10 @@ class SetStringCountAndValueVisitorFromString {
count_(count), src_string_(src_string), offset_(offset) {
}
- void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
- String* string = down_cast<String*>(obj);
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
string->SetCount(count_);
const int32_t length = String::GetLengthFromCount(count_);
bool compressible = kUseStringCompression && String::GetCompressionFlagFromCount(count_);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 8e81bc98b6..18529561cf 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -254,7 +254,9 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) {
LOG(INFO) << "VMDebug infopoint " << id << " hit";
}
-static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass,
+static jlong VMDebug_countInstancesOfClass(JNIEnv* env,
+ jclass,
+ jclass javaClass,
jboolean countAssignable) {
ScopedObjectAccess soa(env);
gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -263,13 +265,16 @@ static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass
if (c == nullptr) {
return 0;
}
- std::vector<mirror::Class*> classes {c.Ptr()};
+ VariableSizedHandleScope hs(soa.Self());
+ std::vector<Handle<mirror::Class>> classes {hs.NewHandle(c)};
uint64_t count = 0;
heap->CountInstances(classes, countAssignable, &count);
return count;
}
-static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectArray javaClasses,
+static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env,
+ jclass,
+ jobjectArray javaClasses,
jboolean countAssignable) {
ScopedObjectAccess soa(env);
gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -279,14 +284,15 @@ static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectAr
if (decoded_classes == nullptr) {
return nullptr;
}
- std::vector<mirror::Class*> classes;
+ VariableSizedHandleScope hs(soa.Self());
+ std::vector<Handle<mirror::Class>> classes;
for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
- classes.push_back(decoded_classes->Get(i));
+ classes.push_back(hs.NewHandle(decoded_classes->Get(i)));
}
std::vector<uint64_t> counts(classes.size(), 0u);
// Heap::CountInstances can handle null and will put 0 for these classes.
heap->CountInstances(classes, countAssignable, &counts[0]);
- auto* long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
+ ObjPtr<mirror::LongArray> long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
if (long_counts == nullptr) {
soa.Self()->AssertPendingOOMException();
return nullptr;
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 2a5c04d54b..75631616e7 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -278,7 +278,7 @@ static mirror::Field* GetPublicFieldRecursive(
uint32_t num_direct_interfaces = h_clazz->NumDirectInterfaces();
for (uint32_t i = 0; i < num_direct_interfaces; i++) {
- mirror::Class *iface = mirror::Class::GetDirectInterface(self, h_clazz, i);
+ ObjPtr<mirror::Class> iface = mirror::Class::GetDirectInterface(self, h_clazz, i);
if (UNLIKELY(iface == nullptr)) {
self->AssertPendingException();
return nullptr;
@@ -403,11 +403,12 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
ScopedFastNativeObjectAccess soa(env);
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
- soa.Self(),
- DecodeClass(soa, javaThis).Ptr(),
- soa.Decode<mirror::String>(name).Ptr(),
- soa.Decode<mirror::ObjectArray<mirror::Class>>(args).Ptr());
+ ObjPtr<mirror::Method> result =
+ mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
+ soa.Self(),
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::String>(name),
+ soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
return soa.AddLocalReference<jobject>(result);
}
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index f3756a23c7..3f5fa73b45 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -35,7 +35,8 @@ namespace art {
* References are never torn regardless of the number of bits used to represent them.
*/
-static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
+static void ThrowArrayStoreException_NotAnArray(const char* identifier,
+ ObjPtr<mirror::Object> array)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
@@ -62,12 +63,12 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
// Make sure source and destination are both arrays.
ObjPtr<mirror::Object> srcObject = soa.Decode<mirror::Object>(javaSrc);
if (UNLIKELY(!srcObject->IsArrayInstance())) {
- ThrowArrayStoreException_NotAnArray("source", srcObject.Ptr());
+ ThrowArrayStoreException_NotAnArray("source", srcObject);
return;
}
ObjPtr<mirror::Object> dstObject = soa.Decode<mirror::Object>(javaDst);
if (UNLIKELY(!dstObject->IsArrayInstance())) {
- ThrowArrayStoreException_NotAnArray("destination", dstObject.Ptr());
+ ThrowArrayStoreException_NotAnArray("destination", dstObject);
return;
}
mirror::Array* srcArray = srcObject->AsArray();
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 7de0147103..505f85d94c 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -104,7 +104,7 @@ static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobject
return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 2);
}
- mirror::Object* receiver =
+ ObjPtr<mirror::Object> receiver =
movable ? c->AllocObject(soa.Self()) : c->AllocNonMovableObject(soa.Self());
if (receiver == nullptr) {
return nullptr;
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index c20c8b88a4..6b9468d3e1 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -296,10 +296,10 @@ void DumpNativeStack(std::ostream& os,
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
os << prefix << "(backtrace::Unwind failed for thread " << tid
- << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")\n";
+ << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")" << std::endl;
return;
} else if (backtrace->NumFrames() == 0) {
- os << prefix << "(no native stack frames for thread " << tid << ")\n";
+ os << prefix << "(no native stack frames for thread " << tid << ")" << std::endl;
return;
}
@@ -354,7 +354,7 @@ void DumpNativeStack(std::ostream& os,
}
os << ")";
}
- os << "\n";
+ os << std::endl;
if (try_addr2line && use_addr2line) {
Addr2line(it->map.name, it->pc - it->map.start, os, prefix, &addr2line_state);
}
@@ -395,7 +395,7 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
if (include_count) {
os << StringPrintf("#%02zd ", i);
}
- os << text << "\n";
+ os << text << std::endl;
}
}
diff --git a/runtime/obj_ptr-inl.h b/runtime/obj_ptr-inl.h
index f0a5f6f2e2..d0be6dc981 100644
--- a/runtime/obj_ptr-inl.h
+++ b/runtime/obj_ptr-inl.h
@@ -33,7 +33,7 @@ inline bool ObjPtr<MirrorType, kPoison>::IsValid() const {
template<class MirrorType, bool kPoison>
inline void ObjPtr<MirrorType, kPoison>::AssertValid() const {
if (kPoison) {
- CHECK(IsValid()) << "Stale object pointer " << Ptr() << " , expected cookie "
+ CHECK(IsValid()) << "Stale object pointer " << PtrUnchecked() << " , expected cookie "
<< TrimCookie(Thread::Current()->GetPoisonObjectCookie()) << " but got " << GetCookie();
}
}
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index d7a6c0a86c..de6683cc83 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -17,7 +17,10 @@ cc_defaults {
name: "libopenjdkjvmti_defaults",
defaults: ["art_defaults"],
host_supported: true,
- srcs: ["OpenjdkJvmTi.cc",
+ srcs: ["events.cc",
+ "heap.cc",
+ "object_tagging.cc",
+ "OpenjdkJvmTi.cc",
"transform.cc"],
include_dirs: ["art/runtime"],
shared_libs: [
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index bea40c8781..05da585b3a 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -37,12 +37,16 @@
#include "openjdkjvmti/jvmti.h"
#include "art_jvmti.h"
+#include "base/mutex.h"
+#include "events-inl.h"
+#include "heap.h"
#include "jni_env_ext-inl.h"
#include "object_tagging.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
+#include "thread-inl.h"
#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
@@ -51,7 +55,8 @@
namespace openjdkjvmti {
-ObjectTagTable gObjectTagTable;
+EventHandler gEventHandler;
+ObjectTagTable gObjectTagTable(&gEventHandler);
class JvmtiFunctions {
private:
@@ -272,7 +277,8 @@ class JvmtiFunctions {
jclass klass,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
- return ERR(NOT_IMPLEMENTED);
+ HeapUtil heap_util(&gObjectTagTable);
+ return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
@@ -731,10 +737,33 @@ class JvmtiFunctions {
return ERR(NOT_IMPLEMENTED);
}
+ // TODO: This will require locking, so that an agent can't remove callbacks when we're dispatching
+ // an event.
static jvmtiError SetEventCallbacks(jvmtiEnv* env,
const jvmtiEventCallbacks* callbacks,
jint size_of_callbacks) {
- return ERR(NOT_IMPLEMENTED);
+ if (env == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+ if (size_of_callbacks < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ if (callbacks == nullptr) {
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->event_callbacks.reset();
+ return ERR(NONE);
+ }
+
+ std::unique_ptr<jvmtiEventCallbacks> tmp(new jvmtiEventCallbacks());
+ memset(tmp.get(), 0, sizeof(jvmtiEventCallbacks));
+ size_t copy_size = std::min(sizeof(jvmtiEventCallbacks),
+ static_cast<size_t>(size_of_callbacks));
+ copy_size = art::RoundDown(copy_size, sizeof(void*));
+ memcpy(tmp.get(), callbacks, copy_size);
+
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->event_callbacks = std::move(tmp);
+
+ return ERR(NONE);
}
static jvmtiError SetEventNotificationMode(jvmtiEnv* env,
@@ -742,7 +771,21 @@ class JvmtiFunctions {
jvmtiEvent event_type,
jthread event_thread,
...) {
- return ERR(NOT_IMPLEMENTED);
+ art::Thread* art_thread = nullptr;
+ if (event_thread != nullptr) {
+ // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art_thread = art::Thread::FromManagedThread(soa, event_thread);
+
+ if (art_thread == nullptr || // The thread hasn't been started or is already dead.
+ art_thread->IsStillStarting()) {
+ // TODO: We may want to let the EventHandler know, so it could clean up masks, potentially.
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ }
+
+ return gEventHandler.SetEvent(ArtJvmTiEnv::AsArtJvmTiEnv(env), art_thread, event_type, mode);
}
static jvmtiError GenerateEvents(jvmtiEnv* env, jvmtiEvent event_type) {
@@ -1018,6 +1061,8 @@ static bool IsJvmtiVersion(jint version) {
static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm);
*new_jvmtiEnv = env;
+
+ gEventHandler.RegisterArtJvmTiEnv(env);
}
// A hook that the runtime uses to allow plugins to handle GetEnv calls. It returns true and
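For orientation only, and not part of the patch itself: the two entry points implemented above (SetEventCallbacks and SetEventNotificationMode) are what a JVMTI agent calls to start receiving events. A minimal agent-side sketch, assuming standard jvmti.h/jni.h and a hypothetical callback named OnVmObjectAlloc, could look like this:

#include <cstring>
#include "jni.h"
#include "jvmti.h"

// Hypothetical callback matching the jvmtiEventVMObjectAlloc typedef from jvmti.h.
static void JNICALL OnVmObjectAlloc(jvmtiEnv* jvmti, JNIEnv* jni, jthread thread,
                                    jobject object, jclass object_klass, jlong size) {
  // Tag each allocation with its size (assuming SetTag is wired to the ObjectTagTable).
  jvmti->SetTag(object, size);
}

extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
    return JNI_ERR;
  }
  jvmtiEventCallbacks callbacks;
  std::memset(&callbacks, 0, sizeof(callbacks));
  callbacks.VMObjectAlloc = OnVmObjectAlloc;
  // Smaller sizes are accepted: SetEventCallbacks clamps and rounds down to a pointer multiple.
  jvmti->SetEventCallbacks(&callbacks, static_cast<jint>(sizeof(callbacks)));
  // A null thread enables the event globally; passing a jthread restricts delivery to it.
  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
  return JNI_OK;
}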
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index a2c6882ac1..66d093782a 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -32,8 +32,12 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_ART_JVMTI_H_
#define ART_RUNTIME_OPENJDKJVMTI_ART_JVMTI_H_
+#include <memory>
+
#include <jni.h>
+#include "base/casts.h"
+#include "events.h"
#include "java_vm_ext.h"
#include "jni_env_ext.h"
#include "jvmti.h"
@@ -47,9 +51,16 @@ struct ArtJvmTiEnv : public jvmtiEnv {
art::JavaVMExt* art_vm;
void* local_data;
+ EventMasks event_masks;
+ std::unique_ptr<jvmtiEventCallbacks> event_callbacks;
+
explicit ArtJvmTiEnv(art::JavaVMExt* runtime) : art_vm(runtime), local_data(nullptr) {
functions = &gJvmtiInterface;
}
+
+ static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
+ return art::down_cast<ArtJvmTiEnv*>(env);
+ }
};
// Macro and constexpr to make error values less annoying to write.
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
new file mode 100644
index 0000000000..d0272010d4
--- /dev/null
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_EVENTS_INL_H_
+#define ART_RUNTIME_OPENJDKJVMTI_EVENTS_INL_H_
+
+#include "events.h"
+
+#include "art_jvmti.h"
+
+namespace openjdkjvmti {
+
+template <typename FnType>
+ALWAYS_INLINE static inline FnType* GetCallback(ArtJvmTiEnv* env, jvmtiEvent event) {
+ if (env->event_callbacks == nullptr) {
+ return nullptr;
+ }
+
+ // TODO: Add a type check. Can be done, for example, by an explicitly instantiated template
+ // function.
+
+ switch (event) {
+ case JVMTI_EVENT_VM_INIT:
+ return reinterpret_cast<FnType*>(env->event_callbacks->VMInit);
+ case JVMTI_EVENT_VM_DEATH:
+ return reinterpret_cast<FnType*>(env->event_callbacks->VMDeath);
+ case JVMTI_EVENT_THREAD_START:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ThreadStart);
+ case JVMTI_EVENT_THREAD_END:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ThreadEnd);
+ case JVMTI_EVENT_CLASS_FILE_LOAD_HOOK:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ClassFileLoadHook);
+ case JVMTI_EVENT_CLASS_LOAD:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ClassLoad);
+ case JVMTI_EVENT_CLASS_PREPARE:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ClassPrepare);
+ case JVMTI_EVENT_VM_START:
+ return reinterpret_cast<FnType*>(env->event_callbacks->VMStart);
+ case JVMTI_EVENT_EXCEPTION:
+ return reinterpret_cast<FnType*>(env->event_callbacks->Exception);
+ case JVMTI_EVENT_EXCEPTION_CATCH:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ExceptionCatch);
+ case JVMTI_EVENT_SINGLE_STEP:
+ return reinterpret_cast<FnType*>(env->event_callbacks->SingleStep);
+ case JVMTI_EVENT_FRAME_POP:
+ return reinterpret_cast<FnType*>(env->event_callbacks->FramePop);
+ case JVMTI_EVENT_BREAKPOINT:
+ return reinterpret_cast<FnType*>(env->event_callbacks->Breakpoint);
+ case JVMTI_EVENT_FIELD_ACCESS:
+ return reinterpret_cast<FnType*>(env->event_callbacks->FieldAccess);
+ case JVMTI_EVENT_FIELD_MODIFICATION:
+ return reinterpret_cast<FnType*>(env->event_callbacks->FieldModification);
+ case JVMTI_EVENT_METHOD_ENTRY:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MethodEntry);
+ case JVMTI_EVENT_METHOD_EXIT:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MethodExit);
+ case JVMTI_EVENT_NATIVE_METHOD_BIND:
+ return reinterpret_cast<FnType*>(env->event_callbacks->NativeMethodBind);
+ case JVMTI_EVENT_COMPILED_METHOD_LOAD:
+ return reinterpret_cast<FnType*>(env->event_callbacks->CompiledMethodLoad);
+ case JVMTI_EVENT_COMPILED_METHOD_UNLOAD:
+ return reinterpret_cast<FnType*>(env->event_callbacks->CompiledMethodUnload);
+ case JVMTI_EVENT_DYNAMIC_CODE_GENERATED:
+ return reinterpret_cast<FnType*>(env->event_callbacks->DynamicCodeGenerated);
+ case JVMTI_EVENT_DATA_DUMP_REQUEST:
+ return reinterpret_cast<FnType*>(env->event_callbacks->DataDumpRequest);
+ case JVMTI_EVENT_MONITOR_WAIT:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MonitorWait);
+ case JVMTI_EVENT_MONITOR_WAITED:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MonitorWaited);
+ case JVMTI_EVENT_MONITOR_CONTENDED_ENTER:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MonitorContendedEnter);
+ case JVMTI_EVENT_MONITOR_CONTENDED_ENTERED:
+ return reinterpret_cast<FnType*>(env->event_callbacks->MonitorContendedEntered);
+ case JVMTI_EVENT_RESOURCE_EXHAUSTED:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ResourceExhausted);
+ case JVMTI_EVENT_GARBAGE_COLLECTION_START:
+ return reinterpret_cast<FnType*>(env->event_callbacks->GarbageCollectionStart);
+ case JVMTI_EVENT_GARBAGE_COLLECTION_FINISH:
+ return reinterpret_cast<FnType*>(env->event_callbacks->GarbageCollectionFinish);
+ case JVMTI_EVENT_OBJECT_FREE:
+ return reinterpret_cast<FnType*>(env->event_callbacks->ObjectFree);
+ case JVMTI_EVENT_VM_OBJECT_ALLOC:
+ return reinterpret_cast<FnType*>(env->event_callbacks->VMObjectAlloc);
+ }
+ return nullptr;
+}
+
+template <typename ...Args>
+inline void EventHandler::DispatchEvent(art::Thread* thread, jvmtiEvent event, Args... args) {
+ using FnType = void(jvmtiEnv*, Args...);
+ for (ArtJvmTiEnv* env : envs) {
+ bool dispatch = env->event_masks.global_event_mask.Test(event);
+
+ if (!dispatch && thread != nullptr && env->event_masks.unioned_thread_event_mask.Test(event)) {
+ EventMask* mask = env->event_masks.GetEventMaskOrNull(thread);
+ dispatch = mask != nullptr && mask->Test(event);
+ }
+
+ if (dispatch) {
+ FnType* callback = GetCallback<FnType>(env, event);
+ if (callback != nullptr) {
+ (*callback)(env, args...);
+ }
+ }
+ }
+}
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_EVENTS_INL_H_
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
new file mode 100644
index 0000000000..450f85e51a
--- /dev/null
+++ b/runtime/openjdkjvmti/events.cc
@@ -0,0 +1,249 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "events-inl.h"
+
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "instrumentation.h"
+#include "jni_env_ext-inl.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+
+namespace openjdkjvmti {
+
+EventMask& EventMasks::GetEventMask(art::Thread* thread) {
+ if (thread == nullptr) {
+ return global_event_mask;
+ }
+
+ for (auto& pair : thread_event_masks) {
+ const UniqueThread& unique_thread = pair.first;
+ if (unique_thread.first == thread &&
+ unique_thread.second == static_cast<uint32_t>(thread->GetTid())) {
+ return pair.second;
+ }
+ }
+
+ // TODO: Remove an old UniqueThread with the same pointer, if one exists.
+
+ thread_event_masks.emplace_back(UniqueThread(thread, thread->GetTid()), EventMask());
+ return thread_event_masks.back().second;
+}
+
+EventMask* EventMasks::GetEventMaskOrNull(art::Thread* thread) {
+ if (thread == nullptr) {
+ return &global_event_mask;
+ }
+
+ for (auto& pair : thread_event_masks) {
+ const UniqueThread& unique_thread = pair.first;
+ if (unique_thread.first == thread &&
+ unique_thread.second == static_cast<uint32_t>(thread->GetTid())) {
+ return &pair.second;
+ }
+ }
+
+ return nullptr;
+}
+
+
+void EventMasks::EnableEvent(art::Thread* thread, jvmtiEvent event) {
+ DCHECK(EventMask::EventIsInRange(event));
+ GetEventMask(thread).Set(event);
+ if (thread != nullptr) {
+ unioned_thread_event_mask.Set(event, true);
+ }
+}
+
+void EventMasks::DisableEvent(art::Thread* thread, jvmtiEvent event) {
+ DCHECK(EventMask::EventIsInRange(event));
+ GetEventMask(thread).Set(event, false);
+ if (thread != nullptr) {
+ // Regenerate union for the event.
+ bool union_value = false;
+ for (auto& pair : thread_event_masks) {
+ union_value |= pair.second.Test(event);
+ if (union_value) {
+ break;
+ }
+ }
+ unioned_thread_event_mask.Set(event, union_value);
+ }
+}
+
+void EventHandler::RegisterArtJvmTiEnv(ArtJvmTiEnv* env) {
+ envs.push_back(env);
+}
+
+static bool IsThreadControllable(jvmtiEvent event) {
+ switch (event) {
+ case JVMTI_EVENT_VM_INIT:
+ case JVMTI_EVENT_VM_START:
+ case JVMTI_EVENT_VM_DEATH:
+ case JVMTI_EVENT_THREAD_START:
+ case JVMTI_EVENT_COMPILED_METHOD_LOAD:
+ case JVMTI_EVENT_COMPILED_METHOD_UNLOAD:
+ case JVMTI_EVENT_DYNAMIC_CODE_GENERATED:
+ case JVMTI_EVENT_DATA_DUMP_REQUEST:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+class JvmtiAllocationListener : public art::gc::AllocationListener {
+ public:
+ explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
+
+ void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK_EQ(self, art::Thread::Current());
+
+ if (handler_->IsEventEnabledAnywhere(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+ art::StackHandleScope<1> hs(self);
+ auto h = hs.NewHandleWrapper(obj);
+ // jvmtiEventVMObjectAlloc parameters:
+ // jvmtiEnv *jvmti_env,
+ // JNIEnv* jni_env,
+ // jthread thread,
+ // jobject object,
+ // jclass object_klass,
+ // jlong size
+ art::JNIEnvExt* jni_env = self->GetJniEnv();
+
+ jthread thread_peer;
+ if (self->IsStillStarting()) {
+ thread_peer = nullptr;
+ } else {
+ thread_peer = jni_env->AddLocalReference<jthread>(self->GetPeer());
+ }
+
+ ScopedLocalRef<jthread> thread(jni_env, thread_peer);
+ ScopedLocalRef<jobject> object(
+ jni_env, jni_env->AddLocalReference<jobject>(*obj));
+ ScopedLocalRef<jclass> klass(
+ jni_env, jni_env->AddLocalReference<jclass>(obj->Ptr()->GetClass()));
+
+ handler_->DispatchEvent(self,
+ JVMTI_EVENT_VM_OBJECT_ALLOC,
+ jni_env,
+ thread.get(),
+ object.get(),
+ klass.get(),
+ byte_count);
+ }
+ }
+
+ private:
+ EventHandler* handler_;
+};
+
+static void SetupObjectAllocationTracking(art::gc::AllocationListener* listener, bool enable) {
+ if (enable) {
+ art::Runtime::Current()->GetHeap()->SetAllocationListener(listener);
+ } else {
+ art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
+ }
+}
+
+// Handle special work for the given event type, if necessary.
+void EventHandler::HandleEventType(jvmtiEvent event, bool enable) {
+ if (event == JVMTI_EVENT_VM_OBJECT_ALLOC) {
+ SetupObjectAllocationTracking(alloc_listener_.get(), enable);
+ return;
+ }
+}
+
+jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
+ art::Thread* thread,
+ jvmtiEvent event,
+ jvmtiEventMode mode) {
+ if (thread != nullptr) {
+ art::ThreadState state = thread->GetState();
+ if (state == art::ThreadState::kStarting ||
+ state == art::ThreadState::kTerminated ||
+ thread->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ if (!IsThreadControllable(event)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ }
+
+ // TODO: Capability check.
+
+ if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ if (!EventMask::EventIsInRange(event)) {
+ return ERR(INVALID_EVENT_TYPE);
+ }
+
+ if (mode == JVMTI_ENABLE) {
+ env->event_masks.EnableEvent(thread, event);
+ global_mask.Set(event);
+ } else {
+ DCHECK_EQ(mode, JVMTI_DISABLE);
+
+ env->event_masks.DisableEvent(thread, event);
+
+ // Recompute the global mask.
+ bool union_value = false;
+ for (const ArtJvmTiEnv* stored_env : envs) {
+ union_value |= stored_env->event_masks.global_event_mask.Test(event);
+ union_value |= stored_env->event_masks.unioned_thread_event_mask.Test(event);
+ if (union_value) {
+ break;
+ }
+ }
+ global_mask.Set(event, union_value);
+ }
+
+ // Handle any special work required for the event type.
+ HandleEventType(event, mode == JVMTI_ENABLE);
+
+ return ERR(NONE);
+}
+
+EventHandler::EventHandler() {
+ alloc_listener_.reset(new JvmtiAllocationListener(this));
+}
+
+EventHandler::~EventHandler() {
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
new file mode 100644
index 0000000000..3212b12a54
--- /dev/null
+++ b/runtime/openjdkjvmti/events.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_EVENTS_H_
+#define ART_RUNTIME_OPENJDKJVMTI_EVENTS_H_
+
+#include <bitset>
+#include <vector>
+
+#include "base/logging.h"
+#include "jvmti.h"
+#include "thread.h"
+
+namespace openjdkjvmti {
+
+struct ArtJvmTiEnv;
+class JvmtiAllocationListener;
+
+struct EventMask {
+ static constexpr size_t kEventsSize = JVMTI_MAX_EVENT_TYPE_VAL - JVMTI_MIN_EVENT_TYPE_VAL + 1;
+ std::bitset<kEventsSize> bit_set;
+
+ static bool EventIsInRange(jvmtiEvent event) {
+ return event >= JVMTI_MIN_EVENT_TYPE_VAL && event <= JVMTI_MAX_EVENT_TYPE_VAL;
+ }
+
+ void Set(jvmtiEvent event, bool value = true) {
+ DCHECK(EventIsInRange(event));
+ bit_set.set(event - JVMTI_MIN_EVENT_TYPE_VAL, value);
+ }
+
+ bool Test(jvmtiEvent event) const {
+ DCHECK(EventIsInRange(event));
+ return bit_set.test(event - JVMTI_MIN_EVENT_TYPE_VAL);
+ }
+};
+
+struct EventMasks {
+ // The globally enabled events.
+ EventMask global_event_mask;
+
+ // The per-thread enabled events.
+
+ // It is not enough to store a Thread pointer, as these may be reused. Use the pointer and the
+ // thread id.
+ // Note: We could just use the tid like tracing does.
+ using UniqueThread = std::pair<art::Thread*, uint32_t>;
+ // TODO: Native thread objects are immovable, so we can use them as keys in an (unordered) map,
+ // if necessary.
+ std::vector<std::pair<UniqueThread, EventMask>> thread_event_masks;
+
+ // A union of the per-thread events, for fast-pathing.
+ EventMask unioned_thread_event_mask;
+
+ EventMask& GetEventMask(art::Thread* thread);
+ EventMask* GetEventMaskOrNull(art::Thread* thread);
+ void EnableEvent(art::Thread* thread, jvmtiEvent event);
+ void DisableEvent(art::Thread* thread, jvmtiEvent event);
+};
+
+// Helper class for event handling.
+class EventHandler {
+ public:
+ EventHandler();
+ ~EventHandler();
+
+ // Register an env. It is assumed that this happens on env creation, that is, no events are
+ // enabled yet.
+ void RegisterArtJvmTiEnv(ArtJvmTiEnv* env);
+
+ bool IsEventEnabledAnywhere(jvmtiEvent event) {
+ if (!EventMask::EventIsInRange(event)) {
+ return false;
+ }
+ return global_mask.Test(event);
+ }
+
+ jvmtiError SetEvent(ArtJvmTiEnv* env, art::Thread* thread, jvmtiEvent event, jvmtiEventMode mode);
+
+ template <typename ...Args>
+ ALWAYS_INLINE inline void DispatchEvent(art::Thread* thread, jvmtiEvent event, Args... args);
+
+ private:
+ void HandleEventType(jvmtiEvent event, bool enable);
+
+ // List of all JvmTiEnv objects that have been created, in their creation order.
+ std::vector<ArtJvmTiEnv*> envs;
+
+ // A union of all enabled events, anywhere.
+ EventMask global_mask;
+
+ std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_EVENTS_H_
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
new file mode 100644
index 0000000000..95d9a1d315
--- /dev/null
+++ b/runtime/openjdkjvmti/heap.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap.h"
+
+#include "art_jvmti.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/heap.h"
+#include "mirror/class.h"
+#include "object_callbacks.h"
+#include "object_tagging.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace openjdkjvmti {
+
+struct IterateThroughHeapData {
+ IterateThroughHeapData(HeapUtil* _heap_util,
+ jint heap_filter,
+ art::ObjPtr<art::mirror::Class> klass,
+ const jvmtiHeapCallbacks* _callbacks,
+ const void* _user_data)
+ : heap_util(_heap_util),
+ filter_klass(klass),
+ callbacks(_callbacks),
+ user_data(_user_data),
+ filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
+ filter_out_untagged((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0),
+ filter_out_class_tagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0),
+ filter_out_class_untagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0),
+ any_filter(filter_out_tagged ||
+ filter_out_untagged ||
+ filter_out_class_tagged ||
+ filter_out_class_untagged),
+ stop_reports(false) {
+ }
+
+ bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) {
+ if (!any_filter) {
+ return true;
+ }
+
+ if ((tag == 0 && filter_out_untagged) || (tag != 0 && filter_out_tagged)) {
+ return false;
+ }
+
+ if ((class_tag == 0 && filter_out_class_untagged) ||
+ (class_tag != 0 && filter_out_class_tagged)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ HeapUtil* heap_util;
+ art::ObjPtr<art::mirror::Class> filter_klass;
+ const jvmtiHeapCallbacks* callbacks;
+ const void* user_data;
+ const bool filter_out_tagged;
+ const bool filter_out_untagged;
+ const bool filter_out_class_tagged;
+ const bool filter_out_class_untagged;
+ const bool any_filter;
+
+ bool stop_reports;
+};
+
+static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ IterateThroughHeapData* ithd = reinterpret_cast<IterateThroughHeapData*>(arg);
+ // Early return, as we can't really stop visiting.
+ if (ithd->stop_reports) {
+ return;
+ }
+
+ art::ScopedAssertNoThreadSuspension no_suspension("IterateThroughHeapCallback");
+
+ jlong tag = 0;
+ ithd->heap_util->GetTags()->GetTag(obj, &tag);
+
+ jlong class_tag = 0;
+ art::ObjPtr<art::mirror::Class> klass = obj->GetClass();
+ ithd->heap_util->GetTags()->GetTag(klass.Ptr(), &class_tag);
+ // For simplicity, treat a stored tag of 0 the same as 'not tagged'.
+
+ if (!ithd->ShouldReportByHeapFilter(tag, class_tag)) {
+ return;
+ }
+
+ // TODO: Handle array_primitive_value_callback.
+
+ if (ithd->filter_klass != nullptr) {
+ if (ithd->filter_klass != klass) {
+ return;
+ }
+ }
+
+ jlong size = obj->SizeOf();
+
+ jint length = -1;
+ if (obj->IsArrayInstance()) {
+ length = obj->AsArray()->GetLength();
+ }
+
+ jlong saved_tag = tag;
+ jint ret = ithd->callbacks->heap_iteration_callback(class_tag,
+ size,
+ &tag,
+ length,
+ const_cast<void*>(ithd->user_data));
+
+ if (tag != saved_tag) {
+ ithd->heap_util->GetTags()->Set(obj, tag);
+ }
+
+ ithd->stop_reports = (ret & JVMTI_VISIT_ABORT) != 0;
+
+ // TODO: Implement the array primitive and string primitive callbacks.
+ // TODO: Implement the primitive field callback.
+}
+
+jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass,
+ const jvmtiHeapCallbacks* callbacks,
+ const void* user_data) {
+ if (callbacks == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ if (callbacks->array_primitive_value_callback != nullptr) {
+ // TODO: Implement.
+ return ERR(NOT_IMPLEMENTED);
+ }
+
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
+
+ IterateThroughHeapData ithd(this,
+ heap_filter,
+ soa.Decode<art::mirror::Class>(klass),
+ callbacks,
+ user_data);
+
+ art::Runtime::Current()->GetHeap()->VisitObjects(IterateThroughHeapObjectCallback, &ithd);
+
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
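For orientation only, and not part of the patch: heap_iteration_callback is supplied by the agent, and its return value is what the JVMTI_VISIT_ABORT check above inspects. A minimal sketch, with a made-up CountLargeObjects callback and a 4 KiB threshold:

#include <cstdint>
#include <cstring>
#include "jvmti.h"

// Hypothetical jvmtiHeapIterationCallback: counts objects larger than 4 KiB via user_data.
static jint JNICALL CountLargeObjects(jlong /* class_tag */,
                                      jlong size,
                                      jlong* /* tag_ptr */,
                                      jint /* length */,
                                      void* user_data) {
  if (size > 4096) {
    ++*static_cast<uint64_t*>(user_data);
  }
  // Returning without JVMTI_VISIT_ABORT keeps the walk going (stop_reports stays false).
  return JVMTI_VISIT_OBJECTS;
}

static uint64_t CountLargeHeapObjects(jvmtiEnv* jvmti) {
  uint64_t count = 0;
  jvmtiHeapCallbacks callbacks;
  std::memset(&callbacks, 0, sizeof(callbacks));
  callbacks.heap_iteration_callback = CountLargeObjects;
  // heap_filter = 0: no tag-based filtering; klass = nullptr: visit objects of every class.
  jvmti->IterateThroughHeap(0, nullptr, &callbacks, &count);
  return count;
}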
diff --git a/runtime/openjdkjvmti/heap.h b/runtime/openjdkjvmti/heap.h
new file mode 100644
index 0000000000..fb9a2164ae
--- /dev/null
+++ b/runtime/openjdkjvmti/heap.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
+#define ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
+
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class ObjectTagTable;
+
+class HeapUtil {
+ public:
+ explicit HeapUtil(ObjectTagTable* tags) : tags_(tags) {
+ }
+
+ jvmtiError IterateThroughHeap(jvmtiEnv* env,
+ jint heap_filter,
+ jclass klass,
+ const jvmtiHeapCallbacks* callbacks,
+ const void* user_data);
+
+ ObjectTagTable* GetTags() {
+ return tags_;
+ }
+
+ private:
+ ObjectTagTable* tags_;
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
new file mode 100644
index 0000000000..29d483094b
--- /dev/null
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -0,0 +1,154 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "object_tagging.h"
+
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "events-inl.h"
+#include "gc/allocation_listener.h"
+#include "instrumentation.h"
+#include "jni_env_ext-inl.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+
+namespace openjdkjvmti {
+
+void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ if (first_free_ == tagged_objects_.size()) {
+ tagged_objects_.push_back(Entry(art::GcRoot<art::mirror::Object>(obj), tag));
+ first_free_++;
+ } else {
+ DCHECK_LT(first_free_, tagged_objects_.size());
+ DCHECK(tagged_objects_[first_free_].first.IsNull());
+ tagged_objects_[first_free_] = Entry(art::GcRoot<art::mirror::Object>(obj), tag);
+ // TODO: scan for free elements.
+ first_free_ = tagged_objects_.size();
+ }
+}
+
+bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
+ if (it->first.Read(nullptr) == obj) {
+ if (tag != nullptr) {
+ *tag = it->second;
+ }
+ it->first = art::GcRoot<art::mirror::Object>(nullptr);
+
+ size_t index = it - tagged_objects_.begin();
+ if (index < first_free_) {
+ first_free_ = index;
+ }
+
+ // TODO: compaction.
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ for (auto& pair : tagged_objects_) {
+ if (pair.first.Read(nullptr) == obj) {
+ pair.second = new_tag;
+ return true;
+ }
+ }
+
+ // TODO: Refactor to share code with Add.
+ if (first_free_ == tagged_objects_.size()) {
+ tagged_objects_.push_back(Entry(art::GcRoot<art::mirror::Object>(obj), new_tag));
+ first_free_++;
+ } else {
+ DCHECK_LT(first_free_, tagged_objects_.size());
+ DCHECK(tagged_objects_[first_free_].first.IsNull());
+ tagged_objects_[first_free_] = Entry(art::GcRoot<art::mirror::Object>(obj), new_tag);
+ // TODO: scan for free elements.
+ first_free_ = tagged_objects_.size();
+ }
+
+ return false;
+}
+
+void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
+ if (event_handler_->IsEventEnabledAnywhere(JVMTI_EVENT_OBJECT_FREE)) {
+ SweepImpl<true>(visitor);
+ } else {
+ SweepImpl<false>(visitor);
+ }
+}
+
+template <bool kHandleNull>
+void ObjectTagTable::SweepImpl(art::IsMarkedVisitor* visitor) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+
+ for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
+ if (!it->first.IsNull()) {
+ art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
+ art::mirror::Object* target_obj = visitor->IsMarked(original_obj);
+ if (original_obj != target_obj) {
+ it->first = art::GcRoot<art::mirror::Object>(target_obj);
+
+ if (kHandleNull && target_obj == nullptr) {
+ HandleNullSweep(it->second);
+ }
+ }
+ } else {
+ size_t index = it - tagged_objects_.begin();
+ if (index < first_free_) {
+ first_free_ = index;
+ }
+ }
+ }
+}
+
+void ObjectTagTable::HandleNullSweep(jlong tag) {
+ event_handler_->DispatchEvent(nullptr, JVMTI_EVENT_OBJECT_FREE, tag);
+}
+
+} // namespace openjdkjvmti
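
The Add/Remove pair above relies on a first_free_ cursor: Remove nulls out a slot and remembers its index, so a later Add can refill the hole instead of growing the vector. A minimal standalone sketch of that slot-reuse scheme (plain C++, not ART code; MiniTagTable and its types are illustrative assumptions):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct MiniTagTable {
      // (object, tag); a null object marks a free slot, mirroring GcRoot(nullptr) above.
      using Entry = std::pair<const void*, int64_t>;
      std::vector<Entry> entries;
      size_t first_free = 0;

      void Add(const void* obj, int64_t tag) {
        if (first_free == entries.size()) {
          entries.emplace_back(obj, tag);
          ++first_free;
        } else {
          entries[first_free] = Entry(obj, tag);
          // Same simplification as the TODO above: no scan for further holes yet.
          first_free = entries.size();
        }
      }

      bool Remove(const void* obj, int64_t* tag_out) {
        for (size_t i = 0; i < entries.size(); ++i) {
          if (entries[i].first == obj) {
            if (tag_out != nullptr) {
              *tag_out = entries[i].second;
            }
            entries[i].first = nullptr;  // Free the slot.
            if (i < first_free) {
              first_free = i;            // Remember the hole for the next Add.
            }
            return true;
          }
        }
        return false;
      }
    };
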
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 523e15f6df..b399e653ee 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -25,29 +25,26 @@
namespace openjdkjvmti {
+class EventHandler;
+
class ObjectTagTable : public art::gc::SystemWeakHolder {
public:
- ObjectTagTable() : art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock) {
+ explicit ObjectTagTable(EventHandler* event_handler)
+ : art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock),
+ event_handler_(event_handler) {
}
void Add(art::mirror::Object* obj, jlong tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
+ REQUIRES(!allow_disallow_lock_);
- if (first_free_ == tagged_objects_.size()) {
- tagged_objects_.push_back(Entry(art::GcRoot<art::mirror::Object>(obj), tag));
- first_free_++;
- } else {
- DCHECK_LT(first_free_, tagged_objects_.size());
- DCHECK(tagged_objects_[first_free_].first.IsNull());
- tagged_objects_[first_free_] = Entry(art::GcRoot<art::mirror::Object>(obj), tag);
- // TODO: scan for free elements.
- first_free_ = tagged_objects_.size();
- }
- }
+ bool Remove(art::mirror::Object* obj, jlong* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ bool Set(art::mirror::Object* obj, jlong tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
bool GetTag(art::mirror::Object* obj, jlong* result)
REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -66,70 +63,23 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
return false;
}
- bool Remove(art::mirror::Object* obj, jlong* tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
- if (it->first.Read(nullptr) == obj) {
- if (tag != nullptr) {
- *tag = it->second;
- }
- it->first = art::GcRoot<art::mirror::Object>(nullptr);
-
- size_t index = it - tagged_objects_.begin();
- if (index < first_free_) {
- first_free_ = index;
- }
-
- // TODO: compaction.
-
- return true;
- }
- }
-
- return false;
- }
-
void Sweep(art::IsMarkedVisitor* visitor)
REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
-
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
- if (!it->first.IsNull()) {
- art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
- art::mirror::Object* target_obj = visitor->IsMarked(original_obj);
- if (original_obj != target_obj) {
- it->first = art::GcRoot<art::mirror::Object>(target_obj);
-
- if (target_obj == nullptr) {
- HandleNullSweep(it->second);
- }
- }
- } else {
- size_t index = it - tagged_objects_.begin();
- if (index < first_free_) {
- first_free_ = index;
- }
- }
- }
- }
+ REQUIRES(!allow_disallow_lock_);
private:
using Entry = std::pair<art::GcRoot<art::mirror::Object>, jlong>;
- void HandleNullSweep(jlong tag ATTRIBUTE_UNUSED) {
- // TODO: Handle deallocation reporting here. We'll have to enqueue tags temporarily, as we
- // probably shouldn't call the callbacks directly (to avoid any issues).
- }
+ template <bool kHandleNull>
+ void SweepImpl(art::IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ void HandleNullSweep(jlong tag);
std::vector<Entry> tagged_objects_ GUARDED_BY(allow_disallow_lock_);
size_t first_free_ = 0;
+
+ EventHandler* event_handler_;
};
} // namespace openjdkjvmti
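
Sweep above picks a SweepImpl instantiation once per sweep, so when no OBJECT_FREE listener is registered the per-entry null check and event dispatch are compiled out of the hot loop. A standalone sketch of that bool-template dispatch (plain C++, not ART code; names are illustrative):

    #include <cstdio>
    #include <vector>

    // Stand-in for the mark check; the real code asks the GC's IsMarkedVisitor.
    static const void* FakeIsMarked(const void* obj) { return obj; }

    template <bool kHandleNull>
    void SweepImpl(std::vector<const void*>& roots) {
      for (const void*& root : roots) {
        const void* target = FakeIsMarked(root);
        if (kHandleNull && target == nullptr) {
          std::puts("would report OBJECT_FREE here");  // Dead code in the <false> instantiation.
        }
        root = target;
      }
    }

    void Sweep(std::vector<const void*>& roots, bool any_object_free_listener) {
      if (any_object_free_listener) {
        SweepImpl<true>(roots);
      } else {
        SweepImpl<false>(roots);
      }
    }
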
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 84985c2997..32a55822b7 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -128,8 +128,8 @@ TEST_F(ProxyTest, ProxyClassHelper) {
ASSERT_TRUE(proxy_class->IsInitialized());
EXPECT_EQ(2U, proxy_class->NumDirectInterfaces()); // Interfaces$I and Interfaces$J.
- EXPECT_EQ(I.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
- EXPECT_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 1));
+ EXPECT_OBJ_PTR_EQ(I.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
+ EXPECT_OBJ_PTR_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 1));
std::string temp;
const char* proxy_class_descriptor = proxy_class->GetDescriptor(&temp);
EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor);
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index e0ba8e7489..a9f39d09b4 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -41,7 +41,7 @@ ReferenceTable::~ReferenceTable() {
void ReferenceTable::Add(ObjPtr<mirror::Object> obj) {
DCHECK(obj != nullptr);
- VerifyObject(obj.Ptr());
+ VerifyObject(obj);
if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
<< "overflowed (" << max_size_ << " entries)";
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index 52cdfb817a..c4d4fae17c 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -21,7 +21,7 @@
#include "base/stringprintf.h"
#include "common_throws.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "primitive.h"
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 189ed03fb0..22076bbc05 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -83,14 +83,15 @@ class ReflectionTest : public CommonCompilerTest {
}
void ReflectionTestMakeExecutable(ArtMethod** method,
- mirror::Object** receiver,
- bool is_static, const char* method_name,
+ ObjPtr<mirror::Object>* receiver,
+ bool is_static,
+ const char* method_name,
const char* method_signature)
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
+ StackHandleScope<3> hs(self);
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(
ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader>(jclass_loader)));
@@ -100,8 +101,9 @@ class ReflectionTest : public CommonCompilerTest {
}
MakeExecutable(class_loader.Get(), class_name);
- mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
- class_loader);
+ ObjPtr<mirror::Class> c = class_linker_->FindClass(self,
+ DotToDescriptor(class_name).c_str(),
+ class_loader);
CHECK(c != nullptr);
*method = is_static ? c->FindDirectMethod(method_name, method_signature, kRuntimePointerSize)
@@ -112,14 +114,17 @@ class ReflectionTest : public CommonCompilerTest {
*receiver = nullptr;
} else {
// Ensure class is initialized before allocating object
- StackHandleScope<1> hs2(self);
- Handle<mirror::Class> h_class(hs2.NewHandle(c));
- bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
- CHECK(initialized);
+ {
+ StackHandleScope<1> hs2(self);
+ HandleWrapperObjPtr<mirror::Class> h_class(hs2.NewHandleWrapper(&c));
+ bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
+ CHECK(initialized);
+ }
*receiver = c->AllocObject(self);
}
// Start runtime.
+ HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(receiver));
bool started = runtime_->Start();
CHECK(started);
self->TransitionFromSuspendedToRunnable();
@@ -128,7 +133,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeNopMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr);
@@ -137,7 +142,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityByteMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -163,7 +168,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -188,7 +193,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -213,7 +218,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
@@ -242,7 +247,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
@@ -281,7 +286,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
@@ -325,7 +330,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
@@ -374,7 +379,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
@@ -408,7 +413,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
@@ -435,7 +440,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
@@ -465,7 +470,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index aed6a2b1cf..be9786024a 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -27,7 +27,6 @@
namespace art {
-static constexpr bool kDumpHeapObjectOnSigsevg = false;
static constexpr bool kUseSignalHandler = false;
struct sigaction old_action;
@@ -48,11 +47,6 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
if (runtime != nullptr) {
// Print this out first in case DumpObject faults.
LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
- gc::Heap* heap = runtime->GetHeap();
- if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
- LOG(FATAL_WITHOUT_ABORT) << "Dump heap object at fault address: ";
- heap->DumpObject(LOG_STREAM(FATAL_WITHOUT_ABORT), reinterpret_cast<mirror::Object*>(info->si_addr));
- }
}
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index cee73e175a..93704a971c 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -21,6 +21,7 @@
#include <sys/utsname.h>
#include <inttypes.h>
+#include <iostream>
#include <sstream>
#include "base/dumpable.h"
@@ -35,7 +36,6 @@
namespace art {
-static constexpr bool kDumpHeapObjectOnSigsevg = false;
static constexpr bool kUseSigRTTimeout = true;
static constexpr bool kDumpNativeStackOnTimeout = true;
@@ -337,17 +337,21 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
UContext thread_context(raw_context);
Backtrace thread_backtrace(raw_context);
- LOG(FATAL_WITHOUT_ABORT) << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
- << StringPrintf("Fatal signal %d (%s), code %d (%s)",
- signal_number, GetSignalName(signal_number),
- info->si_code,
- GetSignalCodeName(signal_number, info->si_code))
- << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << "\n"
- << "OS: " << Dumpable<OsInfo>(os_info) << "\n"
- << "Cmdline: " << cmd_line << "\n"
- << "Thread: " << tid << " \"" << thread_name << "\"\n"
- << "Registers:\n" << Dumpable<UContext>(thread_context) << "\n"
- << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace);
+ // Note: We use cerr directly instead of the LOG macros so that even partial output
+ //       makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
+ //       considering this is an abort situation.
+
+ std::cerr << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
+ << StringPrintf("Fatal signal %d (%s), code %d (%s)",
+ signal_number, GetSignalName(signal_number),
+ info->si_code,
+ GetSignalCodeName(signal_number, info->si_code))
+ << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << std::endl
+ << "OS: " << Dumpable<OsInfo>(os_info) << std::endl
+ << "Cmdline: " << cmd_line << std::endl
+ << "Thread: " << tid << " \"" << thread_name << "\"" << std::endl
+ << "Registers:\n" << Dumpable<UContext>(thread_context) << std::endl
+ << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << std::endl;
if (kIsDebugBuild && signal_number == SIGSEGV) {
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
}
@@ -357,23 +361,20 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
// Special timeout signal. Try to dump all threads.
// Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
// are of value here.
- runtime->GetThreadList()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT), kDumpNativeStackOnTimeout);
- }
- gc::Heap* heap = runtime->GetHeap();
- LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
- if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
- LOG(FATAL_WITHOUT_ABORT) << "Dump heap object at fault address: ";
- heap->DumpObject(LOG_STREAM(FATAL_WITHOUT_ABORT), reinterpret_cast<mirror::Object*>(info->si_addr));
+ runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
+ std::cerr << std::endl;
}
+ std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
}
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
- LOG(FATAL_WITHOUT_ABORT) << "********************************************************\n"
- << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
- << "\""
- << " has been suspended while crashing.\n"
- << "* Attach gdb:\n"
- << "* gdb -p " << tid << "\n"
- << "********************************************************\n";
+ std::cerr << "********************************************************\n"
+ << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
+ << "\""
+ << " has been suspended while crashing.\n"
+ << "* Attach gdb:\n"
+ << "* gdb -p " << tid << "\n"
+ << "********************************************************"
+ << std::endl;
// Wait for debugger to attach.
while (true) {
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3e2ecfe55e..6acce273c9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1826,7 +1826,7 @@ void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
size_t Thread::NumHandleReferences() {
size_t count = 0;
- for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
+ for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
count += cur->NumberOfReferences();
}
return count;
@@ -1835,7 +1835,7 @@ size_t Thread::NumHandleReferences() {
bool Thread::HandleScopeContains(jobject obj) const {
StackReference<mirror::Object>* hs_entry =
reinterpret_cast<StackReference<mirror::Object>*>(obj);
- for (HandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
+ for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
if (cur->Contains(hs_entry)) {
return true;
}
@@ -1847,12 +1847,8 @@ bool Thread::HandleScopeContains(jobject obj) const {
void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootNativeStack, thread_id));
- for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
- for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
- // GetReference returns a pointer to the stack reference within the handle scope. If this
- // needs to be updated, it will be done by the root visitor.
- buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
- }
+ for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
+ cur->VisitRoots(buffered_visitor);
}
}
@@ -1875,7 +1871,7 @@ ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
if (LIKELY(HandleScopeContains(obj))) {
// Read from handle scope.
result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
- VerifyObject(result.Ptr());
+ VerifyObject(result);
} else {
tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
expect_null = true;
@@ -2276,7 +2272,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
}
DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
Handle<mirror::Throwable> exception(
- hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
+ hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
if (exception.Get() == nullptr) {
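
With the switch to BaseHandleScope above, Thread::HandleScopeVisitRoots no longer indexes GetHandle(j) itself; each scope visits its own slots and the thread only walks the linked list. A standalone sketch of that delegation (plain C++, not ART code; names are illustrative):

    #include <cstdio>
    #include <vector>

    struct Scope {
      Scope* link;                     // Older scope on the thread's stack, or nullptr.
      std::vector<const void*> slots;  // Stand-ins for stack references.

      template <typename Visitor>
      void VisitRoots(Visitor&& visit) const {
        for (const void* slot : slots) {
          if (slot != nullptr) {       // Mirrors VisitRootIfNonNull.
            visit(slot);
          }
        }
      }
    };

    template <typename Visitor>
    void VisitAllScopes(const Scope* top, Visitor&& visit) {
      for (const Scope* cur = top; cur != nullptr; cur = cur->link) {
        cur->VisitRoots(visit);        // Each scope knows how to walk its own slots.
      }
    }

    int main() {
      int a = 1, b = 2;
      Scope older{nullptr, {&a, nullptr}};
      Scope top{&older, {&b}};
      size_t count = 0;
      VisitAllScopes(&top, [&count](const void*) { ++count; });
      std::printf("visited %zu non-null roots\n", count);  // Prints 2.
      return 0;
    }
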
diff --git a/runtime/thread.h b/runtime/thread.h
index 20b4cc144b..6f5913e6b3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -570,6 +570,10 @@ class Thread {
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
}
+ static constexpr size_t IsGcMarkingSize() {
+ return sizeof(tls32_.is_gc_marking);
+ }
+
// Deoptimize the Java stack.
void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -799,17 +803,17 @@ class Thread {
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
REQUIRES_SHARED(Locks::mutator_lock_);
- HandleScope* GetTopHandleScope() {
+ BaseHandleScope* GetTopHandleScope() {
return tlsPtr_.top_handle_scope;
}
- void PushHandleScope(HandleScope* handle_scope) {
+ void PushHandleScope(BaseHandleScope* handle_scope) {
DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
tlsPtr_.top_handle_scope = handle_scope;
}
- HandleScope* PopHandleScope() {
- HandleScope* handle_scope = tlsPtr_.top_handle_scope;
+ BaseHandleScope* PopHandleScope() {
+ BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
DCHECK(handle_scope != nullptr);
tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
return handle_scope;
@@ -1446,7 +1450,7 @@ class Thread {
mirror::Object* monitor_enter_object;
// Top of linked list of handle scopes or null for none.
- HandleScope* top_handle_scope;
+ BaseHandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
// useful for testing.
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 4892b49533..43151dd425 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -29,7 +29,7 @@ inline void VerifyObject(ObjPtr<mirror::Object> obj) {
if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
if (kVerifyObjectSupport > kVerifyObjectModeFast) {
// Slow object verification, try the heap right away.
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
} else {
// Fast object verification, only call the heap if our quick sanity tests fail. The heap will
// print the diagnostic message.
@@ -40,7 +40,7 @@ inline void VerifyObject(ObjPtr<mirror::Object> obj) {
failed = failed || !VerifyClassClass(c);
}
if (UNLIKELY(failed)) {
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
}
}
}
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
index 7acf0080f8..dca00bd2ec 100644
--- a/test/530-checker-loops2/src/Main.java
+++ b/test/530-checker-loops2/src/Main.java
@@ -111,6 +111,24 @@ public class Main {
return result;
}
+ /// CHECK-START: int Main.periodicXorSequence(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: int Main.periodicXorSequence(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ private static int periodicXorSequence(int tc) {
+ int[] x = { 1, 3 };
+ // Loop with periodic sequence (0, 1).
+ int k = 0;
+ int result = 0;
+ for (int i = 0; i < tc; i++) {
+ result += x[k];
+ k ^= 1;
+ }
+ return result;
+ }
+
/// CHECK-START: int Main.justRightUp1() BCE (before)
/// CHECK-DAG: BoundsCheck
//
@@ -895,8 +913,9 @@ public class Main {
expectEquals(0, periodicIdiom(-1));
for (int tc = 0; tc < 32; tc++) {
int expected = (tc >> 1) << 2;
- if ((tc & 1) != 0)
+ if ((tc & 1) != 0) {
expected += 1;
+ }
expectEquals(expected, periodicIdiom(tc));
}
@@ -904,8 +923,9 @@ public class Main {
expectEquals(0, periodicSequence2(-1));
for (int tc = 0; tc < 32; tc++) {
int expected = (tc >> 1) << 2;
- if ((tc & 1) != 0)
+ if ((tc & 1) != 0) {
expected += 1;
+ }
expectEquals(expected, periodicSequence2(tc));
}
@@ -915,6 +935,16 @@ public class Main {
expectEquals(tc * 16, periodicSequence4(tc));
}
+ // Periodic adds (1, 3), one at a time.
+ expectEquals(0, periodicXorSequence(-1));
+ for (int tc = 0; tc < 32; tc++) {
+ int expected = (tc >> 1) << 2;
+ if ((tc & 1) != 0) {
+ expected += 1;
+ }
+ expectEquals(expected, periodicXorSequence(tc));
+ }
+
// Large bounds.
expectEquals(55, justRightUp1());
expectEquals(55, justRightUp2());
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index 0ea85da5ce..5c789cdd84 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -134,17 +134,20 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-NOT: BoundsCheck
//
/// CHECK-START: void Main.deadCycleWithException(int) loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-NOT: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
static void deadCycleWithException(int k) {
int dead = 0;
for (int i = 0; i < a.length; i++) {
a[i] = 4;
- // Increment value of dead cycle may throw exception.
+ // Incrementing the value of the dead cycle may throw an exception. Dynamic
+ // BCE takes care of the bounds check, though, which enables removing the
+ // ArrayGet once the dead cycle itself has been removed.
dead += a[k];
}
}
@@ -180,7 +183,17 @@ public class Main {
return closed; // only needs last value
}
- // TODO: move closed form even further out?
+ /// CHECK-START: int Main.closedFormNested() loop_optimization (before)
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ //
+ /// CHECK-START: int Main.closedFormNested() loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:{{B\d+}}
+ /// CHECK-DAG: Return loop:none
static int closedFormNested() {
int closed = 0;
for (int i = 0; i < 10; i++) {
@@ -191,6 +204,27 @@ public class Main {
return closed; // only needs last-value
}
+ /// CHECK-START: int Main.closedFormNestedAlt() loop_optimization (before)
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ //
+ /// CHECK-START: int Main.closedFormNestedAlt() loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:{{B\d+}}
+ /// CHECK-DAG: Return loop:none
+ static int closedFormNestedAlt() {
+ int closed = 12345;
+ for (int i = 0; i < 17; i++) {
+ for (int j = 0; j < 23; j++) {
+ closed += 7;
+ }
+ }
+ return closed; // only needs last-value
+ }
+
// TODO: taken test around closed form?
static int closedFormInductionUpN(int n) {
int closed = 12345;
@@ -220,9 +254,20 @@ public class Main {
}
// TODO: move closed form even further out?
- static int closedFormNestedNN(int n) {
- int closed = 0;
+ static int closedFormNestedNAlt(int n) {
+ int closed = 12345;
for (int i = 0; i < n; i++) {
+ for (int j = 0; j < 23; j++) {
+ closed += 7;
+ }
+ }
+ return closed; // only needs last-value
+ }
+
+ // TODO: move closed form even further out?
+ static int closedFormNestedMN(int m, int n) {
+ int closed = 0;
+ for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
closed++;
}
@@ -230,6 +275,17 @@ public class Main {
return closed; // only needs last-value
}
+ // TODO: move closed form even further out?
+ static int closedFormNestedMNAlt(int m, int n) {
+ int closed = 12345;
+ for (int i = 0; i < m; i++) {
+ for (int j = 0; j < n; j++) {
+ closed += 7;
+ }
+ }
+ return closed; // only needs last-value
+ }
+
/// CHECK-START: int Main.mainIndexReturned() loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:{{B\d+}} outer_loop:none
/// CHECK-DAG: Return [<<Phi>>] loop:none
@@ -444,12 +500,15 @@ public class Main {
expectEquals(12395, closedFormInductionUp());
expectEquals(12295, closedFormInductionInAndDown(12345));
expectEquals(10 * 10, closedFormNested());
+ expectEquals(12345 + 17 * 23 * 7, closedFormNestedAlt());
for (int n = -4; n < 10; n++) {
int tc = (n <= 0) ? 0 : n;
expectEquals(12345 + tc * 5, closedFormInductionUpN(n));
expectEquals(12345 - tc * 5, closedFormInductionInAndDownN(12345, n));
expectEquals(tc * 10, closedFormNestedN(n));
- expectEquals(tc * tc, closedFormNestedNN(n));
+ expectEquals(12345 + tc * 23 * 7, closedFormNestedNAlt(n));
+ expectEquals(tc * (tc + 1), closedFormNestedMN(n, n + 1));
+ expectEquals(12345 + tc * (tc + 1) * 7, closedFormNestedMNAlt(n, n + 1));
}
expectEquals(10, mainIndexReturned());
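
For reference, the constants asserted above follow directly from the trip counts of the nested loops (a quick check, not part of the test):

    closedFormNestedAlt()           = 12345 + 17 * 23 * 7        = 12345 + 2737 = 15082
    closedFormNestedNAlt(n)         = 12345 + max(n, 0) * 23 * 7
    closedFormNestedMN(n, n + 1)    = max(n, 0) * (n + 1)
    closedFormNestedMNAlt(n, n + 1) = 12345 + max(n, 0) * (n + 1) * 7
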
diff --git a/test/619-checker-current-method/expected.txt b/test/619-checker-current-method/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/619-checker-current-method/expected.txt
diff --git a/test/619-checker-current-method/info.txt b/test/619-checker-current-method/info.txt
new file mode 100644
index 0000000000..75f5213975
--- /dev/null
+++ b/test/619-checker-current-method/info.txt
@@ -0,0 +1,2 @@
+Checks that we don't store the current method when the compiled
+code does not need it.
diff --git a/test/619-checker-current-method/src/Main.java b/test/619-checker-current-method/src/Main.java
new file mode 100644
index 0000000000..d829370d74
--- /dev/null
+++ b/test/619-checker-current-method/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ // Check that there is no instruction storing to the stack.
+ /// CHECK-START-X86: int Main.foo(int, int, int, int, int, int) disassembly (after)
+ /// CHECK-NOT: mov [{{\w+}}], {{\w+}}
+
+ // Use enough parameters to ensure we'll need a frame.
+ public static int foo(int a, int b, int c, int d, int e, int f) {
+ return a + b + c + d + e + f;
+ }
+
+ public static void main(String[] args) {
+ if (foo(1, 2, 3, 4, 5, 6) != 21) {
+ throw new Error("Expected 21");
+ }
+ }
+}
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
index e0d623e6e1..5b0d219d8f 100644
--- a/test/902-hello-transformation/transform.cc
+++ b/test/902-hello-transformation/transform.cc
@@ -23,6 +23,7 @@
#include "base/logging.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_load.h"
#include "utils.h"
namespace art {
@@ -30,7 +31,6 @@ namespace Test902HelloTransformation {
static bool RuntimeIsJvm = false;
-jvmtiEnv* jvmti_env;
bool IsJVM() {
return RuntimeIsJvm;
}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index 8ccdf49892..7d692fb5de 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -25,13 +25,12 @@
#include "base/logging.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_load.h"
#include "utils.h"
namespace art {
namespace Test903HelloTagging {
-static jvmtiEnv* jvmti_env;
-
extern "C" JNIEXPORT void JNICALL Java_Main_setTag(JNIEnv* env ATTRIBUTE_UNUSED,
jclass,
jobject obj,
diff --git a/test/904-object-allocation/build b/test/904-object-allocation/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/904-object-allocation/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/904-object-allocation/expected.txt b/test/904-object-allocation/expected.txt
new file mode 100644
index 0000000000..371d2b7593
--- /dev/null
+++ b/test/904-object-allocation/expected.txt
@@ -0,0 +1,8 @@
+ObjectAllocated type java.lang.Object/java.lang.Object size 8
+ObjectAllocated type java.lang.Integer/java.lang.Integer size 16
+ObjectAllocated type java.lang.Short/java.lang.Short size 16
+Tracking on same thread
+ObjectAllocated type java.lang.Double/java.lang.Double size 16
+Tracking on same thread, not disabling tracking
+ObjectAllocated type java.lang.Double/java.lang.Double size 16
+Tracking on different thread
diff --git a/test/904-object-allocation/info.txt b/test/904-object-allocation/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/904-object-allocation/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/904-object-allocation/run b/test/904-object-allocation/run
new file mode 100755
index 0000000000..2f7ad21886
--- /dev/null
+++ b/test/904-object-allocation/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=904-object-allocation,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/904-object-allocation/src/Main.java b/test/904-object-allocation/src/Main.java
new file mode 100644
index 0000000000..9a089bdd78
--- /dev/null
+++ b/test/904-object-allocation/src/Main.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ prefetchClassNames();
+
+ doTest(l);
+ }
+
+ // Pre-resolve class names so the strings don't have to be allocated as a side effect of
+ // callback printing.
+ private static void prefetchClassNames() {
+ Object.class.getName();
+ Integer.class.getName();
+ Float.class.getName();
+ Short.class.getName();
+ Byte.class.getName();
+ Double.class.getName();
+ }
+
+ public static void doTest(ArrayList<Object> l) throws Exception {
+ setupObjectAllocCallback();
+
+ enableAllocationTracking(null, true);
+
+ l.add(new Object());
+ l.add(new Integer(1));
+
+ enableAllocationTracking(null, false);
+
+ l.add(new Float(1.0f));
+
+ enableAllocationTracking(Thread.currentThread(), true);
+
+ l.add(new Short((short)0));
+
+ enableAllocationTracking(Thread.currentThread(), false);
+
+ l.add(new Byte((byte)0));
+
+ System.out.println("Tracking on same thread");
+
+ testThread(l, true, true);
+
+ l.add(new Byte((byte)0));
+
+ System.out.println("Tracking on same thread, not disabling tracking");
+
+ testThread(l, true, false);
+
+ System.out.println("Tracking on different thread");
+
+ testThread(l, false, true);
+
+ l.add(new Byte((byte)0));
+ }
+
+ private static void testThread(final ArrayList<Object> l, final boolean sameThread,
+ final boolean disableTracking) throws Exception {
+ final SimpleBarrier startBarrier = new SimpleBarrier(1);
+ final SimpleBarrier trackBarrier = new SimpleBarrier(1);
+ final SimpleBarrier disableBarrier = new SimpleBarrier(1);
+
+ Thread t = new Thread() {
+ public void run() {
+ try {
+ startBarrier.dec();
+ trackBarrier.waitFor();
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ System.exit(1);
+ }
+
+ l.add(new Double(0.0));
+
+ if (disableTracking) {
+ enableAllocationTracking(sameThread ? this : Thread.currentThread(), false);
+ }
+ }
+ };
+
+ t.start();
+ startBarrier.waitFor();
+ enableAllocationTracking(sameThread ? t : Thread.currentThread(), true);
+ trackBarrier.dec();
+
+ t.join();
+ }
+
+ private static class SimpleBarrier {
+ int count;
+
+ public SimpleBarrier(int i) {
+ count = i;
+ }
+
+ public synchronized void dec() throws Exception {
+ count--;
+ notifyAll();
+ }
+
+ public synchronized void waitFor() throws Exception {
+ while (count != 0) {
+ wait();
+ }
+ }
+ }
+
+ private static native void setupObjectAllocCallback();
+ private static native void enableAllocationTracking(Thread thread, boolean enable);
+}
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
new file mode 100644
index 0000000000..b22fc6c8c0
--- /dev/null
+++ b/test/904-object-allocation/tracking.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tracking.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+#include "ti-agent/common_load.h"
+#include "utils.h"
+
+namespace art {
+namespace Test904ObjectAllocation {
+
+static std::string GetClassName(JNIEnv* jni_env, jclass cls) {
+ ScopedLocalRef<jclass> class_class(jni_env, jni_env->GetObjectClass(cls));
+ jmethodID mid = jni_env->GetMethodID(class_class.get(), "getName", "()Ljava/lang/String;");
+ ScopedLocalRef<jstring> str(
+ jni_env, reinterpret_cast<jstring>(jni_env->CallObjectMethod(cls, mid)));
+ ScopedUtfChars utf_chars(jni_env, str.get());
+ return utf_chars.c_str();
+}
+
+static void JNICALL ObjectAllocated(jvmtiEnv* ti_env ATTRIBUTE_UNUSED,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jobject object,
+ jclass object_klass,
+ jlong size) {
+ std::string object_klass_descriptor = GetClassName(jni_env, object_klass);
+ ScopedLocalRef<jclass> object_klass2(jni_env, jni_env->GetObjectClass(object));
+ std::string object_klass_descriptor2 = GetClassName(jni_env, object_klass2.get());
+
+ printf("ObjectAllocated type %s/%s size %zu\n",
+ object_klass_descriptor.c_str(),
+ object_klass_descriptor2.c_str(),
+ static_cast<size_t>(size));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectAllocCallback(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.VMObjectAlloc = ObjectAllocated;
+
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error setting callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableAllocationTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass,
+ jthread thread,
+ jboolean enable) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_VM_OBJECT_ALLOC,
+ thread);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling allocation tracking: %s\n", err);
+ }
+}
+
+// OnLoad just caches the jvmti env for the test's native methods.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test904ObjectAllocation
+} // namespace art
+
diff --git a/test/904-object-allocation/tracking.h b/test/904-object-allocation/tracking.h
new file mode 100644
index 0000000000..21c1837523
--- /dev/null
+++ b/test/904-object-allocation/tracking.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
+#define ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test904ObjectAllocation {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test904ObjectAllocation
+} // namespace art
+
+#endif // ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
diff --git a/test/905-object-free/build b/test/905-object-free/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/905-object-free/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/905-object-free/expected.txt b/test/905-object-free/expected.txt
new file mode 100644
index 0000000000..31b73185f9
--- /dev/null
+++ b/test/905-object-free/expected.txt
@@ -0,0 +1,10 @@
+ObjectFree tag=1
+---
+ObjectFree tag=10
+ObjectFree tag=100
+ObjectFree tag=1000
+---
+---
+---
+---
+---
diff --git a/test/905-object-free/info.txt b/test/905-object-free/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/905-object-free/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/905-object-free/run b/test/905-object-free/run
new file mode 100755
index 0000000000..753b742681
--- /dev/null
+++ b/test/905-object-free/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=905-object-free,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/905-object-free/src/Main.java b/test/905-object-free/src/Main.java
new file mode 100644
index 0000000000..7b52e29285
--- /dev/null
+++ b/test/905-object-free/src/Main.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ setupObjectFreeCallback();
+
+ enableFreeTracking(true);
+ run(l);
+
+ enableFreeTracking(false);
+ run(l);
+ }
+
+ private static void run(ArrayList<Object> l) {
+ allocate(l, 1);
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ System.out.println("---");
+
+ // Note: the reporting will not depend on the heap layout (which could be unstable). Walking
+ // the tag table should give us a stable output order.
+ for (int i = 10; i <= 1000; i *= 10) {
+ allocate(l, i);
+ }
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ System.out.println("---");
+
+ Runtime.getRuntime().gc();
+
+ System.out.println("---");
+ }
+
+ private static void allocate(ArrayList<Object> l, long tag) {
+ Object obj = new Object();
+ l.add(obj);
+ setTag(obj, tag);
+ }
+
+ private static native void setupObjectFreeCallback();
+ private static native void enableFreeTracking(boolean enable);
+ private static native void setTag(Object o, long tag);
+}
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
new file mode 100644
index 0000000000..5905d481cf
--- /dev/null
+++ b/test/905-object-free/tracking_free.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tracking_free.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+#include "ti-agent/common_load.h"
+#include "utils.h"
+
+namespace art {
+namespace Test905ObjectFree {
+
+static void JNICALL ObjectFree(jvmtiEnv* ti_env ATTRIBUTE_UNUSED, jlong tag) {
+ printf("ObjectFree tag=%zu\n", static_cast<size_t>(tag));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectFreeCallback(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.ObjectFree = ObjectFree;
+
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error setting callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableFreeTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED,
+ jboolean enable) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_OBJECT_FREE,
+ nullptr);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling object-free callbacks: %s\n", err);
+ }
+}
+
+// OnLoad just caches the jvmti env for the test's native methods.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test905ObjectFree
+} // namespace art
diff --git a/test/905-object-free/tracking_free.h b/test/905-object-free/tracking_free.h
new file mode 100644
index 0000000000..ba4aa43ffe
--- /dev/null
+++ b/test/905-object-free/tracking_free.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
+#define ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test905ObjectFree {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test905ObjectFree
+} // namespace art
+
+#endif // ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
diff --git a/test/906-iterate-heap/build b/test/906-iterate-heap/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/906-iterate-heap/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
new file mode 100644
index 0000000000..72cd47dd62
--- /dev/null
+++ b/test/906-iterate-heap/expected.txt
@@ -0,0 +1,2 @@
+[{tag=1, class-tag=0, size=8, length=-1}, {tag=2, class-tag=100, size=8, length=-1}, {tag=3, class-tag=100, size=8, length=-1}, {tag=4, class-tag=0, size=32, length=5}, {tag=100, class-tag=0, size=<class>, length=-1}]
+[{tag=11, class-tag=0, size=8, length=-1}, {tag=12, class-tag=110, size=8, length=-1}, {tag=13, class-tag=110, size=8, length=-1}, {tag=14, class-tag=0, size=32, length=5}, {tag=110, class-tag=0, size=<class>, length=-1}]
diff --git a/test/906-iterate-heap/info.txt b/test/906-iterate-heap/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/906-iterate-heap/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
new file mode 100644
index 0000000000..ab1d8d8cb1
--- /dev/null
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "iterate_heap.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedPrimitiveArray.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test906IterateHeap {
+
+class IterationConfig {
+ public:
+ IterationConfig() {}
+ virtual ~IterationConfig() {}
+
+ virtual jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) = 0;
+};
+
+static jint JNICALL HeapIterationCallback(jlong class_tag,
+ jlong size,
+ jlong* tag_ptr,
+ jint length,
+ void* user_data) {
+ IterationConfig* config = reinterpret_cast<IterationConfig*>(user_data);
+ return config->Handle(class_tag, size, tag_ptr, length);
+}
+
+static bool Run(jint heap_filter, jclass klass_filter, IterationConfig* config) {
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_iteration_callback = HeapIterationCallback;
+
+ jvmtiError ret = jvmti_env->IterateThroughHeap(heap_filter,
+ klass_filter,
+ &callbacks,
+ config);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Failure running IterateThroughHeap: %s\n", err);
+ return false;
+ }
+ return true;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_iterateThroughHeapCount(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter,
+ jint stop_after) {
+ class CountIterationConfig : public IterationConfig {
+ public:
+ CountIterationConfig(jint _counter, jint _stop_after)
+ : counter(_counter),
+ stop_after(_stop_after) {
+ }
+
+ jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ counter++;
+ if (counter == stop_after) {
+ return JVMTI_VISIT_ABORT;
+ }
+ return 0;
+ }
+
+ jint counter;
+ const jint stop_after;
+ };
+
+ CountIterationConfig config(0, stop_after);
+ Run(heap_filter, klass_filter, &config);
+
+ if (config.counter > config.stop_after) {
+ printf("Error: more objects visited than signaled.");
+ }
+
+ return config.counter;
+}
+
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_iterateThroughHeapData(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter,
+ jlongArray class_tags,
+ jlongArray sizes,
+ jlongArray tags,
+ jintArray lengths) {
+ class DataIterationConfig : public IterationConfig {
+ public:
+ jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
+ class_tags_.push_back(class_tag);
+ sizes_.push_back(size);
+ tags_.push_back(*tag_ptr);
+ lengths_.push_back(length);
+
+ return 0; // Continue.
+ }
+
+ std::vector<jlong> class_tags_;
+ std::vector<jlong> sizes_;
+ std::vector<jlong> tags_;
+ std::vector<jint> lengths_;
+ };
+
+ DataIterationConfig config;
+ if (!Run(heap_filter, klass_filter, &config)) {
+ return -1;
+ }
+
+ ScopedLongArrayRW s_class_tags(env, class_tags);
+ ScopedLongArrayRW s_sizes(env, sizes);
+ ScopedLongArrayRW s_tags(env, tags);
+ ScopedIntArrayRW s_lengths(env, lengths);
+
+ for (size_t i = 0; i != config.class_tags_.size(); ++i) {
+ s_class_tags[i] = config.class_tags_[i];
+ s_sizes[i] = config.sizes_[i];
+ s_tags[i] = config.tags_[i];
+ s_lengths[i] = config.lengths_[i];
+ }
+
+ return static_cast<jint>(config.class_tags_.size());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_iterateThroughHeapAdd(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter) {
+ class AddIterationConfig : public IterationConfig {
+ public:
+ AddIterationConfig() {}
+
+ jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr,
+ jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ jlong current_tag = *tag_ptr;
+ if (current_tag != 0) {
+ *tag_ptr = current_tag + 10;
+ }
+ return 0;
+ }
+ };
+
+ AddIterationConfig config;
+ Run(heap_filter, klass_filter, &config);
+}
+
+// Only grab the jvmti env on load; no other setup is needed for this test.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test906IterateHeap
+} // namespace art
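A side note on the error path in Run() above: the string returned by GetErrorName is allocated by the JVMTI environment, so a fuller variant would release it with Deallocate once printed. The snippet below is a sketch of that pattern; the function and include names are illustrative, not part of the test.

    #include <stdio.h>
    #include <jvmti.h>   // assumed include path; ART tests use "openjdkjvmti/jvmti.h"

    // Print a readable name for a jvmtiError and free the name buffer afterwards.
    static void ReportJvmtiError(jvmtiEnv* jvmti, const char* what, jvmtiError error) {
      char* name = nullptr;
      if (jvmti->GetErrorName(error, &name) == JVMTI_ERROR_NONE && name != nullptr) {
        printf("Failure running %s: %s\n", what, name);
        jvmti->Deallocate(reinterpret_cast<unsigned char*>(name));  // GetErrorName allocates.
      } else {
        printf("Failure running %s: error %d\n", what, static_cast<int>(error));
      }
    }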
diff --git a/test/906-iterate-heap/iterate_heap.h b/test/906-iterate-heap/iterate_heap.h
new file mode 100644
index 0000000000..f25cdbaf49
--- /dev/null
+++ b/test/906-iterate-heap/iterate_heap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
+#define ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test906IterateHeap {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test906IterateHeap
+} // namespace art
+
+#endif // ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
diff --git a/test/906-iterate-heap/run b/test/906-iterate-heap/run
new file mode 100755
index 0000000000..3e135a378d
--- /dev/null
+++ b/test/906-iterate-heap/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=906-iterate-heap,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
new file mode 100644
index 0000000000..544a3656b2
--- /dev/null
+++ b/test/906-iterate-heap/src/Main.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ A a = new A();
+ B b = new B();
+ B b2 = new B();
+ C c = new C();
+ A[] aArray = new A[5];
+
+ setTag(a, 1);
+ setTag(b, 2);
+ setTag(b2, 3);
+ setTag(aArray, 4);
+ setTag(B.class, 100);
+
+ int all = iterateThroughHeapCount(0, null, Integer.MAX_VALUE);
+ int tagged = iterateThroughHeapCount(HEAP_FILTER_OUT_UNTAGGED, null, Integer.MAX_VALUE);
+ int untagged = iterateThroughHeapCount(HEAP_FILTER_OUT_TAGGED, null, Integer.MAX_VALUE);
+ int taggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_UNTAGGED, null,
+ Integer.MAX_VALUE);
+ int untaggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_TAGGED, null,
+ Integer.MAX_VALUE);
+
+ if (all != tagged + untagged) {
+ throw new IllegalStateException("Instances: " + all + " != " + tagged + " + " + untagged);
+ }
+ if (all != taggedClass + untaggedClass) {
+ throw new IllegalStateException("By class: " + all + " != " + taggedClass + " + " +
+ untaggedClass);
+ }
+ if (tagged != 5) {
+ throw new IllegalStateException(tagged + " tagged objects");
+ }
+ if (taggedClass != 2) {
+      throw new IllegalStateException(taggedClass + " objects with tagged class");
+ }
+ if (all == tagged) {
+ throw new IllegalStateException("All objects tagged");
+ }
+ if (all == taggedClass) {
+ throw new IllegalStateException("All objects have tagged class");
+ }
+
+ long classTags[] = new long[100];
+ long sizes[] = new long[100];
+ long tags[] = new long[100];
+ int lengths[] = new int[100];
+
+ int n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
+ System.out.println(sort(n, classTags, sizes, tags, lengths));
+
+ iterateThroughHeapAdd(HEAP_FILTER_OUT_UNTAGGED, null);
+ n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
+ System.out.println(sort(n, classTags, sizes, tags, lengths));
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ static class C {
+ }
+
+ static class HeapElem implements Comparable<HeapElem> {
+ long classTag;
+ long size;
+ long tag;
+ int length;
+
+ public int compareTo(HeapElem other) {
+ if (tag != other.tag) {
+ return Long.compare(tag, other.tag);
+ }
+ if (classTag != other.classTag) {
+ return Long.compare(classTag, other.classTag);
+ }
+ if (size != other.size) {
+ return Long.compare(size, other.size);
+ }
+ return Integer.compare(length, other.length);
+ }
+
+ public String toString() {
+ return "{tag=" + tag + ", class-tag=" + classTag + ", size=" +
+ (tag >= 100 ? "<class>" : size) // Class size is dependent on 32-bit vs 64-bit,
+ // so strip it.
+ + ", length=" + length + "}";
+ }
+ }
+
+ private static ArrayList<HeapElem> sort(int n, long classTags[], long sizes[], long tags[],
+ int lengths[]) {
+ ArrayList<HeapElem> ret = new ArrayList<HeapElem>(n);
+ for (int i = 0; i < n; i++) {
+ HeapElem elem = new HeapElem();
+ elem.classTag = classTags[i];
+ elem.size = sizes[i];
+ elem.tag = tags[i];
+ elem.length = lengths[i];
+ ret.add(elem);
+ }
+ Collections.sort(ret);
+ return ret;
+ }
+
+ private static native void setTag(Object o, long tag);
+ private static native long getTag(Object o);
+
+ private final static int HEAP_FILTER_OUT_TAGGED = 0x4;
+ private final static int HEAP_FILTER_OUT_UNTAGGED = 0x8;
+ private final static int HEAP_FILTER_OUT_CLASS_TAGGED = 0x10;
+ private final static int HEAP_FILTER_OUT_CLASS_UNTAGGED = 0x20;
+
+ private static native int iterateThroughHeapCount(int heapFilter,
+ Class<?> klassFilter, int stopAfter);
+ private static native int iterateThroughHeapData(int heapFilter,
+ Class<?> klassFilter, long classTags[], long sizes[], long tags[], int lengths[]);
+  private static native void iterateThroughHeapAdd(int heapFilter,
+ Class<?> klassFilter);
+}
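The HEAP_FILTER_OUT_* constants above mirror the heap-filter bit flags defined by the JVMTI specification. A compile-time cross-check against the native header could look like the sketch below; it assumes the same jvmti.h that the C++ test code uses.

    #include <jvmti.h>   // defines the JVMTI_HEAP_FILTER_* flags

    // The Java-side constants in Main.java are copies of these spec-defined bits.
    static_assert(JVMTI_HEAP_FILTER_TAGGED == 0x4, "skip objects that carry a tag");
    static_assert(JVMTI_HEAP_FILTER_UNTAGGED == 0x8, "skip objects without a tag");
    static_assert(JVMTI_HEAP_FILTER_CLASS_TAGGED == 0x10, "skip objects whose class is tagged");
    static_assert(JVMTI_HEAP_FILTER_CLASS_UNTAGGED == 0x20, "skip objects whose class is untagged");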
diff --git a/test/Android.bp b/test/Android.bp
index d17261cd68..45673f55ff 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -238,18 +238,28 @@ art_cc_test_library {
shared_libs: ["libartd"],
}
-art_cc_test_library {
- name: "libtiagent",
+art_cc_defaults {
+ name: "libtiagent-defaults",
defaults: ["libartagent-defaults"],
srcs: [
"ti-agent/common_load.cc",
"901-hello-ti-agent/basics.cc",
"902-hello-transformation/transform.cc",
"903-hello-tagging/tagging.cc",
+ "904-object-allocation/tracking.cc",
+ "905-object-free/tracking_free.cc",
+ "906-iterate-heap/iterate_heap.cc",
],
shared_libs: [
- "libart",
"libbase",
+ ],
+}
+
+art_cc_test_library {
+ name: "libtiagent",
+ defaults: ["libtiagent-defaults"],
+ shared_libs: [
+ "libart",
"libopenjdkjvmti",
],
}
@@ -257,18 +267,11 @@ art_cc_test_library {
art_cc_test_library {
name: "libtiagentd",
defaults: [
- "libartagent-defaults",
+ "libtiagent-defaults",
"art_debug_defaults",
],
- srcs: [
- "ti-agent/common_load.cc",
- "901-hello-ti-agent/basics.cc",
- "902-hello-transformation/transform.cc",
- "903-hello-tagging/tagging.cc",
- ],
shared_libs: [
"libartd",
- "libbase",
"libopenjdkjvmtid",
],
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index a858c75fc2..64ff5ba2f2 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,14 +263,16 @@ endif
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 902-hello-transformation and 903-hello-tagging
-# isn't supported in current form due to linker
+# Tests 902 through 906 are not supported in their current form due to linker
# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
569-checker-pattern-replacement \
902-hello-transformation \
- 903-hello-tagging
+ 903-hello-tagging \
+ 904-object-allocation \
+ 905-object-free \
+ 906-iterate-heap \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -486,8 +488,11 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
+# Test 906 iterates over the heap with different filter options. No instances should be created
+# between those runs, so that the counts it checks stay precise.
TEST_ART_BROKEN_JIT_RUN_TESTS := \
- 137-cfi
+ 137-cfi \
+ 906-iterate-heap \
ifneq (,$(filter jit,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/IMTA/Interfaces.java b/test/IMTA/Interfaces.java
new file mode 100644
index 0000000000..4322f157e5
--- /dev/null
+++ b/test/IMTA/Interfaces.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Interfaces {
+ interface A {
+ public void foo();
+ }
+ interface Z {
+ public void foo();
+ }
+}
diff --git a/test/IMTB/Interfaces.java b/test/IMTB/Interfaces.java
new file mode 100644
index 0000000000..f252624d9c
--- /dev/null
+++ b/test/IMTB/Interfaces.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Interfaces {
+ interface A {
+ public void bar();
+ public void foo();
+ }
+ interface L {
+ public void foo();
+ }
+ interface Z {
+ public void foo();
+ }
+}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index d8f42a2b4b..3535f323d0 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -44,7 +44,7 @@ SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
# Value in seconds
if [ "$ART_USE_READ_BARRIER" = "true" ]; then
- TIME_OUT_VALUE=1800 # 30 minutes.
+ TIME_OUT_VALUE=2400 # 40 minutes.
else
TIME_OUT_VALUE=1200 # 20 minutes.
fi
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 4c7df97374..c4126365fc 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -22,13 +22,19 @@
#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "common_load.h"
#include "901-hello-ti-agent/basics.h"
#include "902-hello-transformation/transform.h"
#include "903-hello-tagging/tagging.h"
+#include "904-object-allocation/tracking.h"
+#include "905-object-free/tracking_free.h"
+#include "906-iterate-heap/iterate_heap.h"
namespace art {
+jvmtiEnv* jvmti_env;
+
using OnLoad = jint (*)(JavaVM* vm, char* options, void* reserved);
using OnAttach = jint (*)(JavaVM* vm, char* options, void* reserved);
@@ -43,6 +49,9 @@ AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
{ "902-hello-transformation", Test902HelloTransformation::OnLoad, nullptr },
{ "903-hello-tagging", Test903HelloTagging::OnLoad, nullptr },
+ { "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
+ { "905-object-free", Test905ObjectFree::OnLoad, nullptr },
+ { "906-iterate-heap", Test906IterateHeap::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
diff --git a/test/ti-agent/common_load.h b/test/ti-agent/common_load.h
new file mode 100644
index 0000000000..fac94b4c6e
--- /dev/null
+++ b/test/ti-agent/common_load.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+
+#include "openjdkjvmti/jvmti.h"
+
+namespace art {
+
+extern jvmtiEnv* jvmti_env;
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_