-rw-r--r--  PREUPLOAD.cfg | 2
-rw-r--r--  build/Android.bp | 2
-rw-r--r--  build/Android.gtest.mk | 2
-rw-r--r--  build/art.go | 4
-rw-r--r--  compiler/compiler.h | 3
-rw-r--r--  compiler/debug/elf_debug_line_writer.h | 6
-rw-r--r--  compiler/debug/elf_debug_loc_writer.h | 6
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 7
-rw-r--r--  compiler/dex/dex_to_dex_compiler.h | 7
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 72
-rw-r--r--  compiler/driver/compiler_driver.cc | 146
-rw-r--r--  compiler/driver/compiler_driver.h | 50
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 1
-rw-r--r--  compiler/driver/dex_compilation_unit.cc | 2
-rw-r--r--  compiler/driver/dex_compilation_unit.h | 8
-rw-r--r--  compiler/image_writer.cc | 11
-rw-r--r--  compiler/image_writer.h | 10
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64.cc | 10
-rw-r--r--  compiler/oat_test.cc | 30
-rw-r--r--  compiler/oat_writer.cc | 20
-rw-r--r--  compiler/optimizing/builder.h | 20
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 141
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 510
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 13
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 123
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 155
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 30
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 140
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 24
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 38
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 42
-rw-r--r--  compiler/optimizing/inliner.cc | 61
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 263
-rw-r--r--  compiler/optimizing/instruction_builder.h | 29
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 50
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 55
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 49
-rw-r--r--  compiler/optimizing/nodes.cc | 9
-rw-r--r--  compiler/optimizing/nodes.h | 26
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 46
-rw-r--r--  compiler/optimizing/optimizing_compiler.h | 9
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 51
-rw-r--r--  compiler/optimizing/reference_type_propagation.h | 3
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc | 1
-rw-r--r--  compiler/optimizing/sharpening.cc | 143
-rw-r--r--  compiler/optimizing/sharpening.h | 12
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 8
-rw-r--r--  compiler/optimizing/ssa_builder.h | 3
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 193
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 19
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 144
-rw-r--r--  dex2oat/dex2oat.cc | 6
-rw-r--r--  dex2oat/dex2oat_test.cc | 2
-rw-r--r--  oatdump/oatdump.cc | 135
-rw-r--r--  oatdump/oatdump_test.cc | 1
-rw-r--r--  patchoat/patchoat.cc | 4
-rw-r--r--  profman/profile_assistant_test.cc | 99
-rw-r--r--  profman/profman.cc | 326
-rw-r--r--  runtime/Android.bp | 5
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 277
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 251
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 262
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 258
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.S | 2
-rw-r--r--  runtime/arch/stub_test.cc | 4
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 260
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 79
-rw-r--r--  runtime/art_field-inl.h | 15
-rw-r--r--  runtime/art_field.cc | 4
-rw-r--r--  runtime/art_field.h | 2
-rw-r--r--  runtime/art_method-inl.h | 13
-rw-r--r--  runtime/art_method.cc | 2
-rw-r--r--  runtime/art_method.h | 6
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/atomic.h | 9
-rw-r--r--  runtime/base/arena_allocator.cc | 2
-rw-r--r--  runtime/base/mutex.cc | 54
-rw-r--r--  runtime/base/mutex.h | 25
-rw-r--r--  runtime/bit_memory_region.h | 4
-rw-r--r--  runtime/cha.cc | 1
-rw-r--r--  runtime/class_linker-inl.h | 31
-rw-r--r--  runtime/class_linker.cc | 305
-rw-r--r--  runtime/class_linker.h | 59
-rw-r--r--  runtime/class_linker_test.cc | 12
-rw-r--r--  runtime/class_table.cc | 19
-rw-r--r--  runtime/class_table.h | 6
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 8
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 6
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 17
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 8
-rw-r--r--  runtime/gc/allocation_record.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 64
-rw-r--r--  runtime/gc/heap-inl.h | 2
-rw-r--r--  runtime/gc/heap.cc | 186
-rw-r--r--  runtime/gc/heap.h | 70
-rw-r--r--  runtime/gc/reference_processor.cc | 4
-rw-r--r--  runtime/gc/space/image_space.cc | 33
-rw-r--r--  runtime/gc/system_weak.h | 2
-rw-r--r--  runtime/handle_scope-inl.h | 2
-rw-r--r--  runtime/handle_scope_test.cc | 2
-rw-r--r--  runtime/hprof/hprof.cc | 27
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/indirect_reference_table-inl.h | 2
-rw-r--r--  runtime/indirect_reference_table.cc | 1
-rw-r--r--  runtime/instrumentation.cc | 9
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 104
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc | 95
-rw-r--r--  runtime/java_vm_ext.cc | 6
-rw-r--r--  runtime/jdwp/jdwp.h | 5
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 37
-rw-r--r--  runtime/jdwp/object_registry.cc | 1
-rw-r--r--  runtime/jit/jit.cc | 15
-rw-r--r--  runtime/jit/jit_code_cache.cc | 11
-rw-r--r--  runtime/jit/profile_compilation_info.cc | 50
-rw-r--r--  runtime/jit/profile_compilation_info.h | 8
-rw-r--r--  runtime/mem_map.cc | 41
-rw-r--r--  runtime/mem_map.h | 28
-rw-r--r--  runtime/mirror/class-inl.h | 28
-rw-r--r--  runtime/mirror/class.cc | 24
-rw-r--r--  runtime/mirror/class.h | 20
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 110
-rw-r--r--  runtime/mirror/dex_cache.cc | 20
-rw-r--r--  runtime/mirror/dex_cache.h | 77
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 3
-rw-r--r--  runtime/mirror/method_handle_impl.h | 2
-rw-r--r--  runtime/mirror/method_type_test.cc | 7
-rw-r--r--  runtime/mirror/object-inl.h | 87
-rw-r--r--  runtime/mirror/object.h | 34
-rw-r--r--  runtime/mirror/string.h | 5
-rw-r--r--  runtime/monitor.cc | 60
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 12
-rw-r--r--  runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc | 10
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 9
-rw-r--r--  runtime/native/java_lang_DexCache.cc | 23
-rw-r--r--  runtime/native/java_lang_String.cc | 2
-rw-r--r--  runtime/native/java_lang_Thread.cc | 2
-rw-r--r--  runtime/native/java_lang_reflect_Proxy.cc | 2
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/oat_file_manager.cc | 3
-rw-r--r--  runtime/oat_quick_method_header.cc | 4
-rw-r--r--  runtime/openjdkjvm/OpenjdkJvm.cc | 2
-rw-r--r--  runtime/openjdkjvmti/ti_class.cc | 211
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc | 8
-rw-r--r--  runtime/openjdkjvmti/ti_monitor.cc | 6
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc | 232
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h | 24
-rw-r--r--  runtime/openjdkjvmti/ti_stack.cc | 5
-rw-r--r--  runtime/openjdkjvmti/ti_thread.cc | 8
-rw-r--r--  runtime/openjdkjvmti/ti_threadgroup.cc | 2
-rw-r--r--  runtime/quick_exception_handler.cc | 2
-rw-r--r--  runtime/runtime.cc | 39
-rw-r--r--  runtime/scoped_thread_state_change-inl.h | 4
-rw-r--r--  runtime/scoped_thread_state_change.h | 8
-rw-r--r--  runtime/stack.cc | 31
-rw-r--r--  runtime/stack.h | 12
-rw-r--r--  runtime/stack_map.cc | 14
-rw-r--r--  runtime/stack_map.h | 386
-rw-r--r--  runtime/thread-inl.h | 29
-rw-r--r--  runtime/thread.cc | 64
-rw-r--r--  runtime/thread.h | 15
-rw-r--r--  runtime/thread_list.cc | 74
-rw-r--r--  runtime/thread_list.h | 4
-rw-r--r--  runtime/transaction.cc | 171
-rw-r--r--  runtime/transaction.h | 78
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 19
-rw-r--r--  runtime/verifier/method_verifier.cc | 22
-rw-r--r--  runtime/verify_object-inl.h | 22
-rw-r--r--  runtime/verify_object.cc | 47
-rw-r--r--  runtime/verify_object.h | 11
-rw-r--r--  test/004-NativeAllocations/src/Main.java | 12
-rw-r--r--  test/155-java-set-resolved-type/expected.txt | 1
-rw-r--r--  test/155-java-set-resolved-type/info.txt | 2
-rw-r--r--  test/155-java-set-resolved-type/src-ex/TestInterface.java | 19
-rw-r--r--  test/155-java-set-resolved-type/src/Main.java | 94
-rw-r--r--  test/155-java-set-resolved-type/src/TestImplementation.java | 19
-rw-r--r--  test/155-java-set-resolved-type/src/TestInterface.java | 19
-rw-r--r--  test/155-java-set-resolved-type/src/TestParameter.java | 19
-rw-r--r--  test/156-register-dex-file-multi-loader/expected.txt | 0
-rw-r--r--  test/156-register-dex-file-multi-loader/info.txt | 2
-rw-r--r--  test/156-register-dex-file-multi-loader/src/Main.java | 88
-rw-r--r--  test/482-checker-loop-back-edge-use/src/Main.java | 9
-rw-r--r--  test/552-checker-sharpening/src/Main.java | 6
-rw-r--r--  test/626-const-class-linking/clear_dex_cache_types.cc | 6
-rw-r--r--  test/626-const-class-linking/src/Main.java | 4
-rwxr-xr-x  test/636-arm64-veneer-pool/build | 22
-rw-r--r--  test/636-arm64-veneer-pool/expected.txt | 1
-rw-r--r--  test/636-arm64-veneer-pool/info.txt | 1
-rw-r--r--  test/636-arm64-veneer-pool/src/Main.java | 4223
-rw-r--r--  test/636-wrong-static-access/expected.txt | 1
-rw-r--r--  test/636-wrong-static-access/info.txt | 2
-rwxr-xr-x  test/636-wrong-static-access/run | 20
-rw-r--r--  test/636-wrong-static-access/src-ex/Foo.java | 38
-rw-r--r--  test/636-wrong-static-access/src/Holder.java | 19
-rw-r--r--  test/636-wrong-static-access/src/Main.java | 39
-rw-r--r--  test/636-wrong-static-access/src2/Holder.java | 19
-rw-r--r--  test/911-get-stack-trace/src/PrintThread.java | 3
-rw-r--r--  test/912-classes/classes.cc | 289
-rw-r--r--  test/912-classes/src/Main.java | 82
-rw-r--r--  test/921-hello-failure/expected.txt | 21
-rw-r--r--  test/921-hello-failure/src/FieldChange.java | 61
-rw-r--r--  test/921-hello-failure/src/Main.java | 7
-rw-r--r--  test/921-hello-failure/src/MethodChange.java | 57
-rw-r--r--  test/921-hello-failure/src/MissingField.java | 58
-rw-r--r--  test/921-hello-failure/src/MissingMethod.java | 57
-rw-r--r--  test/921-hello-failure/src/NewField.java | 60
-rw-r--r--  test/921-hello-failure/src/NewMethod.java | 60
-rw-r--r--  test/921-hello-failure/src/Transform3.java | 24
-rw-r--r--  test/921-hello-failure/src/Transform4.java | 25
-rw-r--r--  test/921-hello-failure/src/Verification.java | 82
-rw-r--r--  test/924-threads/src/Main.java | 17
-rw-r--r--  test/925-threadgroups/src/Main.java | 21
-rw-r--r--  test/931-agent-thread/agent_thread.cc | 19
-rw-r--r--  test/942-private-recursive/src/Transform.java | 8
-rw-r--r--  test/956-methodhandles/src/Main.java | 7
-rw-r--r--  test/Android.run-test.mk | 76
-rw-r--r--  test/knownfailures.json | 338
-rw-r--r--  test/testrunner/env.py | 213
-rwxr-xr-x  test/testrunner/testrunner.py | 797
-rw-r--r--  tools/ahat/src/InstanceUtils.java | 21
-rw-r--r--  tools/ahat/test-dump/Main.java | 2
-rw-r--r--  tools/ahat/test/InstanceUtilsTest.java | 70
-rw-r--r--  tools/cpp-define-generator/Android.bp | 13
-rw-r--r--  tools/cpp-define-generator/constant_jit.def | 1
-rwxr-xr-x  tools/cpp-define-generator/presubmit-check-files-up-to-date | 67
-rwxr-xr-x  tools/cpp-define-generator/verify-asm-support | 101
-rwxr-xr-x  tools/run-libcore-tests.sh | 5
228 files changed, 12884 insertions, 2761 deletions
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000000..cf1832beb4
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,2 @@
+[Hook Scripts]
+check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
diff --git a/build/Android.bp b/build/Android.bp
index cd9d74a934..b1553c759c 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -70,6 +70,8 @@ art_global_defaults {
"-DART_STACK_OVERFLOW_GAP_mips64=16384",
"-DART_STACK_OVERFLOW_GAP_x86=8192",
"-DART_STACK_OVERFLOW_GAP_x86_64=8192",
+ // Enable thread annotations for std::mutex, etc.
+ "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
target: {
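
[Editor's note] The new define turns on libc++'s Clang thread-safety annotations, so standard mutexes participate in -Wthread-safety checking. A minimal sketch of what this enables; the class and members here are illustrative, not ART code:

    // Illustrative only: with -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS,
    // libc++ marks std::mutex as a Clang "capability", so building with
    // -Wthread-safety verifies guarded_by declarations against real locking.
    #include <mutex>

    class GuardedCounter {
     public:
      void Increment() {
        std::lock_guard<std::mutex> lock(mu_);  // acquires the capability mu_
        value_++;                               // OK: mu_ is held here
      }
      // int Read() { return value_; }  // would warn: value_ read without mu_

     private:
      std::mutex mu_;
      int value_ __attribute__((guarded_by(mu_))) = 0;
    };
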
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e5258087db..5c49f193a5 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -93,7 +93,7 @@ ART_GTEST_class_table_test_DEX_DEPS := XandY
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
diff --git a/build/art.go b/build/art.go
index baa6e59b55..e7f7e2121e 100644
--- a/build/art.go
+++ b/build/art.go
@@ -68,10 +68,6 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
asflags = append(asflags,
"-DART_USE_READ_BARRIER=1",
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
-
- // Temporarily override -fstack-protector-strong with -fstack-protector to avoid a major
- // slowdown with the read barrier config. b/26744236.
- cflags = append(cflags, "-fstack-protector")
}
if envTrue(ctx, "ART_USE_VIXL_ARM_BACKEND") {
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 2ca0b77a73..908d3669ed 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -27,6 +27,7 @@ namespace jit {
class JitCodeCache;
}
namespace mirror {
+ class ClassLoader;
class DexCache;
}
@@ -63,7 +64,7 @@ class Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const = 0;
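
[Editor's note] This signature change is the thread running through most of the compiler-side diffs below: the class loader is passed as a Handle<mirror::ClassLoader> instead of a JNI jobject, removing a decode-and-rehandle step at every use site. A rough before/after sketch using the handle APIs visible in this diff (Use() is a hypothetical consumer, not a real function):

    // Sketch only; types are ART's, Use() is a stand-in for any callee.
    void Use(Handle<mirror::ClassLoader> class_loader);

    // Before: each callee decoded the jobject into a fresh handle.
    void CompileOld(jobject class_loader_obj) {
      ScopedObjectAccess soa(Thread::Current());
      StackHandleScope<1> hs(soa.Self());
      Handle<mirror::ClassLoader> class_loader(
          hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_obj)));
      Use(class_loader);
    }

    // After: the caller creates one handle; callees take it directly.
    void CompileNew(Handle<mirror::ClassLoader> class_loader) {
      Use(class_loader);  // no decode, no extra handle scope
    }
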
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 18a9165854..cdd1e53f94 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -104,10 +104,10 @@ class ElfDebugLineWriter {
for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
- const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
- const int32_t dex = stack_map.GetDexPc(encoding.stack_map_encoding);
+ const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
+ const int32_t dex = stack_map.GetDexPc(encoding.stack_map.encoding);
pc2dex_map.push_back({pc, dex});
- if (stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+ if (stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
// Guess that the first map with local variables is the end of prologue.
prologue_end = std::min(prologue_end, pc);
}
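
[Editor's note] The rename from encoding.stack_map_encoding to encoding.stack_map.encoding, here and in the loc writer below, suggests the flat per-field encoding member became a nested table descriptor. An assumed, simplified shape implied by the new spelling; the real definitions are in runtime/stack_map.h, which this change rewrites heavily:

    // Assumed shape only, not the actual runtime/stack_map.h definitions.
    #include <cstdint>

    struct StackMapEncoding { /* bit offsets/sizes of each stack map field */ };

    struct CodeInfoEncoding {
      struct StackMapTableEncoding {
        StackMapEncoding encoding;  // per-entry bit layout
        uint32_t num_entries;       // table size
      } stack_map;                  // was: StackMapEncoding stack_map_encoding;
    };
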
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index bce538743b..cbfdbddd1d 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -104,7 +104,7 @@ std::vector<VariableLocation> GetVariableLocations(
for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
- if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+ if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
// The compiler creates stackmaps without register maps at the start of
// basic blocks in order to keep instruction-accurate line number mapping.
// However, we never stop at those (breakpoint locations always have map).
@@ -112,7 +112,7 @@ std::vector<VariableLocation> GetVariableLocations(
// The main reason for this is to save space by avoiding undefined gaps.
continue;
}
- const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
+ const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
DCHECK_LE(pc_offset, method_info->code_size);
DCHECK_LE(compilation_unit_code_address, method_info->code_address);
const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -136,7 +136,7 @@ std::vector<VariableLocation> GetVariableLocations(
}
// Check that the stack map is in the requested range.
- uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map_encoding);
+ uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
// The variable is not in scope at this PC. Therefore omit the entry.
// Note that this is different to None() entry which means in scope, but unknown location.
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index d4f6545c59..76aeaa55d7 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -284,16 +284,13 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
}
uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(unit_.GetClassLoader())));
ClassLinker* class_linker = unit_.GetClassLinker();
ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
GetDexFile(),
method_idx,
unit_.GetDexCache(),
- class_loader,
+ unit_.GetClassLoader(),
/* referrer */ nullptr,
kVirtual);
@@ -330,7 +327,7 @@ CompiledMethod* ArtCompileDEX(
InvokeType invoke_type ATTRIBUTE_UNUSED,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level) {
DCHECK(driver != nullptr);
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 0a00d45297..00c596d60e 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
#include "dex_file.h"
+#include "handle.h"
#include "invoke_type.h"
namespace art {
@@ -25,6 +26,10 @@ namespace art {
class CompiledMethod;
class CompilerDriver;
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
namespace optimizer {
enum class DexToDexCompilationLevel {
@@ -40,7 +45,7 @@ CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f056dd3c00..582330611d 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,17 +31,12 @@
namespace art {
-inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit) {
- return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Ptr();
-}
-
inline mirror::Class* CompilerDriver::ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, dex::TypeIndex cls_index,
const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
mirror::Class* cls = mUnit->GetClassLinker()->ResolveType(
*mUnit->GetDexFile(), cls_index, dex_cache, class_loader);
DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending());
@@ -56,7 +51,7 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
@@ -87,7 +82,7 @@ inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
is_static);
}
@@ -135,70 +130,11 @@ inline bool CompilerDriver::CanAccessResolvedMember<ArtMethod>(
return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx);
}
-template <typename ArtMember>
-inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer(
- mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index) {
- DCHECK(resolved_member->IsStatic());
- if (LIKELY(referrer_class != nullptr)) {
- ObjPtr<mirror::Class> members_class = resolved_member->GetDeclaringClass();
- if (members_class == referrer_class) {
- *storage_index = members_class->GetDexTypeIndex();
- return std::make_pair(true, true);
- }
- if (CanAccessResolvedMember<ArtMember>(
- referrer_class, members_class.Ptr(), resolved_member, dex_cache, member_idx)) {
- // We have the resolved member, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- const DexFile* dex_file = dex_cache->GetDexFile();
- dex::TypeIndex storage_idx(DexFile::kDexNoIndex16);
- if (LIKELY(members_class->GetDexCache() == dex_cache)) {
- // common case where the dex cache of both the referrer and the member are the same,
- // no need to search the dex file
- storage_idx = members_class->GetDexTypeIndex();
- } else {
- // Search dex file for localized ssb index, may fail if member's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- storage_idx = resolved_member->GetDeclaringClass()->FindTypeIndexInOtherDexFile(*dex_file);
- }
- if (storage_idx.IsValid()) {
- *storage_index = storage_idx;
- return std::make_pair(true, !resolved_member->IsFinal());
- }
- }
- }
- // Conservative defaults.
- *storage_index = dex::TypeIndex(DexFile::kDexNoIndex16);
- return std::make_pair(false, false);
-}
-
-inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtField* resolved_field, uint16_t field_idx, dex::TypeIndex* storage_index) {
- return IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_field, field_idx, storage_index);
-}
-
-inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtMethod* resolved_method, uint16_t method_idx, dex::TypeIndex* storage_index) {
- std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_method, method_idx, storage_index);
- // Only the first member of `result` is meaningful, as there is no
- // "write access" to a method.
- return result.first;
-}
-
inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
ArtMethod* resolved_method =
check_incompatible_class_change
? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>(
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1d4eaf8c5a..b738d5ce7e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -583,7 +583,7 @@ static void CompileMethod(Thread* self,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled,
@@ -624,9 +624,6 @@ static void CompileMethod(Thread* self,
// Look-up the ArtMethod associated with this code_item (if any)
// -- It is later used to lookup any [optimization] annotations for this method.
ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
// TODO: Lookup annotation from DexFile directly without resolving method.
ArtMethod* method =
@@ -634,7 +631,7 @@ static void CompileMethod(Thread* self,
dex_file,
method_idx,
dex_cache,
- class_loader_handle,
+ class_loader,
/* referrer */ nullptr,
invoke_type);
@@ -681,9 +678,14 @@ static void CompileMethod(Thread* self,
if (compile) {
// NOTE: if compiler declines to compile this method, it will return null.
- compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
- class_def_idx, method_idx, class_loader,
- dex_file, dex_cache);
+ compiled_method = driver->GetCompiler()->Compile(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_cache);
}
if (compiled_method == nullptr &&
dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
@@ -730,12 +732,14 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
uint32_t method_idx = method->GetDexMethodIndex();
uint32_t access_flags = method->GetAccessFlags();
InvokeType invoke_type = method->GetInvokeType();
- StackHandleScope<1> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(method->GetDeclaringClass()->GetClassLoader()));
{
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> local_class_loader(
- soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+ soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
// Find the dex_file
dex_file = method->GetDexFile();
@@ -769,7 +773,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -795,7 +799,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -878,7 +882,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
for (const DexFile* dex_file : dex_files) {
- dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file, false));
+ dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
size_t class_def_count = dex_file->NumClassDefs();
@@ -1070,22 +1074,30 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
- explicit ResolveCatchBlockExceptionsClassVisitor(
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve)
- : exceptions_to_resolve_(exceptions_to_resolve) {}
+ ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ classes_.push_back(c);
+ return true;
+ }
+
+ void FindExceptionTypesToResolve(
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m);
+ for (ObjPtr<mirror::Class> klass : classes_) {
+ for (ArtMethod& method : klass->GetMethods(pointer_size)) {
+ FindExceptionTypesToResolveForMethod(&method, exceptions_to_resolve);
+ }
}
- return true;
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle)
+ void FindExceptionTypesToResolveForMethod(
+ ArtMethod* method,
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
}
@@ -1105,9 +1117,9 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
dex::TypeIndex encoded_catch_handler_handlers_type_idx =
dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list));
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
- exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
- method_handle->GetDexFile());
+ if (!method->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ exceptions_to_resolve->emplace(encoded_catch_handler_handlers_type_idx,
+ method->GetDexFile());
}
// ignore address associated with catch handler
DecodeUnsignedLeb128(&encoded_catch_handler_list);
@@ -1119,7 +1131,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
}
}
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve_;
+ std::vector<ObjPtr<mirror::Class>> classes_;
};
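
[Editor's note] The visitor is restructured from resolve-as-you-visit to collect-then-process: operator() only records classes, and resolution happens afterwards in FindExceptionTypesToResolve. A minimal sketch of the pattern under simplified stand-in types; the raw ObjPtr vector is why the caller below must forbid thread suspension while the visitor is live:

    #include <functional>
    #include <vector>

    // Simplified sketch: phase 1 runs inside the class-linker visit (record
    // only, no slow work), phase 2 does the per-class processing afterwards.
    class CollectThenProcessVisitor {
     public:
      bool operator()(void* klass) {   // stand-in for ObjPtr<mirror::Class>
        classes_.push_back(klass);     // defer the real work
        return true;                   // true == continue visiting
      }

      void ProcessAll(const std::function<void(void*)>& fn) const {
        for (void* klass : classes_) {
          fn(klass);
        }
      }

     private:
      std::vector<void*> classes_;     // raw pointers: stale if GC moves classes
    };
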
class RecordImageClassesVisitor : public ClassVisitor {
@@ -1173,8 +1185,14 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
- ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
- class_linker->VisitClasses(&visitor);
+ {
+ // Thread suspension is not allowed while ResolveCatchBlockExceptionsClassVisitor
+ // is using a std::vector<ObjPtr<mirror::Class>>.
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ResolveCatchBlockExceptionsClassVisitor visitor;
+ class_linker->VisitClasses(&visitor);
+ visitor.FindExceptionTypesToResolve(&unresolved_exception_types);
+ }
for (const auto& exception_type : unresolved_exception_types) {
dex::TypeIndex exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
@@ -1182,10 +1200,12 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file,
nullptr)));
Handle<mirror::Class> klass(hs2.NewHandle(
- class_linker->ResolveType(*dex_file,
- exception_type_idx,
- dex_cache,
- ScopedNullHandle<mirror::ClassLoader>())));
+ (dex_cache.Get() != nullptr)
+ ? class_linker->ResolveType(*dex_file,
+ exception_type_idx,
+ dex_cache,
+ ScopedNullHandle<mirror::ClassLoader>())
+ : nullptr));
if (klass.Get() == nullptr) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
@@ -1423,19 +1443,14 @@ void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodRefere
dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index);
}
-bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx) {
- // Get type from dex cache assuming it was populated by the verifier
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class) {
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1452,12 +1467,9 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
return is_accessible;
}
-bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* finalizable) {
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
// Be conservative.
@@ -1465,10 +1477,8 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
return false; // Unknown class needs access checks.
}
*finalizable = resolved_class->IsFinalizable();
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1512,9 +1522,7 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
mirror::Class* referrer_class;
Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
{
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader_handle = mUnit->GetClassLoader();
resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false);
referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr;
@@ -1776,7 +1784,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
- soa.Self(), dex_file, false)));
+ soa.Self(), dex_file)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -1875,10 +1883,9 @@ class ResolveTypeVisitor : public CompilationVisitor {
Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
dex_file,
class_loader.Get())));
- mirror::Class* klass = class_linker->ResolveType(dex_file,
- dex::TypeIndex(type_idx),
- dex_cache,
- class_loader);
+ ObjPtr<mirror::Class> klass = (dex_cache.Get() != nullptr)
+ ? class_linker->ResolveType(dex_file, dex::TypeIndex(type_idx), dex_cache, class_loader)
+ : nullptr;
if (klass == nullptr) {
soa.Self()->AssertPendingException();
@@ -2135,7 +2142,7 @@ class VerifyClassVisitor : public CompilationVisitor {
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
- soa.Self(), dex_file, false)));
+ soa.Self(), dex_file)));
std::string error_msg;
failure_kind =
verifier::MethodVerifier::VerifyClass(soa.Self(),
@@ -2587,10 +2594,18 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_direct_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver,
+ it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
// Compile virtual methods
@@ -2604,10 +2619,17 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_virtual_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver, it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
DCHECK(!it.HasNext());
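
[Editor's note] Both access-check helpers in this file now take the already-resolved classes instead of re-querying the dex cache by index; the decision logic itself is unchanged. A condensed restatement with stub types (the real visibility rules live in mirror::Class::CanAccess; same_package_as_referrer is a simplified stand-in):

    // Condensed restatement of the checks above; not the real API.
    struct ClassStub {
      bool is_public;
      bool same_package_as_referrer;  // simplified stand-in for package rules
    };

    bool CanAccessWithoutRuntimeChecks(const ClassStub* referrer,
                                       const ClassStub* resolved) {
      if (resolved == nullptr) return false;   // unknown class: keep runtime checks
      if (resolved->is_public) return true;    // public classes always accessible
      if (referrer == nullptr) return false;   // unknown referrer: keep runtime checks
      return resolved->same_package_as_referrer;  // simplified visibility rule
    }
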
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 503fe3adfc..1e5c43d833 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -187,16 +187,14 @@ class CompilerDriver {
REQUIRES(!requires_constructor_barrier_lock_);
// Are runtime access checks necessary in the compiled code?
- bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx)
+ bool CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class)
REQUIRES_SHARED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
// out_is_finalizable is set to whether the type is finalizable.
- bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+ bool CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* out_is_finalizable)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -233,27 +231,6 @@ class CompilerDriver {
ArtField* resolved_field, uint16_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
- // of the declaring class in the referrer's dex file.
- std::pair<bool, bool> IsFastStaticField(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtField* resolved_field,
- uint16_t field_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Return whether the declaring class of `resolved_method` is
- // available to `referrer_class`. If this is true, compute the type
- // index of the declaring class in the referrer's dex file and
- // return it through the out argument `storage_index`; otherwise
- // return DexFile::kDexNoIndex through `storage_index`.
- bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMethod* resolved_method,
- uint16_t method_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -379,21 +356,6 @@ class CompilerDriver {
}
private:
- // Return whether the declaring class of `resolved_member` is
- // available to `referrer_class` for read or write access using two
- // Boolean values returned as a pair. If is true at least for read
- // access, compute the type index of the declaring class in the
- // referrer's dex file and return it through the out argument
- // `storage_index`; otherwise return DexFile::kDexNoIndex through
- // `storage_index`.
- template <typename ArtMember>
- std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
// mirror::Class::CanAccessResolvedMember depending on the value of
@@ -406,10 +368,6 @@ class CompilerDriver {
uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 1e4ca16844..e4b66ebc5a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -101,6 +101,7 @@ class CompilerDriverTest : public CommonCompilerTest {
};
// Disabled due to 10 second runtime on host
+// TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
CompileAll(nullptr);
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index 47b19297e5..7e8e812c4a 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -21,7 +21,7 @@
namespace art {
-DexCompilationUnit::DexCompilationUnit(jobject class_loader,
+DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 854927d747..24a9a5b653 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -34,7 +34,7 @@ class VerifiedMethod;
class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
- DexCompilationUnit(jobject class_loader,
+ DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
@@ -44,7 +44,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const VerifiedMethod* verified_method,
Handle<mirror::DexCache> dex_cache);
- jobject GetClassLoader() const {
+ Handle<mirror::ClassLoader> GetClassLoader() const {
return class_loader_;
}
@@ -113,7 +113,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
}
private:
- const jobject class_loader_;
+ const Handle<mirror::ClassLoader> class_loader_;
ClassLinker* const class_linker_;
@@ -125,7 +125,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const uint32_t access_flags_;
const VerifiedMethod* verified_method_;
- Handle<mirror::DexCache> dex_cache_;
+ const Handle<mirror::DexCache> dex_cache_;
std::string symbol_;
};
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c72edb18a3..3e9ae0834c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -940,9 +940,11 @@ void ImageWriter::PruneNonImageClasses() {
}
ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- Class* klass = dex_cache->GetResolvedType(dex::TypeIndex(i));
+ mirror::TypeDexCachePair pair =
+ dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
+ mirror::Class* klass = pair.object.Read();
if (klass != nullptr && !KeepClass(klass)) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), nullptr);
+ dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
}
}
ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
@@ -1922,8 +1924,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
// above comment for intern tables.
ClassTable temp_class_table;
temp_class_table.ReadFromMemory(class_table_memory_ptr);
- CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
- mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader();
CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader),
table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader));
UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown));
@@ -2213,7 +2214,7 @@ void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
- GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
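
[Editor's note] The resolved-types array is no longer a plain GcRoot<Class> array indexed directly by type index; it now holds TypeDexCachePair entries, each pairing the object with the dex index it was resolved for, which is why the pruning loop above reads the pair and clears by the pair's own index. A minimal sketch of the idea with stand-in types (TypeCacheSketch and TypePair are assumptions, not ART's definitions):

    #include <atomic>
    #include <cstdint>
    #include <vector>

    // Stand-in for mirror::TypeDexCachePair: slot = (object, dex index).
    struct TypePair {
      const void* object;  // stand-in for GcRoot<mirror::Class>
      uint32_t index;      // dex type index stored in this slot
    };

    class TypeCacheSketch {
     public:
      explicit TypeCacheSketch(size_t size) : slots_(size) {}

      // Several indices can hash to one slot, so a hit requires an index match.
      const void* Lookup(uint32_t type_idx) const {
        TypePair pair = slots_[type_idx % slots_.size()].load(std::memory_order_relaxed);
        return (pair.index == type_idx) ? pair.object : nullptr;  // miss on mismatch
      }

      void Store(uint32_t type_idx, const void* klass) {
        slots_[type_idx % slots_.size()].store({klass, type_idx},
                                               std::memory_order_relaxed);
      }

     private:
      std::vector<std::atomic<TypePair>> slots_;
    };
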
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cc7df1ce21..bdc7146632 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -51,8 +51,13 @@ class ImageSpace;
} // namespace space
} // namespace gc
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
class ClassLoaderVisitor;
class ClassTable;
+class ImtConflictTable;
static constexpr int kInvalidFd = -1;
@@ -79,6 +84,11 @@ class ImageWriter FINAL {
return true;
}
+ ObjPtr<mirror::ClassLoader> GetClassLoader() {
+ CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
+ return compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ }
+
template <typename T>
T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (object == nullptr || IsInBootImage(object)) {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 79e1785e91..9ddf200237 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -31,9 +31,7 @@ namespace linker {
namespace {
inline bool IsAdrpPatch(const LinkerPatch& patch) {
- LinkerPatch::Type type = patch.GetType();
- return
- (type == LinkerPatch::Type::kStringRelative || type == LinkerPatch::Type::kDexCacheArray) &&
+ return (patch.IsPcRelative() && patch.GetType() != LinkerPatch::Type::kCallRelative) &&
patch.LiteralOffset() == patch.PcInsnOffset();
}
@@ -214,11 +212,11 @@ void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
DCHECK(patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative) << patch.GetType();
} else {
- // With the read barrier (non-Baker) enabled, it could be kDexCacheArray in the
- // HLoadString::LoadKind::kDexCachePcRelative case of VisitLoadString().
+ // With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
DCHECK(patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
- patch.GetType() == LinkerPatch::Type::kDexCacheArray) << patch.GetType();
+ patch.GetType() == LinkerPatch::Type::kStringBssEntry ||
+ patch.GetType() == LinkerPatch::Type::kTypeBssEntry) << patch.GetType();
}
shift = 0u; // No shift for ADD.
} else {
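
[Editor's note] IsAdrpPatch() now covers any PC-relative patch other than relative calls, since string/type .bss entries are also reached through an ADRP+ADD (or ADRP+LDR) pair. For context, the arithmetic such a pair encodes, sketched with a hypothetical helper (illustrative only, not the patcher's real encoding code):

    #include <cstdint>

    // Illustrative only: an ADRP/ADD pair materializes `target` from `pc` as
    // a signed 4KiB-page delta (ADRP) plus the low 12 bits (ADD, shift 0 --
    // matching "No shift for ADD" above).
    void SplitAdrpAdd(uint64_t pc, uint64_t target,
                      int64_t* page_delta, uint32_t* add_imm) {
      *page_delta = static_cast<int64_t>(target >> 12) -
                    static_cast<int64_t>(pc >> 12);       // ADRP immediate, in pages
      *add_imm = static_cast<uint32_t>(target & 0xfffu);  // ADD immediate
    }
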
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d5842a8c9d..66111f6e23 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -149,11 +149,10 @@ class OatTest : public CommonCompilerTest {
File* oat_file,
const std::vector<const char*>& dex_filenames,
SafeMap<std::string, std::string>& key_value_store,
- bool verify) {
+ bool verify,
+ ProfileCompilationInfo* profile_compilation_info) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false,
- &timings,
- /*profile_compilation_info*/nullptr);
+ OatWriter oat_writer(/*compiling_boot_image*/false, &timings, profile_compilation_info);
for (const char* dex_filename : dex_filenames) {
if (!oat_writer.AddDexFileSource(dex_filename, dex_filename)) {
return false;
@@ -264,7 +263,7 @@ class OatTest : public CommonCompilerTest {
return true;
}
- void TestDexFileInput(bool verify, bool low_4gb);
+ void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
void TestZipFileInput(bool verify);
std::unique_ptr<const InstructionSetFeatures> insn_features_;
@@ -568,7 +567,7 @@ static void MaybeModifyDexFileToFail(bool verify, std::unique_ptr<const DexFile>
}
}
-void OatTest::TestDexFileInput(bool verify, bool low_4gb) {
+void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
TimingLogger timings("OatTest::DexFileInput", false, false);
std::vector<const char*> input_filenames;
@@ -606,11 +605,14 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb) {
ScratchFile oat_file, vdex_file(oat_file, ".vdex");
SafeMap<std::string, std::string> key_value_store;
key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
+ std::unique_ptr<ProfileCompilationInfo>
+ profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
success = WriteElf(vdex_file.GetFile(),
oat_file.GetFile(),
input_filenames,
key_value_store,
- verify);
+ verify,
+ profile_compilation_info.get());
// In verify mode, we expect failure.
if (verify) {
@@ -654,15 +656,19 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb) {
}
TEST_F(OatTest, DexFileInputCheckOutput) {
- TestDexFileInput(false, /*low_4gb*/false);
+ TestDexFileInput(/*verify*/false, /*low_4gb*/false, /*use_profile*/false);
}
TEST_F(OatTest, DexFileInputCheckOutputLow4GB) {
- TestDexFileInput(false, /*low_4gb*/true);
+ TestDexFileInput(/*verify*/false, /*low_4gb*/true, /*use_profile*/false);
}
TEST_F(OatTest, DexFileInputCheckVerifier) {
- TestDexFileInput(true, /*low_4gb*/false);
+ TestDexFileInput(/*verify*/true, /*low_4gb*/false, /*use_profile*/false);
+}
+
+TEST_F(OatTest, DexFileFailsVerifierWithLayout) {
+ TestDexFileInput(/*verify*/true, /*low_4gb*/false, /*use_profile*/true);
}
void OatTest::TestZipFileInput(bool verify) {
@@ -717,8 +723,8 @@ void OatTest::TestZipFileInput(bool verify) {
std::vector<const char*> input_filenames { zip_file.GetFilename().c_str() }; // NOLINT [readability/braces] [4]
ScratchFile oat_file, vdex_file(oat_file, ".vdex");
- success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(),
- input_filenames, key_value_store, verify);
+ success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
+ key_value_store, verify, /*profile_compilation_info*/nullptr);
if (verify) {
ASSERT_FALSE(success);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bd2c5e3bfc..0ea11255a8 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1060,6 +1060,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
size_t relative_offset) SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
: OatDexMethodVisitor(writer, relative_offset),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
out_(out),
file_offset_(file_offset),
soa_(Thread::Current()),
@@ -1245,12 +1246,13 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
private:
+ ObjPtr<mirror::ClassLoader> class_loader_;
OutputStream* const out_;
const size_t file_offset_;
const ScopedObjectAccess soa_;
const ScopedAssertNoThreadSuspension no_thread_suspension_;
ClassLinker* const class_linker_;
- mirror::DexCache* dex_cache_;
+ ObjPtr<mirror::DexCache> dex_cache_;
std::vector<uint8_t> patched_code_;
void ReportWriteFailure(const char* what, const ClassDataItemIterator& it) {
@@ -1261,7 +1263,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
REQUIRES_SHARED(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
- mirror::DexCache* dex_cache =
+ ObjPtr<mirror::DexCache> dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
Thread::Current(), *ref.dex_file);
ArtMethod* method = dex_cache->GetResolvedMethod(
@@ -1295,7 +1297,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return target_offset;
}
- mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
+ ObjPtr<mirror::DexCache> GetDexCache(const DexFile* target_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
return (target_dex_file == dex_file_)
? dex_cache_
@@ -1303,10 +1305,12 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
- mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
+ DCHECK(writer_->HasImage());
+ ObjPtr<mirror::DexCache> dex_cache = GetDexCache(patch.TargetTypeDexFile());
+ ObjPtr<mirror::Class> type =
+ ClassLinker::LookupResolvedType(patch.TargetTypeIndex(), dex_cache, class_loader_);
CHECK(type != nullptr);
- return type;
+ return type.Ptr();
}
mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2266,6 +2270,10 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
File* raw_file = oat_dex_file->source_.GetRawFile();
dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
}
+ if (dex_file == nullptr) {
+ LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
+ return false;
+ }
Options options;
options.output_to_memmap_ = true;
DexLayout dex_layout(options, profile_compilation_info_, nullptr);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8cf4089eba..3a4c9dbd16 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -32,6 +32,8 @@
namespace art {
+class CodeGenerator;
+
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
@@ -40,6 +42,7 @@ class HGraphBuilder : public ValueObject {
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
Handle<mirror::DexCache> dex_cache,
@@ -51,7 +54,10 @@ class HGraphBuilder : public ValueObject {
compiler_driver_(driver),
compilation_stats_(compiler_stats),
block_builder_(graph, dex_file, code_item),
- ssa_builder_(graph, dex_compilation_unit->GetDexCache(), handles),
+ ssa_builder_(graph,
+ dex_compilation_unit->GetClassLoader(),
+ dex_compilation_unit->GetDexCache(),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -61,6 +67,7 @@ class HGraphBuilder : public ValueObject {
dex_compilation_unit,
outer_compilation_unit,
driver,
+ code_generator,
interpreter_metadata,
compiler_stats,
dex_cache,
@@ -76,10 +83,12 @@ class HGraphBuilder : public ValueObject {
code_item_(code_item),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
- null_dex_cache_(),
compilation_stats_(nullptr),
block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph, null_dex_cache_, handles),
+ ssa_builder_(graph,
+ handles->NewHandle<mirror::ClassLoader>(nullptr),
+ handles->NewHandle<mirror::DexCache>(nullptr),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -89,9 +98,10 @@ class HGraphBuilder : public ValueObject {
/* dex_compilation_unit */ nullptr,
/* outer_compilation_unit */ nullptr,
/* compiler_driver */ nullptr,
+ /* code_generator */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_,
+ handles->NewHandle<mirror::DexCache>(nullptr),
handles) {}
GraphAnalysisResult BuildGraph();
@@ -112,8 +122,6 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
- ScopedNullHandle<mirror::DexCache> null_dex_cache_;
-
OptimizingCompilerStats* compilation_stats_;
HBasicBlockBuilder block_builder_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5b6ebef9c..759a951d6b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -367,22 +367,37 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
class LoadClassSlowPathARM : public SlowPathCodeARM {
public:
- LoadClassSlowPathARM(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
+ LoadClassSlowPathARM(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
: SlowPathCodeARM(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location out = locations->Out();
+ constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
+ // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ bool is_load_class_bss_entry =
+ (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
+ Register entry_address = kNoRegister;
+ if (is_load_class_bss_entry && call_saves_everything_except_r0) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
+ // the kSaveEverything call.
+ bool temp_is_r0 = (temp == calling_convention.GetRegisterAt(0));
+ entry_address = temp_is_r0 ? out.AsRegister<Register>() : temp;
+ DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
+ if (temp_is_r0) {
+ __ mov(entry_address, ShifterOperand(temp));
+ }
+ }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadImmediate(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -394,30 +409,31 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ if (is_load_class_bss_entry) {
+ if (call_saves_everything_except_r0) {
+ // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
+ __ str(R0, Address(entry_address));
+ } else {
+        // For non-Baker read barrier, we need to re-calculate the address of the class entry.
+ Register temp = IP;
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(temp, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(temp, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(temp, temp, ShifterOperand(PC));
+ __ str(R0, Address(temp));
+ }
+ }
// Move the class to the desired location.
- Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
- // kSaveEverything and use a temporary for the .bss entry address in the fast path,
- // so that we can avoid another calculation here.
- CodeGeneratorARM::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- __ BindTrackedLabel(&labels->movw_label);
- __ movw(IP, /* placeholder */ 0u);
- __ BindTrackedLabel(&labels->movt_label);
- __ movt(IP, /* placeholder */ 0u);
- __ BindTrackedLabel(&labels->add_pc_label);
- __ add(IP, IP, ShifterOperand(PC));
- __ str(locations->Out().AsRegister<Register>(), Address(IP));
- }
__ b(GetExitLabel());
}
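
The `temp_is_r0` handling above (mirrored in the string slow path below) reduces to one register-selection rule. A minimal standalone sketch, with integer register ids where 0 stands for R0; the names and types are illustrative, not ART code:

#include <cassert>

// kSaveEverything preserves every register except the argument/return
// register (R0, modeled as 0). If the temp collides with it, park the .bss
// entry address in `out`, which only becomes live after the call returns.
int PickEntryAddressRegister(int temp, int out, int first_call_arg = 0) {
  int entry_address = (temp == first_call_arg) ? out : temp;
  assert(entry_address != first_call_arg);
  return entry_address;
}
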
@@ -441,12 +457,13 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ DCHECK(instruction_->IsLoadString());
+ DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
const dex::StringIndex string_index = load->GetStringIndex();
Register out = locations->Out().AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -455,12 +472,16 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
InvokeRuntimeCallingConvention calling_convention;
// In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call (or use `out` for the address after non-kSaveEverything call).
- bool temp_is_r0 = (temp == calling_convention.GetRegisterAt(0));
- Register entry_address = temp_is_r0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (call_saves_everything_except_r0 && temp_is_r0) {
- __ mov(entry_address, ShifterOperand(temp));
+ // the kSaveEverything call.
+ Register entry_address = kNoRegister;
+ if (call_saves_everything_except_r0) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ bool temp_is_r0 = (temp == calling_convention.GetRegisterAt(0));
+ entry_address = temp_is_r0 ? out : temp;
+ DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
+ if (temp_is_r0) {
+ __ mov(entry_address, ShifterOperand(temp));
+ }
}
__ LoadImmediate(calling_convention.GetRegisterAt(0), string_index.index_);
@@ -473,15 +494,16 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
__ str(R0, Address(entry_address));
} else {
// For non-Baker read barrier, we need to re-calculate the address of the string entry.
+ Register temp = IP;
CodeGeneratorARM::PcRelativePatchInfo* labels =
arm_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
__ BindTrackedLabel(&labels->movw_label);
- __ movw(entry_address, /* placeholder */ 0u);
+ __ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
- __ movt(entry_address, /* placeholder */ 0u);
+ __ movt(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->add_pc_label);
- __ add(entry_address, entry_address, ShifterOperand(PC));
- __ str(R0, Address(entry_address));
+ __ add(temp, temp, ShifterOperand(PC));
+ __ str(R0, Address(temp));
}
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
@@ -624,6 +646,10 @@ class ArraySetSlowPathARM : public SlowPathCodeARM {
// probably still be a from-space reference (unless it gets updated by
// another thread, or if another thread installed another object
// reference (different from `ref`) in `obj.field`).
+//
+// If `entrypoint` is a valid location it is assumed to already be
+// holding the entrypoint. The case where the entrypoint is passed in
+// is for the GcRoot read barrier.
class ReadBarrierMarkSlowPathARM : public SlowPathCodeARM {
public:
ReadBarrierMarkSlowPathARM(HInstruction* instruction,
@@ -3993,8 +4019,11 @@ void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
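
The entrypoint now depends on the component size of the resolved array class. A standalone sketch of that selection, assuming size-specialized kQuickAllocArrayResolved{8,16,32,64} entrypoints; the code below is illustrative, not ART's implementation:

#include <cstddef>

enum QuickEntrypoint {
  kAllocArrayResolved8,   // 1-byte components: boolean[], byte[]
  kAllocArrayResolved16,  // 2-byte components: char[], short[]
  kAllocArrayResolved32,  // 4-byte components: int[], float[], references
  kAllocArrayResolved64,  // 8-byte components: long[], double[]
};

QuickEntrypoint ArrayAllocationEntrypointFor(size_t component_size) {
  switch (component_size) {
    case 1:  return kAllocArrayResolved8;
    case 2:  return kAllocArrayResolved16;
    case 8:  return kAllocArrayResolved64;
    default: return kAllocArrayResolved32;
  }
}
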
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -5719,6 +5748,9 @@ void ParallelMoveResolverARM::RestoreScratch(int reg) {
HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5749,6 +5781,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Location::RegisterLocation(R0));
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), R0);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -5766,6 +5799,22 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadClass::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the type resolution or initialization and marking to save everything we need.
+ // Note that IP may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we request a different temp.
+ locations->AddTemp(Location::RequiresRegister());
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+      // that the kPrimNot result register is the same as the first argument register.
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
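
`SetCustomSlowPathCallerSaves` records that, with a kSaveEverything stub, only the argument/return register can be clobbered across the runtime call. A standalone sketch of that bookkeeping, with the types simplified for illustration only:

#include <cstdint>

struct RegisterSet {
  uint32_t core_registers = 0;
  static RegisterSet Empty() { return {}; }
  void Add(int reg) { core_registers |= 1u << reg; }
  bool Contains(int reg) const { return (core_registers >> reg) & 1u; }
};

// With a kSaveEverything stub, the slow path only needs to treat the single
// argument/return register (R0 == 0 here) as caller-save.
RegisterSet CustomSlowPathCallerSaves() {
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(0);
  return caller_saves;
}
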
// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
@@ -5828,15 +5877,18 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
break;
}
case HLoadClass::LoadKind::kBssEntry: {
+ Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
+ ? locations->GetTemp(0).AsRegister<Register>()
+ : out;
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
__ BindTrackedLabel(&labels->movw_label);
- __ movw(out, /* placeholder */ 0u);
+ __ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
- __ movt(out, /* placeholder */ 0u);
+ __ movt(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->add_pc_label);
- __ add(out, out, ShifterOperand(PC));
- GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ __ add(temp, temp, ShifterOperand(PC));
+ GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -5845,10 +5897,11 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -5931,9 +5984,9 @@ void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // Rely on the pResolveString and/or marking to save everything, including temps.
- // Note that IP may theoretically be clobbered by saving/restoring the live register
- // (only one thanks to the custom calling convention), so we request a different temp.
+ // Rely on the pResolveString and marking to save everything we need, including temps.
+ // Note that IP may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we request a different temp.
locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
@@ -5984,7 +6037,9 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
+ ? locations->GetTemp(0).AsRegister<Register>()
+ : out;
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 26c8254c76..e6032d2381 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -34,6 +34,9 @@
#include "utils/stack_checks.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+using vixl::EmissionCheckScope;
#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
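
These using-declarations pull in the VIXL scope types the rest of the file now relies on: an EmissionCheckScope reserves a margin in which the macro-assembler will not emit literal/veneer pools, while an ExactAssemblyScope additionally requires raw, fixed-size instructions. A sketch of the idiom as used throughout this patch (it mirrors the diff's own code and is not standalone):

{
  // Keep the recorded PC adjacent to the call: no pools may intervene.
  ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
  __ blr(lr);                          // exactly one 4-byte instruction
  RecordPcInfo(instruction, dex_pc);   // recorded PC == blr's return address
}
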
@@ -275,15 +278,37 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
LoadClassSlowPathARM64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCodeARM64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ bool do_clinit,
+ vixl::aarch64::Register bss_entry_temp = vixl::aarch64::Register(),
+ vixl::aarch64::Label* bss_entry_adrp_label = nullptr)
+ : SlowPathCodeARM64(at),
+ cls_(cls),
+ dex_pc_(dex_pc),
+ do_clinit_(do_clinit),
+ bss_entry_temp_(bss_entry_temp),
+ bss_entry_adrp_label_(bss_entry_adrp_label) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location out = locations->Out();
+ constexpr bool call_saves_everything_except_r0_ip0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the page address of
+  // the entry, which is in a scratch register. Make sure it's not used for saving/restoring
+  // registers. For simplicity, also exclude the scratch register for non-Baker read barriers.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ bool is_load_class_bss_entry =
+ (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
+ UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
+ if (is_load_class_bss_entry) {
+ // This temp is a scratch register.
+ DCHECK(bss_entry_temp_.IsValid());
+ temps.Exclude(bss_entry_temp_);
+ }
+
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -300,7 +325,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
}
// Move the class to the desired location.
- Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
Primitive::Type type = instruction_->GetType();
@@ -308,25 +332,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
}
RestoreLiveRegisters(codegen, locations);
// For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ if (is_load_class_bss_entry) {
DCHECK(out.IsValid());
- UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
- Register temp = temps.AcquireX();
const DexFile& dex_file = cls_->GetDexFile();
- // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
- // kSaveEverything and use a temporary for the ADRP in the fast path, so that we
- // can avoid the ADRP here.
- vixl::aarch64::Label* adrp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
- arm64_codegen->EmitAdrpPlaceholder(adrp_label, temp);
+ if (call_saves_everything_except_r0_ip0) {
+ // The class entry page address was preserved in bss_entry_temp_ thanks to kSaveEverything.
+ } else {
+ // For non-Baker read barrier, we need to re-calculate the address of the class entry page.
+ bss_entry_adrp_label_ = arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
+ arm64_codegen->EmitAdrpPlaceholder(bss_entry_adrp_label_, bss_entry_temp_);
+ }
vixl::aarch64::Label* strp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label_);
{
SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
__ Bind(strp_label);
__ str(RegisterFrom(locations->Out(), Primitive::kPrimNot),
- MemOperand(temp, /* offset placeholder */ 0));
+ MemOperand(bss_entry_temp_, /* offset placeholder */ 0));
}
}
__ B(GetExitLabel());
@@ -344,6 +366,10 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// Whether to initialize the class.
const bool do_clinit_;
+ // For HLoadClass/kBssEntry, the temp register and the label of the ADRP where it was loaded.
+ vixl::aarch64::Register bss_entry_temp_;
+ vixl::aarch64::Label* bss_entry_adrp_label_;
+
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
@@ -590,10 +616,9 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated the jump table with right size.
- vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(),
- num_entries * sizeof(int32_t),
- vixl::CodeBufferCheckScope::kReserveBufferSpace,
- vixl::CodeBufferCheckScope::kExactSize);
+ EmissionCheckScope scope(codegen->GetVIXLAssembler(),
+ num_entries * sizeof(int32_t),
+ CodeBufferCheckScope::kExactSize);
__ Bind(&table_start_);
const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
@@ -619,8 +644,10 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
// probably still be a from-space reference (unless it gets updated by
// another thread, or if another thread installed another object
// reference (different from `ref`) in `obj.field`).
-// If entrypoint is a valid location it is assumed to already be holding the entrypoint. The case
-// where the entrypoint is passed in is for the GcRoot read barrier.
+//
+// If `entrypoint` is a valid location it is assumed to already be
+// holding the entrypoint. The case where the entrypoint is passed in
+// is for the GcRoot read barrier.
class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
public:
ReadBarrierMarkSlowPathARM64(HInstruction* instruction,
@@ -1254,7 +1281,6 @@ void ParallelMoveResolverARM64::EmitMove(size_t index) {
void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
__ Bind(&frame_entry_label_);
bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
@@ -1263,8 +1289,14 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
- __ Ldr(wzr, MemOperand(temp, 0));
- RecordPcInfo(nullptr, 0);
+ {
+ // Ensure that between load and RecordPcInfo there are no pools emitted.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ __ ldr(wzr, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+ }
}
if (!HasEmptyFrame()) {
@@ -1299,7 +1331,6 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
}
void CodeGeneratorARM64::GenerateFrameExit() {
- BlockPoolsScope block_pools(GetVIXLAssembler());
GetAssembler()->cfi().RememberState();
if (!HasEmptyFrame()) {
int frame_size = GetFrameSize();
@@ -1626,7 +1657,6 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
const MemOperand& src,
bool needs_null_check) {
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
UseScratchRegisterScope temps(masm);
Register temp_base = temps.AcquireX();
Primitive::Type type = instruction->GetType();
@@ -1636,58 +1666,79 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
// TODO(vixl): Let the MacroAssembler handle MemOperand.
__ Add(temp_base, src.GetBaseRegister(), OperandFromMemOperand(src));
- MemOperand base = MemOperand(temp_base);
- switch (type) {
- case Primitive::kPrimBoolean:
- __ Ldarb(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimByte:
- __ Ldarb(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
- break;
- case Primitive::kPrimChar:
- __ Ldarh(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimShort:
- __ Ldarh(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
- break;
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong:
- DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
- __ Ldar(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- DCHECK(dst.IsFPRegister());
- DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
-
- Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
- __ Ldar(temp, base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ MemOperand base = MemOperand(temp_base);
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarb(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimByte:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarb(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
+ break;
+ case Primitive::kPrimChar:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarh(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimShort:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarh(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldar(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ DCHECK(dst.IsFPRegister());
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
+
+ Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldar(temp, base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Fmov(FPRegister(dst), temp);
+ break;
}
- __ Fmov(FPRegister(dst), temp);
- break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
}
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
}
}
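
LDAR/STLR give these volatile accesses acquire/release ordering. A standalone C++ analogue of the volatile field get/set being compiled here, purely for illustration (compilers typically lower these atomics to ldar/stlr on AArch64):

#include <atomic>

std::atomic<int> field{0};

int VolatileGet() {                    // lowers to ldar on AArch64
  return field.load(std::memory_order_acquire);
}

void VolatileSet(int value) {          // lowers to stlr on AArch64
  field.store(value, std::memory_order_release);
}
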
@@ -1716,9 +1767,12 @@ void CodeGeneratorARM64::Store(Primitive::Type type,
}
}
-void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
+void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
+ Primitive::Type type,
CPURegister src,
- const MemOperand& dst) {
+ const MemOperand& dst,
+ bool needs_null_check) {
+ MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp_base = temps.AcquireX();
@@ -1729,20 +1783,39 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
Operand op = OperandFromMemOperand(dst);
__ Add(temp_base, dst.GetBaseRegister(), op);
MemOperand base = MemOperand(temp_base);
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- __ Stlrb(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlrb(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- __ Stlrh(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlrh(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
- __ Stlr(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlr(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
@@ -1756,8 +1829,13 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
temp_src = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
__ Fmov(temp_src, FPRegister(src));
}
-
- __ Stlr(temp_src, base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlr(temp_src, base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
}
case Primitive::kPrimVoid:
@@ -1770,9 +1848,15 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value());
- if (EntrypointRequiresStackMap(entrypoint)) {
- RecordPcInfo(instruction, dex_pc, slow_path);
+
+ __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value()));
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ blr(lr);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
}
@@ -1780,11 +1864,6 @@ void CodeGeneratorARM64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
- GenerateInvokeRuntime(entry_point_offset);
-}
-
-void CodeGeneratorARM64::GenerateInvokeRuntime(int32_t entry_point_offset) {
- BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
}
@@ -1951,7 +2030,6 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
Location out = locations->Out();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
Primitive::Type field_type = field_info.GetFieldType();
- BlockPoolsScope block_pools(GetVIXLAssembler());
MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -1978,6 +2056,8 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
codegen_->LoadAcquire(
instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
} else {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Load(field_type, OutputCPURegister(instruction), field);
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2007,7 +2087,6 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
- BlockPoolsScope block_pools(GetVIXLAssembler());
Register obj = InputRegisterAt(instruction, 0);
CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1);
@@ -2029,9 +2108,11 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
}
if (field_info.IsVolatile()) {
- codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ codegen_->StoreRelease(
+ instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
} else {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Store(field_type, source, HeapOperand(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2317,10 +2398,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate*
masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
if (prev->IsLoadOrStore()) {
      // Make sure we emit exactly one nop.
- vixl::CodeBufferCheckScope scope(masm,
- kInstructionSize,
- vixl::CodeBufferCheckScope::kReserveBufferSpace,
- vixl::CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
__ nop();
}
}
@@ -2376,8 +2454,6 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
instruction->IsStringCharAt();
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
- // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
- BlockPoolsScope block_pools(masm);
// The read barrier instrumentation of object ArrayGet instructions
// does not support the HIntermediateAddress instruction.
@@ -2399,15 +2475,21 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
length = temps.AcquireW();
- if (instruction->GetArray()->IsIntermediateAddress()) {
- DCHECK_LT(count_offset, offset);
- int64_t adjusted_offset = static_cast<int64_t>(count_offset) - static_cast<int64_t>(offset);
- // Note that `adjusted_offset` is negative, so this will be a LDUR.
- __ Ldr(length, MemOperand(obj.X(), adjusted_offset));
- } else {
- __ Ldr(length, HeapOperand(obj, count_offset));
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+
+ if (instruction->GetArray()->IsIntermediateAddress()) {
+ DCHECK_LT(count_offset, offset);
+ int64_t adjusted_offset =
+ static_cast<int64_t>(count_offset) - static_cast<int64_t>(offset);
+ // Note that `adjusted_offset` is negative, so this will be a LDUR.
+ __ Ldr(length, MemOperand(obj.X(), adjusted_offset));
+ } else {
+ __ Ldr(length, HeapOperand(obj, count_offset));
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (index.IsConstant()) {
if (maybe_compressed_char_at) {
@@ -2457,6 +2539,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
}
}
if (!maybe_compressed_char_at) {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Load(type, OutputCPURegister(instruction), source);
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2484,9 +2568,12 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
vixl::aarch64::Register out = OutputRegister(instruction);
- BlockPoolsScope block_pools(GetVIXLAssembler());
- __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
// Mask out compression flag from String's array length.
if (mirror::kUseStringCompression && instruction->IsStringLength()) {
__ Lsr(out.W(), out.W(), 1u);
@@ -2527,7 +2614,6 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
MemOperand destination = HeapOperand(array);
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
if (!needs_write_barrier) {
DCHECK(!may_need_runtime_call_for_type_check);
@@ -2554,8 +2640,12 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
LSL,
Primitive::ComponentSizeShift(value_type));
}
- codegen_->Store(value_type, value, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ codegen_->Store(value_type, value, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
} else {
DCHECK(!instruction->GetArray()->IsIntermediateAddress());
vixl::aarch64::Label done;
@@ -2588,8 +2678,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
if (!index.IsConstant()) {
__ Add(temp, array, offset);
}
- __ Str(wzr, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools
+ // emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Str(wzr, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
__ B(&done);
__ Bind(&non_zero);
}
@@ -2604,8 +2699,12 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Register temp2 = temps.AcquireSameSizeAs(array);
// /* HeapReference<Class> */ temp = array->klass_
- __ Ldr(temp, HeapOperand(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Ldr(temp, HeapOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
GetAssembler()->MaybeUnpoisonHeapReference(temp);
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -2646,10 +2745,14 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
if (!index.IsConstant()) {
__ Add(temp, array, offset);
}
- __ Str(source, destination);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Str(source, destination);
- if (!may_need_runtime_call_for_type_check) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (!may_need_runtime_call_for_type_check) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
}
@@ -3944,19 +4047,25 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope scratch_scope(masm);
- BlockPoolsScope block_pools(masm);
scratch_scope.Exclude(ip1);
__ Mov(ip1, invoke->GetDexMethodIndex());
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
if (receiver.IsStackSlot()) {
__ Ldr(temp.W(), StackOperandFrom(receiver));
- // /* HeapReference<Class> */ temp = temp->klass_
- __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
+ {
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* HeapReference<Class> */ temp = temp->klass_
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ }
} else {
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
// /* HeapReference<Class> */ temp = receiver->klass_
__ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
}
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However this is not required in practice, as this is an
@@ -3973,10 +4082,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
- // lr();
- __ Blr(lr);
- DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+
+ // lr();
+ __ blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ }
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4088,8 +4203,16 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
- // lr()
- __ Blr(lr);
+ {
+      // To ensure that the pc position is recorded immediately after the `blr` instruction,
+ // BLR must be the last instruction emitted in this function.
+ // Recording the pc will occur right after returning from this function.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ // lr()
+ __ blr(lr);
+ }
break;
}
@@ -4109,12 +4232,15 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
Offset class_offset = mirror::Object::ClassOffset();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
- BlockPoolsScope block_pools(GetVIXLAssembler());
-
DCHECK(receiver.IsRegister());
- // /* HeapReference<Class> */ temp = receiver->klass_
- __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
- MaybeRecordImplicitNullCheck(invoke);
+
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* HeapReference<Class> */ temp = receiver->klass_
+ __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ }
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
  // However this is not required in practice, as this is an
  // intermediate/temporary reference and because the current
@@ -4126,8 +4252,14 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
- // lr();
- __ Blr(lr);
+ {
+    // To ensure that the pc position is recorded immediately after the `blr` instruction,
+ // BLR should be the last instruction emitted in this function.
+ // Recording the pc will occur right after returning from this function.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+ // lr();
+ __ blr(lr);
+ }
}
void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -4340,7 +4472,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
return;
}
- BlockPoolsScope block_pools(GetVIXLAssembler());
+ // Ensure that between the BLR (emitted by GenerateStaticOrDirectCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
@@ -4352,6 +4486,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
return;
}
+ // Ensure that between the BLR (emitted by GenerateVirtualCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -4360,6 +4497,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -4390,6 +4530,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
LocationFrom(vixl::aarch64::x0));
+ DCHECK(calling_convention.GetRegisterAt(0).Is(vixl::aarch64::x0));
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -4407,6 +4548,22 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
+ if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the type resolution or initialization and marking to save everything we need.
+ // Note that IP0 may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we shall use IP1.
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+ DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
+ RegisterFrom(calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Primitive::kPrimNot).GetCode());
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
@@ -4421,6 +4578,8 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
+ Register bss_entry_temp;
+ vixl::aarch64::Label* bss_entry_adrp_label = nullptr;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
@@ -4470,18 +4629,23 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
// Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
dex::TypeIndex type_index = cls->GetTypeIndex();
- vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
- codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
+      // We can go to the slow path even with a non-zero reference, and in that case
+      // marking can clobber IP0, so we need to use IP1, which shall be preserved.
+ bss_entry_temp = ip1;
+ UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
+ temps.Exclude(bss_entry_temp);
+ bss_entry_adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(bss_entry_adrp_label, bss_entry_temp);
// Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
- cls->GetLocations()->Out(),
- out.X(),
- /* placeholder */ 0u,
+ out_loc,
+ bss_entry_temp,
+ /* offset placeholder */ 0u,
ldr_label,
- kCompilerReadBarrierOption);
+ read_barrier_option);
generate_null_check = true;
break;
}
@@ -4494,18 +4658,20 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
out.X(),
/* offset */ 0,
/* fixup_label */ nullptr,
- kCompilerReadBarrierOption);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- if (generate_null_check || cls->MustGenerateClinitCheck()) {
+ bool do_clinit = cls->MustGenerateClinitCheck();
+ if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -4573,7 +4739,9 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // Rely on the pResolveString and/or marking to save everything, including temps.
+ // Rely on the pResolveString and marking to save everything we need.
+ // Note that IP0 may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we shall use IP1.
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -4624,8 +4792,11 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
const DexFile& dex_file = load->GetDexFile();
const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+    // We could use IP0, as marking shall not clobber IP0 when the reference is null,
+    // and that is the only case where we need the slow path. But let's not rely on
+    // such details and use IP1.
+ Register temp = ip1;
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
- Register temp = temps.AcquireX();
+ temps.Exclude(temp);
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its PC-relative String patch.
@@ -4813,8 +4984,15 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
__ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
__ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
- __ Blr(lr);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ __ blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ }
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
@@ -4858,11 +5036,13 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
if (CanMoveNullCheckToUser(instruction)) {
return;
}
-
- BlockPoolsScope block_pools(GetVIXLAssembler());
- Location obj = instruction->GetLocations()->InAt(0);
- __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
- RecordPcInfo(instruction, instruction->GetDexPc());
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ Location obj = instruction->GetLocations()->InAt(0);
+ __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
+ RecordPcInfo(instruction, instruction->GetDexPc());
+ }
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
@@ -5599,10 +5779,14 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
DCHECK(obj.IsW());
uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- // /* int32_t */ monitor = obj->monitor_
- __ Ldr(temp, HeapOperand(obj, monitor_offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* int32_t */ monitor = obj->monitor_
+ __ Ldr(temp, HeapOperand(obj, monitor_offset));
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
}
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f6cb90a63a..5faf29a90f 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -43,6 +43,11 @@ class CodeGeneratorARM64;
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
+// These constants are used as approximate margins when the emission of veneer and
+// literal pools must be blocked.
+static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
+static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;
+
static const vixl::aarch64::Register kParameterCoreRegisters[] = {
vixl::aarch64::x1,
vixl::aarch64::x2,
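
Worked out, assuming AArch64's fixed 4-byte instruction width, the margins above come to 60 and 360 bytes. A compilable sketch of the arithmetic (the constants shadow the ART names purely for illustration):

#include <cstddef>

constexpr size_t kInstructionSize = 4;  // AArch64 instructions are fixed-width
constexpr size_t kMaxMacroInstructionSizeInBytes = 15 * kInstructionSize;
constexpr size_t kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;

// One macro-instruction may expand to at most 15 instructions; an invoke
// sequence is budgeted at 6 macro-instructions' worth of code.
static_assert(kMaxMacroInstructionSizeInBytes == 60, "15 * 4 bytes");
static_assert(kInvokeCodeMarginSizeInBytes == 360, "6 * 60 bytes");
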
@@ -486,9 +491,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::CPURegister dst,
const vixl::aarch64::MemOperand& src,
bool needs_null_check);
- void StoreRelease(Primitive::Type type,
+ void StoreRelease(HInstruction* instruction,
+ Primitive::Type type,
vixl::aarch64::CPURegister src,
- const vixl::aarch64::MemOperand& dst);
+ const vixl::aarch64::MemOperand& dst,
+ bool needs_null_check);
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -502,8 +509,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
HInstruction* instruction,
SlowPathCode* slow_path);
- void GenerateInvokeRuntime(int32_t entry_point_offset);
-
ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f4d3ec54ee..5c4ca5bc17 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -400,12 +400,30 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location out = locations->Out();
+ constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ bool is_load_class_bss_entry =
+ (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
+ vixl32::Register entry_address;
+ if (is_load_class_bss_entry && call_saves_everything_except_r0) {
+ vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
+ // the kSaveEverything call.
+ bool temp_is_r0 = temp.Is(calling_convention.GetRegisterAt(0));
+ entry_address = temp_is_r0 ? RegisterFrom(out) : temp;
+ DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
+ if (temp_is_r0) {
+ __ Mov(entry_address, temp);
+ }
+ }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -417,27 +435,28 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ if (is_load_class_bss_entry) {
+ if (call_saves_everything_except_r0) {
+ // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
+ __ Str(r0, MemOperand(entry_address));
+ } else {
+        // For non-Baker read barrier, we need to re-calculate the address of the class entry.
+ UseScratchRegisterScope temps(
+ down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
+ __ Str(r0, MemOperand(temp));
+ }
+ }
// Move the class to the desired location.
- Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
- // kSaveEverything and use a temporary for the .bss entry address in the fast path,
- // so that we can avoid another calculation here.
- UseScratchRegisterScope temps(down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(OutputRegister(cls_), MemOperand(temp));
- }
__ B(GetExitLabel());
}
@@ -462,12 +481,13 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
: SlowPathCodeARMVIXL(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ DCHECK(instruction_->IsLoadString());
+ DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
const dex::StringIndex string_index = load->GetStringIndex();
vixl32::Register out = OutputRegister(load);
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
@@ -476,12 +496,16 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
// In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call (or use `out` for the address after non-kSaveEverything call).
- bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
- vixl32::Register entry_address = temp_is_r0 ? out : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (call_saves_everything_except_r0 && temp_is_r0) {
- __ Mov(entry_address, temp);
+ // the kSaveEverything call.
+ vixl32::Register entry_address;
+ if (call_saves_everything_except_r0) {
+ vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
+ entry_address = temp_is_r0 ? out : temp;
+ DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
+ if (temp_is_r0) {
+ __ Mov(entry_address, temp);
+ }
}
__ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
@@ -494,10 +518,13 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ Str(r0, MemOperand(entry_address));
} else {
// For non-Baker read barrier, we need to re-calculate the address of the string entry.
+ UseScratchRegisterScope temps(
+ down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
arm_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, out);
- __ Str(r0, MemOperand(entry_address));
+ arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
+ __ Str(r0, MemOperand(temp));
}
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
@@ -4005,8 +4032,11 @@ void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5796,6 +5826,9 @@ void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) {
HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5826,6 +5859,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
LocationFrom(r0));
+ DCHECK(calling_convention.GetRegisterAt(0).Is(r0));
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -5843,6 +5877,22 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadClass::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the type resolution or initialization and marking to save everything we need.
+ // Note that IP may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we request a different temp.
+ locations->AddTemp(Location::RequiresRegister());
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+ // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+ // that the kPrimNot result register is the same as the first argument register.
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
@@ -5900,10 +5950,13 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
break;
}
case HLoadClass::LoadKind::kBssEntry: {
+ vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
+ ? RegisterFrom(locations->GetTemp(0))
+ : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, out);
- GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ codegen_->EmitMovwMovtPlaceholder(labels, temp);
+ GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -5912,10 +5965,11 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -6005,9 +6059,9 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // Rely on the pResolveString and/or marking to save everything, including temps.
- // Note that IP may theoretically be clobbered by saving/restoring the live register
- // (only one thanks to the custom calling convention), so we request a different temp.
+ // Rely on the pResolveString and marking to save everything we need, including temps.
+ // Note that IP may be clobbered by saving/restoring the live register (only one thanks
+ // to the custom calling convention) or by marking, so we request a different temp.
locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -6052,7 +6106,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
+ ? RegisterFrom(locations->GetTemp(0))
+ : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, temp);
@@ -7253,8 +7309,7 @@ vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
- if (slow_path->IsCoreRegisterSaved(RegisterFrom(location).GetCode())) {
+ if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(RegisterFrom(location).GetCode())) {
int stack_offset = slow_path->GetStackOffsetOfCoreRegister(RegisterFrom(location).GetCode());
GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, stack_offset);
return temp;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index a095970a1e..c9dde7cc55 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -484,6 +484,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -704,9 +706,6 @@ bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
// (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
// into the path that creates a stack frame so that RA can be explicitly saved and restored.
// RA can't otherwise be saved/restored when it's the only spilled register.
- // TODO: Can this be improved? It causes creation of a stack frame (while RA might be
- // saved in an unused temporary register) and saving of RA and the current method pointer
- // in the frame.
return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
}
@@ -1160,6 +1159,67 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
// offset to `out` (e.g. lw, jialc, addiu).
}
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
+ const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ return &jit_string_patches_.back();
+}
+
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
+ const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ return &jit_class_patches_.back();
+}
+
+void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const CodeGeneratorMIPS::JitPatchInfo& info,
+ uint64_t index_in_table) const {
+ uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
+ // lui reg, addr32_high
+ DCHECK_EQ(code[literal_offset + 0], 0x34);
+ DCHECK_EQ(code[literal_offset + 1], 0x12);
+ DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
+ DCHECK_EQ(code[literal_offset + 3], 0x3C);
+ // lw reg, reg, addr32_low
+ DCHECK_EQ(code[literal_offset + 4], 0x78);
+ DCHECK_EQ(code[literal_offset + 5], 0x56);
+ DCHECK_EQ((code[literal_offset + 7] & 0xFC), 0x8C);
+ addr32 += (addr32 & 0x8000) << 1; // Account for sign extension in "lw reg, reg, addr32_low".
+ // lui reg, addr32_high
+ code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
+ code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
+ // lw reg, reg, addr32_low
+ code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
+ code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
+}
+
+void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+ for (const JitPatchInfo& info : jit_string_patches_) {
+ const auto& it = jit_string_roots_.find(StringReference(&info.target_dex_file,
+ dex::StringIndex(info.index)));
+ DCHECK(it != jit_string_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
+ for (const JitPatchInfo& info : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
+ dex::TypeIndex(info.index)));
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
+}
+
void CodeGeneratorMIPS::MarkGCCard(Register object,
Register value,
bool value_can_be_null) {
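
One subtlety in PatchJitRootUse above: the 16-bit immediate of the patched lw is sign-extended at run time, so when bit 15 of the address is set, the lui half must carry an extra 1; that is exactly what `addr32 += (addr32 & 0x8000) << 1;` adds, and the carry never disturbs the low 16 bits. A standalone round-trip check of the arithmetic (hypothetical helper, not ART code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Splits addr32 the way the patched "lui reg, high; lw reg, low(reg)" pair
// consumes it: lui installs high << 16, lw adds the sign-extended low half.
uint32_t RecomposeLuiLw(uint32_t addr32) {
  uint32_t biased = addr32 + ((addr32 & 0x8000) << 1);  // carry into the high half
  uint32_t high = biased >> 16;                         // lui immediate
  int32_t low = static_cast<int16_t>(addr32 & 0xFFFF);  // lw immediate, sign-extended
  return (high << 16) + static_cast<uint32_t>(low);
}

int main() {
  for (uint32_t a : {0x12345678u, 0x0000FFFFu, 0x7FFF8000u, 0xDEADBEEFu}) {
    assert(RecomposeLuiLw(a) == a);  // round-trips for both values of bit 15
  }
  return 0;
}
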
@@ -1854,6 +1914,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
auto null_checker = GetImplicitNullChecker(instruction);
Primitive::Type type = instruction->GetType();
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
Register out = locations->Out().AsRegister<Register>();
@@ -1897,14 +1959,54 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
Register out = locations->Out().AsRegister<Register>();
+ if (maybe_compressed_char_at) {
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
+ __ Sll(TMP, TMP, 31); // Extract compression flag into the most significant bit of TMP.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
+ }
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset, null_checker);
+ int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ if (maybe_compressed_char_at) {
+ MipsLabel uncompressed_load, done;
+ __ Bnez(TMP, &uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedByte,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_1));
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ __ Bind(&done);
+ } else {
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2),
+ null_checker);
+ }
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
- __ Addu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
+ Register index_reg = index.AsRegister<Register>();
+ if (maybe_compressed_char_at) {
+ MipsLabel uncompressed_load, done;
+ __ Bnez(TMP, &uncompressed_load);
+ __ Addu(TMP, obj, index_reg);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ Sll(TMP, index_reg, TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ Bind(&done);
+ } else {
+ __ Sll(TMP, index_reg, TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
+ }
}
break;
}
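
The static_assert above pins the encoding these branches rely on: bit 0 of String's count word is the compression flag (0 = compressed), which Sll moves into the sign bit for the Bnez. A hedged host-side model of the same selection (assumed layout only, not ART's real String accessors); note that the matching length read below masks the flag off with a logical shift right by one:

#include <cstdint>

// Assumed layout from the code above: count = (length << 1) | flag,
// flag 0 = compressed 8-bit chars, 1 = uncompressed 16-bit chars.
uint16_t StringCharAt(uint32_t count, const uint8_t* data, int32_t index) {
  if ((count & 1u) == 0u) {
    return data[index];                                   // byte load (TIMES_1)
  }
  return reinterpret_cast<const uint16_t*>(data)[index];  // halfword load (TIMES_2)
}

uint32_t StringLength(uint32_t count) {
  return count >> 1;  // mask out the compression flag, as in VisitArrayLength
}
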
@@ -1986,6 +2088,10 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ Srl(out, out, 1u);
+ }
}
Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
@@ -5225,8 +5331,7 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
+ fallback_load = false;
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
fallback_load = false;
@@ -5249,6 +5354,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
fallback_load = false;
break;
@@ -5265,8 +5373,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
+ fallback_load = false;
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
fallback_load = false;
@@ -5591,10 +5698,18 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
break;
}
case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetClass());
+ bool reordering = __ SetReorder(false);
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -5730,6 +5845,18 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitTableAddress: {
+ CodeGeneratorMIPS::JitPatchInfo* info =
+ codegen_->NewJitRootStringPatch(load->GetDexFile(),
+ load->GetStringIndex(),
+ load->GetString());
+ bool reordering = __ SetReorder(false);
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
+ return;
+ }
default:
break;
}
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index e92eeef88f..47eba50248 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -352,6 +352,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
void MarkGCCard(Register object, Register value, bool value_can_be_null);
@@ -465,6 +466,31 @@ class CodeGeneratorMIPS : public CodeGenerator {
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
+ // The JitPatchInfo is used for JIT string and class loads.
+ struct JitPatchInfo {
+ JitPatchInfo(const DexFile& dex_file, uint64_t idx)
+ : target_dex_file(dex_file), index(idx) { }
+ JitPatchInfo(JitPatchInfo&& other) = default;
+
+ const DexFile& target_dex_file;
+ // String/type index.
+ uint64_t index;
+ // Label for the instruction loading the most significant half of the address.
+ // The least significant half is loaded with the instruction that follows immediately.
+ MipsLabel high_label;
+ };
+
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const JitPatchInfo& info,
+ uint64_t index_in_table) const;
+ JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
+ JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
+
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
@@ -512,6 +538,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
+ // Patches for string root accesses in JIT compiled code.
+ ArenaDeque<JitPatchInfo> jit_string_patches_;
+ // Patches for class root accesses in JIT compiled code.
+ ArenaDeque<JitPatchInfo> jit_class_patches_;
// PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
// This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e96e3d75e1..5be0da4011 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -91,9 +91,6 @@ Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Typ
// Space on the stack is reserved for all arguments.
stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
- // TODO: shouldn't we use a whole machine word per argument on the stack?
- // Implicit 4-byte method pointer (and such) will cause misalignment.
-
return next_location;
}
@@ -434,7 +431,11 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(StringReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -1055,6 +1056,49 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
// offset to `out` (e.g. ld, jialc, daddiu).
}
+Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ return jit_string_patches_.GetOrCreate(
+ StringReference(&dex_file, string_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ return jit_class_patches_.GetOrCreate(
+ TypeReference(&dex_file, type_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const Literal* literal,
+ uint64_t index_in_table) const {
+ uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address);
+}
+
+void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+ for (const auto& entry : jit_string_patches_) {
+ const auto& it = jit_string_roots_.find(entry.first);
+ DCHECK(it != jit_string_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+ for (const auto& entry : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(entry.first);
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+}
+
void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
@@ -1446,6 +1490,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -1489,14 +1535,54 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (maybe_compressed_char_at) {
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ Dext(TMP, TMP, 0, 1);
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
+ }
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ if (maybe_compressed_char_at) {
+ Mips64Label uncompressed_load, done;
+ __ Bnezc(TMP, &uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedByte,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_1));
+ __ Bc(&done);
+ __ Bind(&uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ __ Bind(&done);
+ } else {
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ }
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
- __ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ GpuRegister index_reg = index.AsRegister<GpuRegister>();
+ if (maybe_compressed_char_at) {
+ Mips64Label uncompressed_load, done;
+ __ Bnezc(TMP, &uncompressed_load);
+ __ Daddu(TMP, obj, index_reg);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ Bc(&done);
+ __ Bind(&uncompressed_load);
+ __ Dsll(TMP, index_reg, TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ Bind(&done);
+ } else {
+ __ Dsll(TMP, index_reg, TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
}
break;
}
@@ -1564,7 +1650,9 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (!maybe_compressed_char_at) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
@@ -1580,6 +1668,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction)
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ Srl(out, out, 1u);
+ }
}
void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
@@ -3309,8 +3401,6 @@ HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
break;
}
if (fallback_load) {
@@ -3326,6 +3416,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
}
bool fallback_load = false;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -3341,8 +3434,6 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -3580,11 +3671,16 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ case HLoadClass::LoadKind::kJitTableAddress:
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetClass()));
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0);
break;
- }
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -3685,6 +3781,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitTableAddress:
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
+ load->GetStringIndex(),
+ load->GetString()));
+ GenerateGcRootFieldLoad(load, out_loc, out, 0);
+ return;
default:
break;
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 5ba8912134..26cc7dc788 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -52,7 +52,7 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
static constexpr GpuRegister kCoreCalleeSaves[] =
- { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA }; // TODO: review
+ { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
static constexpr FpuRegister kFpuCalleeSaves[] =
{ F24, F25, F26, F27, F28, F29, F30, F31 };
@@ -312,6 +312,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
@@ -425,10 +426,27 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const Literal* literal,
+ uint64_t index_in_table) const;
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
+ Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ Handle<mirror::Class> handle);
+
private:
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ using StringToLiteralMap = ArenaSafeMap<StringReference,
+ Literal*,
+ StringReferenceValueComparator>;
+ using TypeToLiteralMap = ArenaSafeMap<TypeReference,
+ Literal*,
+ TypeReferenceValueComparator>;
using BootStringToLiteralMap = ArenaSafeMap<StringReference,
Literal*,
StringReferenceValueComparator>;
@@ -476,6 +494,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
+ // Patches for string root accesses in JIT compiled code.
+ StringToLiteralMap jit_string_patches_;
+ // Patches for class root accesses in JIT compiled code.
+ TypeToLiteralMap jit_class_patches_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1b7431612d..09612c8dbf 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4214,7 +4214,9 @@ void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
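
VisitNewArray now derives the entrypoint from the array class instead of always calling kQuickAllocArrayResolved. Presumably the shared GetArrayAllocationEntrypoint helper keys on the component size to pick one of the kQuickAllocArrayResolved{8,16,32,64} variants; a hedged sketch of that idea (assumed mapping and enum, not necessarily the helper's exact logic):

#include <cstddef>

// Assumed enum values for illustration; the real QuickEntrypointEnum differs.
enum class ArrayAllocStub { kResolved8, kResolved16, kResolved32, kResolved64 };

ArrayAllocStub SelectArrayAllocStub(size_t component_size) {
  switch (component_size) {
    case 1: return ArrayAllocStub::kResolved8;
    case 2: return ArrayAllocStub::kResolved16;
    case 4: return ArrayAllocStub::kResolved32;
    case 8: return ArrayAllocStub::kResolved64;
    default: return ArrayAllocStub::kResolved32;  // assumption: fallback width
  }
}
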
@@ -6022,6 +6024,9 @@ void ParallelMoveResolverX86::RestoreScratch(int reg) {
HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -6052,6 +6057,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Location::RegisterLocation(EAX));
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), EAX);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -6071,6 +6077,17 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadClass::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the type resolution and/or initialization to save everything.
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
@@ -6153,10 +6170,11 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
Label* fixup_label = codegen_->NewJitRootClassPatch(
cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -6244,7 +6262,7 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // Rely on the pResolveString and/or marking to save everything.
+ // Rely on the pResolveString to save everything.
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -7130,9 +7148,10 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barriers are used:
//
- // root = *address;
- // if (Thread::Current()->GetIsGcMarking()) {
- // root = ReadBarrier::Mark(root)
+ // root = obj.field;
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ // if (temp != null) {
+ // root = temp(root)
// }
// /* GcRoot<mirror::Object> */ root = *address
@@ -7153,8 +7172,11 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
- __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
- Immediate(0));
+ // Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
+ const int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(root.reg());
+ __ fs()->cmpl(Address::Absolute(entry_point_offset), Immediate(0));
+ // The entrypoint is null when the GC is not marking.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
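
The replacement fast path folds the "is the GC marking?" question into the entrypoint itself: the runtime installs pReadBarrierMarkRegNN per register only while marking and nulls it otherwise, so a single thread-local cmpl both tests the condition and identifies the slow-path target. A self-contained model of the control flow (hypothetical thread layout, not ART's real offsets):

struct Obj;                       // opaque stand-in for mirror::Object
using MarkRoutine = Obj* (*)(Obj*);

struct ThreadModel {
  // One slot per core register; entries are null while the GC is not marking.
  MarkRoutine read_barrier_mark_reg[16];
};

Obj* LoadGcRoot(ThreadModel* self, Obj** address, int root_reg) {
  Obj* root = *address;                                    // root = *address
  MarkRoutine mark = self->read_barrier_mark_reg[root_reg];
  if (mark != nullptr) {                                   // jne slow_path
    root = mark(root);                                     // mark/forward the root
  }
  return root;
}
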
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index abd8246325..0879992e32 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -245,9 +245,8 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
- InvokeRuntimeCallingConvention calling_convention;
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
- Immediate(cls_->GetTypeIndex().index_));
+ // Custom calling convention: RAX serves as both input and output.
+ __ movl(CpuRegister(RAX), Immediate(cls_->GetTypeIndex().index_));
x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
instruction_,
dex_pc_,
@@ -5427,6 +5426,9 @@ void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5453,10 +5455,10 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
- InvokeRuntimeCallingConvention calling_convention;
+ // Custom calling convention: RAX serves as both input and output.
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(RAX),
Location::RegisterLocation(RAX));
return;
}
@@ -5475,6 +5477,17 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadClass::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the type resolution and/or initialization to save everything.
+ // Custom calling convention: RAX serves as both input and output.
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(RAX));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
@@ -5550,7 +5563,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
Label* fixup_label =
codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
break;
}
default:
@@ -5626,7 +5639,7 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // Rely on the pResolveString and/or marking to save everything.
+ // Rely on the pResolveString to save everything.
// Custom calling convention: RAX serves as both input and output.
RegisterSet caller_saves = RegisterSet::Empty();
caller_saves.Add(Location::RegisterLocation(RAX));
@@ -6498,9 +6511,10 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barriers are used:
//
- // root = *address;
- // if (Thread::Current()->GetIsGcMarking()) {
- // root = ReadBarrier::Mark(root)
+ // root = obj.field;
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ // if (temp != null) {
+ // root = temp(root)
// }
// /* GcRoot<mirror::Object> */ root = *address
@@ -6521,9 +6535,11 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
- __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true),
- Immediate(0));
+ // Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
+ const int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
+ __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+ // The entrypoint is null when the GC is not marking.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 7772e8f973..b56ef0f866 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -198,9 +198,9 @@ static uint32_t FindMethodIndexIn(ArtMethod* method,
}
static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ const DexCompilationUnit& compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile& dex_file = *compilation_unit.GetDexFile();
dex::TypeIndex index;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
@@ -209,22 +209,19 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
DCHECK(cls->IsProxyClass()) << cls->PrettyClass();
// TODO: deal with proxy classes.
} else if (IsSameDexFile(cls->GetDexFile(), dex_file)) {
- DCHECK_EQ(cls->GetDexCache(), dex_cache.Get());
+ DCHECK_EQ(cls->GetDexCache(), compilation_unit.GetDexCache().Get());
index = cls->GetDexTypeIndex();
- // Update the dex cache to ensure the class is in. The generated code will
- // consider it is. We make it safe by updating the dex cache, as other
- // dex files might also load the class, and there is no guarantee the dex
- // cache of the dex file of the class will be updated.
- if (dex_cache->GetResolvedType(index) == nullptr) {
- dex_cache->SetResolvedType(index, cls);
- }
} else {
index = cls->FindTypeIndexInOtherDexFile(dex_file);
- // We cannot guarantee the entry in the dex cache will resolve to the same class,
+ // We cannot guarantee the entry will resolve to the same class,
// as there may be different class loaders. So only return the index if it's
- // the right class in the dex cache already.
- if (index.IsValid() && dex_cache->GetResolvedType(index) != cls) {
- index = dex::TypeIndex::Invalid();
+ // the right class already resolved with the class loader.
+ if (index.IsValid()) {
+ ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType(
+ index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+ if (resolved != cls) {
+ index = dex::TypeIndex::Invalid();
+ }
}
}
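
The new inliner behavior above is deliberately read-only: instead of writing the class into the caller's dex cache, it keeps a cross-dex type index only when a lookup through the caller's dex cache and class loader already yields the identical class object. Distilled into a tiny model (hypothetical lookup signature; the real code calls ClassLinker::LookupResolvedType):

#include <cstdint>

struct Class;  // opaque stand-in for mirror::Class

// Hypothetical lookup: returns the class already resolved for `index` in the
// caller's context, or nullptr if it has not been resolved there yet.
using LookupFn = const Class* (*)(uint32_t index);

// The index is usable only if the caller resolves it to the very same object;
// name equality is not enough because class loaders may differ.
bool IndexUsableForInlining(LookupFn caller_lookup, uint32_t index, const Class* cls) {
  return caller_lookup(index) == cls;
}
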
@@ -451,9 +448,8 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
dex::TypeIndex class_index = FindClassIndexIn(
- GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
@@ -496,6 +492,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -558,9 +555,13 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
+ HLoadClass::LoadKind kind = HSharpening::SharpenClass(
+ load_class, codegen_, compiler_driver_, caller_compilation_unit_);
+ DCHECK(kind != HLoadClass::LoadKind::kInvalid)
+ << "We should always be able to reference a class for inline caches";
+ // Insert before setting the kind, as setting the kind affects the inputs.
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
- // Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
+ load_class->SetLoadKind(kind);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -586,7 +587,6 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
bool one_target_inlined = false;
@@ -608,8 +608,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- dex::TypeIndex class_index = FindClassIndexIn(
- handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
@@ -665,6 +664,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -859,6 +859,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -927,6 +928,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// Actual return value has a more specific type than the method's declared
// return type. Run RTP again on the outer graph to propagate it.
ReferenceTypePropagation(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
@@ -1179,7 +1181,11 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
- ReferenceTypePropagation rtp(graph_, dex_cache, handles_, /* is_first_run */ false);
+ ReferenceTypePropagation rtp(graph_,
+ outer_compilation_unit_.GetClassLoader(),
+ dex_cache,
+ handles_,
+ /* is_first_run */ false);
rtp.Visit(iget);
}
return iget;
@@ -1225,7 +1231,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetClassLoader()));
DexCompilationUnit dex_compilation_unit(
- class_loader.ToJObject(),
+ class_loader,
class_linker,
callee_dex_file,
code_item,
@@ -1286,6 +1292,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDexFile(),
*code_item,
compiler_driver_,
+ codegen_,
inline_stats.get(),
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
dex_cache,
@@ -1341,6 +1348,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// are more specific than the declared ones, run RTP again on the inner graph.
if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
ReferenceTypePropagation(callee_graph,
+ outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
@@ -1416,10 +1424,13 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (!same_dex_file && current->NeedsEnvironment()) {
+ if (current->NeedsEnvironment() &&
+ !CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
+ resolved_method)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
- << " needs an environment and is in a different dex file";
+ << " needs an environment, is in a different dex file"
+ << ", and cannot be encoded in the stack maps.";
return false;
}
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index cac385ce3c..3aaf2ca102 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -22,6 +22,7 @@
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "sharpening.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -668,11 +669,10 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) {
ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
// We fetch the referenced class eagerly (that is, the class pointed to by the MethodId
// at method_idx), as `CanAccessResolvedMethod` expects it to be in the dex cache.
@@ -847,7 +847,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check = ProcessClinitCheckForInvoke(
- dex_pc, resolved_method, method_idx, &clinit_check_requirement);
+ dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -933,15 +933,8 @@ bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- if (outer_dex_cache.Get() != dex_cache.Get()) {
- // We currently do not support inlining allocations across dex files.
- return false;
- }
-
- HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc);
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
@@ -1005,39 +998,23 @@ bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* resolved_method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- Handle<mirror::Class> resolved_method_class(hs.NewHandle(resolved_method->GetDeclaringClass()));
-
- // The index at which the method's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (resolved_method->GetDeclaringClass() == outer_class.Get());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() == dex_cache.Get()) {
- // Get `storage_index` from IsClassOfStaticMethodAvailableToReferrer.
- compiler_driver_->IsClassOfStaticMethodAvailableToReferrer(outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_method,
- method_idx,
- &storage_index);
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
-
- if (IsInitialized(resolved_method_class)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
- } else if (storage_index.IsValid()) {
- *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* cls = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
- AppendInstruction(clinit_check);
+ } else {
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+ if (cls != nullptr) {
+ *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ AppendInstruction(clinit_check);
+ }
}
return clinit_check;
}
@@ -1216,9 +1193,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field =
- compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
-
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1284,9 +1259,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
static mirror::Class* GetClassFrom(CompilerDriver* driver,
const DexCompilationUnit& compilation_unit) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(compilation_unit.GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
@@ -1302,10 +1275,9 @@ mirror::Class* HInstructionBuilder::GetCompilingClass() const {
bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1336,6 +1308,55 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
}
}
+ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+
+ ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
+ Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
+
+ ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
+ field_idx,
+ dex_compilation_unit_->GetDexCache(),
+ class_loader,
+ is_static);
+
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up any exception left by field resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+
+ // Check static/instance. The class linker has a fast path for looking into the dex cache
+ // and does not check static/instance if it hits it.
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ return nullptr;
+ }
+
+ // Check access.
+ if (compiling_class.Get() == nullptr) {
+ if (!resolved_field->IsPublic()) {
+ return nullptr;
+ }
+ } else if (!compiling_class->CanAccessResolvedField(resolved_field->GetDeclaringClass(),
+ resolved_field,
+ dex_compilation_unit_->GetDexCache().Get(),
+ field_idx)) {
+ return nullptr;
+ }
+
+ if (is_put &&
+ resolved_field->IsFinal() &&
+ (compiling_class.Get() != resolved_field->GetDeclaringClass())) {
+ // Final fields can only be updated within their own class.
+ // TODO: Only allow it in constructors. b/34966607.
+ return nullptr;
+ }
+
+ return resolved_field;
+}
+
bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
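
ResolveField above centralizes checks the compiler driver used to do piecemeal, and the outcome depends on only a handful of predicates. A condensed restatement of that ladder (plain booleans standing in for the ART queries, for clarity only):

// Mirrors the early-return structure of ResolveField above.
bool CanUseResolvedField(bool kind_matches,         // IsStatic() == is_static
                         bool has_compiling_class,  // compiling class resolved
                         bool is_public,
                         bool access_check_passes,  // CanAccessResolvedField()
                         bool is_put,
                         bool is_final,
                         bool same_declaring_class) {
  if (!kind_matches) return false;                       // static/instance mismatch
  if (!has_compiling_class && !is_public) return false;  // no context: public only
  if (has_compiling_class && !access_check_passes) return false;
  if (is_put && is_final && !same_declaring_class) return false;  // final writes stay in-class
  return true;
}
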
@@ -1343,12 +1364,7 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
- ArtField* resolved_field = compiler_driver_->ResolveField(
- soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
@@ -1358,38 +1374,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- // The index at which the field's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (outer_class.Get() == resolved_field->GetDeclaringClass());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() != dex_cache.Get()) {
- // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
- return false;
- } else {
- // TODO: This is rather expensive. Perf it and cache the results if needed.
- std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
- outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_field,
- field_index,
- &storage_index);
- bool can_easily_access = is_put ? pair.second : pair.first;
- if (!can_easily_access) {
- MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
- BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
- return true;
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+
+ if (constant == nullptr) {
+ // The class cannot be referenced from this compiled code. Generate
+ // an unresolved access.
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
- HLoadClass* constant = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
-
HInstruction* cls = constant;
- Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
@@ -1497,7 +1498,7 @@ void HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
HInstruction* object = new (arena_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
@@ -1627,44 +1628,66 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
- uint32_t dex_pc,
- bool check_access,
- bool outer) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- const DexCompilationUnit* compilation_unit =
- outer ? outer_compilation_unit_ : dex_compilation_unit_;
- const DexFile& dex_file = *compilation_unit->GetDexFile();
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
- soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
+ soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
- bool is_accessible = false;
- if (!check_access) {
- is_accessible = true;
- } else if (klass.Get() != nullptr) {
+ bool needs_access_check = true;
+ if (klass.Get() != nullptr) {
if (klass->IsPublic()) {
- is_accessible = true;
+ needs_access_check = false;
} else {
mirror::Class* compiling_class = GetCompilingClass();
if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
- is_accessible = true;
+ needs_access_check = false;
}
}
}
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+}
+
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
+ uint32_t dex_pc,
+ bool needs_access_check) {
+ // Try to find a type index for the class in the compiling dex file.
+ const DexFile* actual_dex_file = &dex_file;
+ if (!IsSameDexFile(dex_file, *dex_compilation_unit_->GetDexFile())) {
+ dex::TypeIndex local_type_index =
+ klass->FindTypeIndexInOtherDexFile(*dex_compilation_unit_->GetDexFile());
+ if (local_type_index.IsValid()) {
+ type_index = local_type_index;
+ actual_dex_file = dex_compilation_unit_->GetDexFile();
+ }
+ }
+
+ // Note: `klass` must be from `handles_`.
HLoadClass* load_class = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
- dex_file,
+ *actual_dex_file,
klass,
klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
dex_pc,
- !is_accessible);
+ needs_access_check);
+
+ HLoadClass::LoadKind load_kind = HSharpening::SharpenClass(load_class,
+ code_generator_,
+ compiler_driver_,
+ *dex_compilation_unit_);
+ if (load_kind == HLoadClass::LoadKind::kInvalid) {
+ // We actually cannot reference this class, so we are forced to bail.
+ return nullptr;
+ }
+ // Append the instruction first, as setting the load kind affects the inputs.
AppendInstruction(load_class);
+ load_class->SetLoadKind(load_kind);
return load_class;
}
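
The cross-dex-file step above can be modeled in isolation; the toy `TypeIdTable` below approximates a dex file's type_ids section with a descriptor-to-index map (names are illustrative, not dex file API):

#include <cstdint>
#include <string>
#include <unordered_map>

using TypeIdTable = std::unordered_map<std::string, uint32_t>;

// If the class's descriptor also appears in the compiling dex file, prefer the
// local type index so the generated code can reference the type directly.
bool FindLocalTypeIndex(const TypeIdTable& compiling_dex,
                        const std::string& descriptor,
                        uint32_t* out_index) {
  auto it = compiling_dex.find(descriptor);
  if (it == compiling_dex.end()) {
    return false;  // Keep the original dex file and type index.
  }
  *out_index = it->second;
  return true;
}
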
@@ -1674,7 +1697,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
dex::TypeIndex type_index,
uint32_t dex_pc) {
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
@@ -1692,17 +1715,9 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
}
}
-bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- bool* finalizable) const {
- return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index, finalizable);
-}
-
bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const {
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- return NeedsAccessCheck(type_index, dex_cache, finalizable);
+ return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
+ LookupReferrerClass(), LookupResolvedType(type_index, *dex_compilation_unit_), finalizable);
}
bool HInstructionBuilder::CanDecodeQuickenedInfo() const {
@@ -2498,7 +2513,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
AppendInstruction(new (arena_) HNewArray(cls, length, dex_pc));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
@@ -2673,7 +2688,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ BuildLoadClass(type_index, dex_pc);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -2742,4 +2757,18 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
return true;
} // NOLINT(readability/fn_size)
+ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
+ dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const {
+ return ClassLinker::LookupResolvedType(
+ type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+}
+
+ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
+ // TODO: Cache the result in a Handle<mirror::Class>.
+ const DexFile::MethodId& method_id =
+ dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
+ return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
+}
+
} // namespace art
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 5efe95094c..e735a0c46d 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -31,6 +31,7 @@
namespace art {
+class CodeGenerator;
class Instruction;
class HInstructionBuilder : public ValueObject {
@@ -44,6 +45,7 @@ class HInstructionBuilder : public ValueObject {
DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* const outer_compilation_unit,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
@@ -61,6 +63,7 @@ class HInstructionBuilder : public ValueObject {
current_locals_(nullptr),
latest_result_(nullptr),
compiler_driver_(driver),
+ code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
interpreter_metadata_(interpreter_metadata),
@@ -103,11 +106,8 @@ class HInstructionBuilder : public ValueObject {
// Returns whether the current method needs an access check for the type.
// The output parameter `finalizable` is set to whether the type is finalizable.
- bool NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- /*out*/bool* finalizable) const
+ bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const
REQUIRES_SHARED(Locks::mutator_lock_);
- bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const;
template<typename T>
void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
@@ -228,10 +228,14 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`. The second overload
// takes a pre-resolved `klass` and the dex file in which it can be referenced.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
+
HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
uint32_t dex_pc,
- bool check_access,
- bool outer = false);
+ bool needs_access_check)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -275,7 +279,6 @@ class HInstructionBuilder : public ValueObject {
HClinitCheck* ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -290,6 +293,16 @@ class HInstructionBuilder : public ValueObject {
// not be resolved.
ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type);
+ // Try to resolve a field using the class linker. Return null if it could not
+ // be found.
+ ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
+
+ ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
+
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -311,6 +324,8 @@ class HInstructionBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
+ CodeGenerator* const code_generator_;
+
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
DexCompilationUnit* const dex_compilation_unit_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index bbf826ce7e..1047d3beb6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -115,13 +115,18 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
MoveArguments(invoke_, codegen);
- if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- LocationFrom(kArtMethodRegister));
- } else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
+ {
+ // Ensure that no pools are emitted between the BLR (emitted by Generate*Call)
+ // and RecordPcInfo.
+ vixl::EmissionCheckScope guard(codegen->GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ LocationFrom(kArtMethodRegister));
+ } else {
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
+ }
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -980,11 +985,12 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke)
CreateIntIntIntIntToVoid(arena_, invoke);
}
-static void GenUnsafePut(LocationSummary* locations,
+static void GenUnsafePut(HInvoke* invoke,
Primitive::Type type,
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
@@ -1007,7 +1013,7 @@ static void GenUnsafePut(LocationSummary* locations,
}
if (is_volatile || is_ordered) {
- codegen->StoreRelease(type, source, mem_op);
+ codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
} else {
codegen->Store(type, source, mem_op);
}
@@ -1020,63 +1026,63 @@ static void GenUnsafePut(LocationSummary* locations,
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ true,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ true,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ true,
/* is_ordered */ false,
@@ -2825,9 +2831,13 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
}
__ Cbnz(temp0, slow_path->GetEntryLabel());
- // Fast path.
- __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+ {
+ // Ensure that no pools are emitted between the load and MaybeRecordImplicitNullCheck.
+ vixl::EmissionCheckScope guard(codegen_->GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // Fast path.
+ __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ }
codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 6cf9b83d44..64a68403e9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2004,31 +2004,48 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
__ Lw(temp2, arg, class_offset);
__ Bne(temp1, temp2, &return_false);
- // Load lengths of this and argument strings.
+ // Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
__ Lw(temp2, arg, count_offset);
- // Check if lengths are equal, return false if they're not.
+ // Check if `count` fields are equal, return false if they're not.
+ // This also compares the compression style; return false if it differs.
__ Bne(temp1, temp2, &return_false);
- // Return true if both strings are empty.
+ // Return true if both strings are empty. Even with string compression `count == 0` means empty.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
__ Beqz(temp1, &return_true);
// Don't overwrite input registers
__ Move(TMP, str);
__ Move(temp3, arg);
- // Assertions that must hold in order to compare strings 2 characters at a time.
+ // Assertions that must hold in order to compare strings 4 bytes at a time.
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
- // Loop to compare strings 2 characters at a time starting at the beginning of the string.
- // Ok to do this because strings are zero-padded.
+ // For string compression, calculate the number of bytes to compare (not chars).
+ if (mirror::kUseStringCompression) {
+ // Extract compression flag.
+ if (IsR2OrNewer()) {
+ __ Ext(temp2, temp1, 0, 1);
+ } else {
+ __ Sll(temp2, temp1, 31);
+ __ Srl(temp2, temp2, 31);
+ }
+ __ Srl(temp1, temp1, 1); // Extract length.
+ __ Sllv(temp1, temp1, temp2); // Double the byte count if uncompressed.
+ }
+
+ // Loop to compare strings 4 bytes at a time starting at the beginning of the string.
+ // Ok to do this because strings are zero-padded to kObjectAlignment.
__ Bind(&loop);
__ Lw(out, TMP, value_offset);
__ Lw(temp2, temp3, value_offset);
__ Bne(out, temp2, &return_false);
__ Addiu(TMP, TMP, 4);
__ Addiu(temp3, temp3, 4);
- __ Addiu(temp1, temp1, -2);
+ // With string compression, we have compared 4 bytes, otherwise 2 chars.
+ __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -4 : -2);
__ Bgtz(temp1, &loop);
// Return true and exit the function.
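
A standalone model of the `count` decoding this flag extraction assumes, per the static_assert above (bit 0 is the compression flag, 0 = compressed; the upper 31 bits hold the character count):

#include <cstdint>

// Returns the number of bytes the equals loop must compare.
uint32_t BytesToCompare(uint32_t count) {
  uint32_t flag = count & 1u;   // Ext(temp2, temp1, 0, 1)
  uint32_t chars = count >> 1;  // Srl(temp1, temp1, 1)
  return chars << flag;         // Sllv: double the count if uncompressed.
}

// Example: "abc" compressed:   count = (3 << 1) | 0 = 6 -> 3 bytes.
//          "abc" uncompressed: count = (3 << 1) | 1 = 7 -> 6 bytes.
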
@@ -2578,6 +2595,30 @@ void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Addu(dstPtr, dstPtr, AT);
}
+ if (mirror::kUseStringCompression) {
+ MipsLabel uncompressed_copy, compressed_loop;
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // Load count field and extract compression flag.
+ __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
+ __ Sll(TMP, TMP, 31);
+
+ // If the string is uncompressed, use the memcpy() path.
+ __ Bnez(TMP, &uncompressed_copy);
+
+ // Copy loop for a compressed src, widening one 8-bit character to 16 bits at a time.
+ __ Addu(srcPtr, srcObj, srcBegin);
+ __ Bind(&compressed_loop);
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
+ __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
+ __ Addiu(numChrs, numChrs, -1);
+ __ Addiu(srcPtr, srcPtr, 1);
+ __ Addiu(dstPtr, dstPtr, 2);
+ __ Bnez(numChrs, &compressed_loop);
+
+ __ B(&done);
+ __ Bind(&uncompressed_copy);
+ }
+
// Calculate source address.
__ Addiu(srcPtr, srcObj, value_offset);
if (IsR6()) {
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 00a1fa11bb..3888828722 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1607,31 +1607,42 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
__ Lw(temp2, arg, class_offset);
__ Bnec(temp1, temp2, &return_false);
- // Load lengths of this and argument strings.
+ // Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
__ Lw(temp2, arg, count_offset);
- // Check if lengths are equal, return false if they're not.
+ // Check if `count` fields are equal, return false if they're not.
+ // This also compares the compression style; return false if it differs.
__ Bnec(temp1, temp2, &return_false);
- // Return true if both strings are empty.
+ // Return true if both strings are empty. Even with string compression `count == 0` means empty.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
__ Beqzc(temp1, &return_true);
// Don't overwrite input registers
__ Move(TMP, str);
__ Move(temp3, arg);
- // Assertions that must hold in order to compare strings 4 characters at a time.
+ // Assertions that must hold in order to compare strings 8 bytes at a time.
DCHECK_ALIGNED(value_offset, 8);
static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
- // Loop to compare strings 4 characters at a time starting at the beginning of the string.
- // Ok to do this because strings are zero-padded to be 8-byte aligned.
+ if (mirror::kUseStringCompression) {
+ // For string compression, calculate the number of bytes to compare (not chars).
+ __ Dext(temp2, temp1, 0, 1); // Extract compression flag.
+ __ Srl(temp1, temp1, 1); // Extract length.
+ __ Sllv(temp1, temp1, temp2); // Double the byte count if uncompressed.
+ }
+
+ // Loop to compare strings 8 bytes at a time starting at the beginning of the string.
+ // Ok to do this because strings are zero-padded to kObjectAlignment.
__ Bind(&loop);
__ Ld(out, TMP, value_offset);
__ Ld(temp2, temp3, value_offset);
__ Bnec(out, temp2, &return_false);
__ Daddiu(TMP, TMP, 8);
__ Daddiu(temp3, temp3, 8);
- __ Addiu(temp1, temp1, -4);
+ // With string compression, we have compared 8 bytes, otherwise 4 chars.
+ __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -8 : -4);
__ Bgtzc(temp1, &loop);
// Return true and exit the function.
@@ -1912,6 +1923,30 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Daddiu(dstPtr, dstObj, data_offset);
__ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);
+ if (mirror::kUseStringCompression) {
+ Mips64Label uncompressed_copy, compressed_loop;
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // Load count field and extract compression flag.
+ __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
+ __ Dext(TMP, TMP, 0, 1);
+
+ // If the string is uncompressed, use the memcpy() path.
+ __ Bnezc(TMP, &uncompressed_copy);
+
+ // Copy loop for a compressed src, widening one 8-bit character to 16 bits at a time.
+ __ Daddu(srcPtr, srcObj, srcBegin);
+ __ Bind(&compressed_loop);
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
+ __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
+ __ Daddiu(numChrs, numChrs, -1);
+ __ Daddiu(srcPtr, srcPtr, 1);
+ __ Daddiu(dstPtr, dstPtr, 2);
+ __ Bnezc(numChrs, &compressed_loop);
+
+ __ Bc(&done);
+ __ Bind(&uncompressed_copy);
+ }
+
// Calculate source address.
__ Daddiu(srcPtr, srcObj, value_offset);
__ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 76900f23a9..abbb91a1a9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2464,16 +2464,15 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
}
}
-void HLoadClass::SetLoadKindInternal(LoadKind load_kind) {
- // Once sharpened, the load kind should not be changed again.
- // Also, kReferrersClass should never be overwritten.
- DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
+void HLoadClass::SetLoadKind(LoadKind load_kind) {
SetPackedField<LoadKindField>(load_kind);
- if (load_kind != LoadKind::kDexCacheViaMethod) {
+ if (load_kind != LoadKind::kDexCacheViaMethod &&
+ load_kind != LoadKind::kReferrersClass) {
RemoveAsUserOfInput(0u);
SetRawInputAt(0u, nullptr);
}
+
if (!NeedsEnvironment()) {
RemoveEnvironment();
SetSideEffects(SideEffects::None());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index acf14aa726..96f9abafbf 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4322,6 +4322,11 @@ class HInvokeInterface FINAL : public HInvoke {
return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ // The assembly stub currently needs it.
+ return true;
+ }
+
uint32_t GetImtIndex() const { return imt_index_; }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
@@ -5508,6 +5513,9 @@ class HLoadClass FINAL : public HInstruction {
public:
// Determines how to load the Class.
enum class LoadKind {
+ // We cannot load this class. See HSharpening::SharpenLoadClass.
+ kInvalid = -1,
+
// Use the Class* from the method's own ArtMethod*.
kReferrersClass,
@@ -5564,18 +5572,7 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKind(LoadKind load_kind) {
- SetLoadKindInternal(load_kind);
- }
-
- void SetLoadKindWithTypeReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::TypeIndex type_index) {
- DCHECK(HasTypeReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- DCHECK_EQ(type_index_, type_index);
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
@@ -5694,6 +5691,11 @@ class HLoadClass FINAL : public HInstruction {
// for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
+ // A type index and dex file where the class can be accessed. The dex file can be:
+ // - The compiling method's dex file if the class is defined there too.
+ // - The compiling method's dex file if the class is referenced there.
+ // - The dex file where the class is defined. In this case, if the load kind can
+ //   only be kBssEntry or kDexCacheViaMethod, we cannot emit code for this `HLoadClass`.
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1ab671022b..0375c66e42 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -306,7 +306,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const OVERRIDE;
@@ -375,7 +375,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -875,7 +875,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -946,11 +946,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
const uint8_t* interpreter_metadata = nullptr;
if (method == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
method = compiler_driver->ResolveMethod(
- soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type);
}
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
@@ -959,16 +956,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
graph->SetArtMethod(method);
ScopedObjectAccess soa(Thread::Current());
interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
- dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex();
-
- // Update the dex cache if the type is not in it yet. Note that under AOT,
- // the verifier must have set it, but under JIT, there's no guarantee, as we
- // don't necessarily run the verifier.
- // The compiler and the compiler driver assume the compiling class is
- // in the dex cache.
- if (dex_cache->GetResolvedType(type_index) == nullptr) {
- dex_cache->SetResolvedType(type_index, method->GetDeclaringClass());
- }
}
std::unique_ptr<CodeGenerator> codegen(
@@ -999,6 +986,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
&dex_file,
*code_item,
compiler_driver,
+ codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
dex_cache,
@@ -1048,7 +1036,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject jclass_loader,
+ Handle<mirror::ClassLoader> jclass_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -1133,6 +1121,25 @@ bool IsCompilingWithCoreImage() {
return false;
}
+bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+ // Note: the runtime is null only for unit testing.
+ return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // JIT can always encode methods in stack maps.
+ return true;
+ }
+ if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+ return true;
+ }
+ // TODO(ngeoffray): Support more AOT cases for inlining:
+ // - methods in multidex
+ // - methods in boot image for on-device non-PIC compilation.
+ return false;
+}
+
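
A standalone model of the policy these two helpers encode; `DexFile`, `Method`, and `CanEncodeInlinedMethod` are toy stand-ins, and the real check compares dex files with IsSameDexFile rather than raw pointers:

struct DexFile {};
struct Method { const DexFile* dex_file; };

// JIT can always refer to the callee via its ArtMethod*; AOT can currently
// only encode a callee through a method index in the caller's dex file.
bool CanEncodeInlinedMethod(bool is_aot_compiler,
                            const Method& caller,
                            const Method& callee) {
  if (!is_aot_compiler) {
    return true;  // JIT: encode the ArtMethod* directly in the inline info.
  }
  return caller.dex_file == callee.dex_file;  // AOT: same dex file only, for now.
}
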
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
@@ -1143,7 +1150,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
DCHECK(method->IsCompilable());
- jobject jclass_loader = class_loader.ToJObject();
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
@@ -1167,7 +1173,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_cache,
method,
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 0c89da12e8..d8cea30a6b 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -17,10 +17,15 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
+#include "base/mutex.h"
+#include "globals.h"
+
namespace art {
+class ArtMethod;
class Compiler;
class CompilerDriver;
+class DexFile;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
@@ -29,6 +34,10 @@ Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
// information for checking invariants.
bool IsCompilingWithCoreImage();
+bool EncodeArtMethodInInlineInfo(ArtMethod* method);
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index b02f2509ab..6e332ca59b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -24,9 +24,8 @@
namespace art {
-static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
- const DexFile& dex_file,
- Handle<mirror::DexCache> hint_dex_cache)
+static inline ObjPtr<mirror::DexCache> FindDexCacheWithHint(
+ Thread* self, const DexFile& dex_file, Handle<mirror::DexCache> hint_dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
return hint_dex_cache.Get();
@@ -66,11 +65,13 @@ ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetThrowabl
class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
public:
RTPVisitor(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
HandleCache* handle_cache,
ArenaVector<HInstruction*>* worklist,
bool is_first_run)
: HGraphDelegateVisitor(graph),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handle_cache),
worklist_(worklist),
@@ -102,6 +103,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
bool is_exact);
private:
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> hint_dex_cache_;
HandleCache* handle_cache_;
ArenaVector<HInstruction*>* worklist_;
@@ -109,11 +111,13 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
@@ -148,7 +152,12 @@ void ReferenceTypePropagation::ValidateTypes() {
}
void ReferenceTypePropagation::Visit(HInstruction* instruction) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
instruction->Accept(&visitor);
}
@@ -322,7 +331,12 @@ void ReferenceTypePropagation::Run() {
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
// Handle Phis first as there might be instructions in the same block who depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -542,9 +556,10 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
DCHECK_EQ(instr->GetType(), Primitive::kPrimNot);
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
- // Get type from dex cache assuming it was populated by the verifier.
- SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
+ ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get());
+ SetClassAsTypeInfo(instr, klass, is_exact);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
@@ -557,25 +572,13 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
-static mirror::Class* GetClassFromDexCache(Thread* self,
- const DexFile& dex_file,
- dex::TypeIndex type_idx,
- Handle<mirror::DexCache> hint_dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
- // Get type from dex cache assuming it was populated by the verifier.
- return dex_cache->GetResolvedType(type_idx);
-}
-
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
+ UpdateReferenceTypeInfo(instr,
+ instr->GetTypeIndex(),
+ instr->GetDexFile(),
+ /* is_exact */ false);
}
}
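
The change above replaces "read the resolved type straight out of the dex cache" with a lookup keyed by both dex cache and class loader. A toy model of the lookup side follows (illustrative stand-ins, not the ClassLinker API; the real LookupResolvedType also consults the class loader's class table on a dex cache miss):

#include <cstdint>
#include <unordered_map>

struct Class {};
// Toy dex cache: type index -> class, nullptr until something resolved it.
using ToyDexCache = std::unordered_map<uint32_t, Class*>;

// A lookup only reports what is already resolved; it never loads or resolves
// classes itself, which is all reference type propagation needs here.
Class* LookupResolvedType(const ToyDexCache& dex_cache, uint32_t type_idx) {
  auto it = dex_cache.find(type_idx);
  return (it == dex_cache.end()) ? nullptr : it->second;
}
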
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 4663471729..215e96786b 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -33,6 +33,7 @@ namespace art {
class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
@@ -105,6 +106,8 @@ class ReferenceTypePropagation : public HOptimization {
void ValidateTypes();
+ Handle<mirror::ClassLoader> class_loader_;
+
// Note: hint_dex_cache_ is usually, but not necessarily, the dex cache associated with
// graph_->GetDexFile(). Since we may look up also in other dex files, it's used only
// as a hint, to reduce the number of calls to the costly ClassLinker::FindDexCache().
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index b061c871b0..84a4bab1a9 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -38,6 +38,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
Handle<mirror::DexCache>(),
handles,
true,
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index e745c73091..f07f02a719 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -42,8 +42,6 @@ void HSharpening::Run() {
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
- } else if (instruction->IsLoadClass()) {
- ProcessLoadClass(instruction->AsLoadClass());
} else if (instruction->IsLoadString()) {
ProcessLoadString(instruction->AsLoadString());
}
@@ -133,104 +131,93 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
invoke->SetDispatchInfo(dispatch_info);
}
-void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
- ScopedObjectAccess soa(Thread::Current());
- SharpenClass(load_class, codegen_, compiler_driver_);
-}
-
-void HSharpening::SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) {
+HLoadClass::LoadKind HSharpening::SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
+
if (load_class->NeedsAccessCheck()) {
// We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
-
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ } else if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
// Loading from the ArtMethod* is the most efficient retrieval in code size.
// TODO: This may not actually be true for all architectures and
// locations of target classes. The additional register pressure
// for using the ArtMethod* should be considered.
- return;
- }
-
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
+ } else {
+ const DexFile& dex_file = load_class->GetDexFile();
+ dex::TypeIndex type_index = load_class->GetTypeIndex();
- bool is_in_boot_image = false;
- HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- Runtime* runtime = Runtime::Current();
- if (codegen->GetCompilerOptions().IsBootImage()) {
- // Compiling boot image. Check if the class is a boot image class.
- DCHECK(!runtime->UseJitCompilation());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- // compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
- is_in_boot_image = true;
- desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ bool is_in_boot_image = false;
+ HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
+ Runtime* runtime = Runtime::Current();
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // Compiling boot image. Check if the class is a boot image class.
+ DCHECK(!runtime->UseJitCompilation());
+ if (!compiler_driver->GetSupportBootImageFixup()) {
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ is_in_boot_image = true;
+ desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
+ ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ } else {
+ // Not a boot image class.
+ DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ }
} else {
- // Not a boot image class.
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
- }
- } else {
- is_in_boot_image = (klass.Get() != nullptr) &&
- runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
- if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- if (is_in_boot_image) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+ if (runtime->UseJitCompilation()) {
+ // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+ // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ if (is_in_boot_image) {
+ // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else if (klass.Get() != nullptr) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ } else {
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fall back to the dex cache.
+ // TODO(ngeoffray): Generate HDeoptimize instead.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
+ // AOT app compilation. Check if the class is in the boot image.
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- } else if (klass.Get() != nullptr) {
- desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
} else {
- // Class not loaded yet. This happens when the dex code requesting
- // this `HLoadClass` hasn't been executed in the interpreter.
- // Fallback to the dex cache.
- // TODO(ngeoffray): Generate HDeoptimize instead.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
- } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
- // AOT app compilation. Check if the class is in the boot image.
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- } else {
- // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
- }
- DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
+ DCHECK_NE(desired_load_kind, HLoadClass::LoadKind::kInvalid);
- if (is_in_boot_image) {
- load_class->MarkInBootImage();
+ if (is_in_boot_image) {
+ load_class->MarkInBootImage();
+ }
+ load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
}
- HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
- switch (load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBssEntry:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
- load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kJitTableAddress:
- load_class->SetLoadKind(load_kind);
- break;
- default:
- LOG(FATAL) << "Unexpected load kind: " << load_kind;
- UNREACHABLE();
+ if (!IsSameDexFile(load_class->GetDexFile(), *dex_compilation_unit.GetDexFile())) {
+ if ((load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) ||
+ (load_kind == HLoadClass::LoadKind::kBssEntry)) {
+ // We actually cannot reference this class, so we are forced to bail:
+ // a kBssEntry load would make the entrypoint look the class up in the
+ // caller's dex file, but that dex file does not reference the class.
+ return HLoadClass::LoadKind::kInvalid;
+ }
}
+ return load_kind;
}
void HSharpening::ProcessLoadString(HLoadString* load_string) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae3d83ef2c..4240b2f339 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#include "nodes.h"
#include "optimization.h"
namespace art {
@@ -24,7 +25,6 @@ namespace art {
class CodeGenerator;
class CompilerDriver;
class DexCompilationUnit;
-class HInvokeStaticOrDirect;
// Optimization that tries to improve the way we dispatch methods and access types,
// fields, etc. Besides actual method sharpening based on receiver type (for example
@@ -47,15 +47,15 @@ class HSharpening : public HOptimization {
static constexpr const char* kSharpeningPassName = "sharpening";
- // Used internally but also by the inliner.
- static void SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver)
+ // Used by the builder and the inliner.
+ static HLoadClass::LoadKind SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
- void ProcessLoadClass(HLoadClass* load_class);
void ProcessLoadString(HLoadString* load_string);
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ae1e369999..50ab11bc23 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -17,8 +17,10 @@
#include "ssa_builder.h"
#include "bytecode_utils.h"
+#include "mirror/class-inl.h"
#include "nodes.h"
#include "reference_type_propagation.h"
+#include "scoped_thread_state_change-inl.h"
#include "ssa_phi_elimination.h"
namespace art {
@@ -497,7 +499,11 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// 4) Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
- ReferenceTypePropagation(graph_, dex_cache_, handles_, /* is_first_run */ true).Run();
+ ReferenceTypePropagation(graph_,
+ class_loader_,
+ dex_cache_,
+ handles_,
+ /* is_first_run */ true).Run();
// 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 45dac54115..978f113ec4 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -48,9 +48,11 @@ namespace art {
class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
+ class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
@@ -115,6 +117,7 @@ class SsaBuilder : public ValueObject {
void RemoveRedundantUninitializedStrings();
HGraph* graph_;
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 668108daa4..f8e01b7537 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -18,8 +18,9 @@
#include <unordered_map>
+#include "art_method-inl.h"
#include "base/stl_util.h"
-#include "art_method.h"
+#include "optimizing/optimizing_compiler.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
@@ -107,11 +108,6 @@ void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t
current_dex_register_++;
}
-static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
- // Note: the runtime is null only for unit testing.
- return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
-}
-
void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
uint32_t num_dex_registers,
@@ -157,56 +153,35 @@ CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const {
}
size_t StackMapStream::PrepareForFillIn() {
- const size_t stack_mask_size_in_bits = stack_mask_max_ + 1; // Need room for max element too.
- const size_t number_of_stack_masks = PrepareStackMasks(stack_mask_size_in_bits);
- const size_t register_mask_size_in_bits = MinimumBitsToStore(register_mask_max_);
- const size_t number_of_register_masks = PrepareRegisterMasks();
- dex_register_maps_size_ = ComputeDexRegisterMapsSize();
- ComputeInlineInfoEncoding(); // needs dex_register_maps_size_.
- inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize();
+ CodeInfoEncoding encoding;
+ encoding.dex_register_map.num_entries = 0; // TODO: Remove this field.
+ encoding.dex_register_map.num_bytes = ComputeDexRegisterMapsSize();
+ encoding.location_catalog.num_entries = location_catalog_entries_.size();
+ encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
+ encoding.inline_info.num_entries = inline_infos_.size();
+ ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
+ encoding.dex_register_map.num_bytes);
CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
- // The stack map contains compressed native PC offsets.
- const size_t stack_map_size = stack_map_encoding_.SetFromSizes(
+ // Prepare the CodeInfo variable-sized encoding.
+ encoding.stack_mask.encoding.num_bits = stack_mask_max_ + 1; // Need room for max element too.
+ encoding.stack_mask.num_entries = PrepareStackMasks(encoding.stack_mask.encoding.num_bits);
+ encoding.register_mask.encoding.num_bits = MinimumBitsToStore(register_mask_max_);
+ encoding.register_mask.num_entries = PrepareRegisterMasks();
+ encoding.stack_map.num_entries = stack_maps_.size();
+ encoding.stack_map.encoding.SetFromSizes(
+ // The stack map contains compressed native PC offsets.
max_native_pc_offset.CompressedValue(),
dex_pc_max_,
- dex_register_maps_size_,
- inline_info_size_,
- number_of_register_masks,
- number_of_stack_masks);
- stack_maps_size_ = RoundUp(stack_maps_.size() * stack_map_size, kBitsPerByte) / kBitsPerByte;
- dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
- const size_t stack_masks_bits = number_of_stack_masks * stack_mask_size_in_bits;
- const size_t register_masks_bits = number_of_register_masks * register_mask_size_in_bits;
- // Register masks are last, stack masks are right before that last.
- // They are both bit packed / aligned.
- const size_t non_header_size =
- stack_maps_size_ +
- dex_register_location_catalog_size_ +
- dex_register_maps_size_ +
- inline_info_size_ +
- RoundUp(stack_masks_bits + register_masks_bits, kBitsPerByte) / kBitsPerByte;
-
- // Prepare the CodeInfo variable-sized encoding.
- CodeInfoEncoding code_info_encoding;
- code_info_encoding.non_header_size = non_header_size;
- code_info_encoding.number_of_stack_maps = stack_maps_.size();
- code_info_encoding.number_of_stack_masks = number_of_stack_masks;
- code_info_encoding.number_of_register_masks = number_of_register_masks;
- code_info_encoding.stack_mask_size_in_bits = stack_mask_size_in_bits;
- code_info_encoding.register_mask_size_in_bits = register_mask_size_in_bits;
- code_info_encoding.stack_map_encoding = stack_map_encoding_;
- code_info_encoding.inline_info_encoding = inline_info_encoding_;
- code_info_encoding.number_of_location_catalog_entries = location_catalog_entries_.size();
- code_info_encoding.Compress(&code_info_encoding_);
-
- // TODO: Move the catalog at the end. It is currently too expensive at runtime
- // to compute its size (note that we do not encode that size in the CodeInfo).
- dex_register_location_catalog_start_ = code_info_encoding_.size() + stack_maps_size_;
- dex_register_maps_start_ =
- dex_register_location_catalog_start_ + dex_register_location_catalog_size_;
- inline_infos_start_ = dex_register_maps_start_ + dex_register_maps_size_;
-
- needed_size_ = code_info_encoding_.size() + non_header_size;
+ encoding.dex_register_map.num_bytes,
+ encoding.inline_info.num_entries,
+ encoding.register_mask.num_entries,
+ encoding.stack_mask.num_entries);
+ DCHECK_EQ(code_info_encoding_.size(), 0u);
+ encoding.Compress(&code_info_encoding_);
+ // Compute table offsets so we can get the non-header size.
+ encoding.ComputeTableOffsets();
+ DCHECK_EQ(encoding.HeaderSize(), code_info_encoding_.size());
+ needed_size_ = code_info_encoding_.size() + encoding.NonHeaderSize();
return needed_size_;
}
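The rewritten PrepareForFillIn funnels every table through one CodeInfoEncoding value: each table records its entry count and byte size, and the byte offsets fall out of a single ComputeTableOffsets pass. A minimal self-contained sketch of that layout pattern, using hypothetical names (ByteSizedTable, MiniCodeInfoEncoding) rather than the real ART types:

#include <cstddef>
#include <initializer_list>

// Per-table bookkeeping: sizes are set while measuring, offsets are derived.
struct ByteSizedTable {
  size_t num_entries = 0;  // Element count, emitted into the header.
  size_t num_bytes = 0;    // Total payload size of this table.
  size_t byte_offset = 0;  // Derived: where the table starts in the region.
};

struct MiniCodeInfoEncoding {
  ByteSizedTable stack_map;
  ByteSizedTable location_catalog;
  ByteSizedTable dex_register_map;
  ByteSizedTable inline_info;

  // Lay the tables out back to back after a header of header_size bytes,
  // so per-table offsets never need to be encoded explicitly.
  void ComputeTableOffsets(size_t header_size) {
    size_t offset = header_size;
    for (ByteSizedTable* table :
         {&stack_map, &location_catalog, &dex_register_map, &inline_info}) {
      table->byte_offset = offset;
      offset += table->num_bytes;
    }
  }

  size_t NonHeaderSize() const {
    return stack_map.num_bytes + location_catalog.num_bytes +
           dex_register_map.num_bytes + inline_info.num_bytes;
  }
};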
@@ -259,7 +234,8 @@ size_t StackMapStream::ComputeDexRegisterMapsSize() const {
return size;
}
-void StackMapStream::ComputeInlineInfoEncoding() {
+void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
+ size_t dex_register_maps_bytes) {
uint32_t method_index_max = 0;
uint32_t dex_pc_max = DexFile::kDexNoIndex;
uint32_t extra_data_max = 0;
@@ -285,10 +261,7 @@ void StackMapStream::ComputeInlineInfoEncoding() {
}
DCHECK_EQ(inline_info_index, inline_infos_.size());
- inline_info_encoding_.SetFromSizes(method_index_max,
- dex_pc_max,
- extra_data_max,
- dex_register_maps_size_);
+ encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
}
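SetFromSizes sizes each bit-packed field from the largest value it must hold, so a method with small dex pcs or method indices gets a correspondingly narrow encoding. A rough sketch of the technique; MinimumBitsToStore here is a local stand-in, not ART's helper:

#include <cstddef>
#include <cstdint>

// Number of bits needed to represent max_value (0 needs 0 bits).
constexpr uint32_t MinimumBitsToStore(uint32_t max_value) {
  uint32_t bits = 0;
  while (max_value != 0) {
    ++bits;
    max_value >>= 1;
  }
  return bits;
}

struct PackedFieldEncoding {
  uint32_t bit_offset = 0;
  uint32_t bit_size = 0;
};

// Assign consecutive bit ranges, each just wide enough for its maximum.
inline uint32_t SetFromSizes(PackedFieldEncoding* fields,
                             const uint32_t* max_values,
                             size_t count) {
  uint32_t offset = 0;
  for (size_t i = 0; i < count; ++i) {
    fields[i].bit_offset = offset;
    fields[i].bit_size = MinimumBitsToStore(max_values[i]);
    offset += fields[i].bit_size;
  }
  return offset;  // Total bits per table entry.
}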
void StackMapStream::FillIn(MemoryRegion region) {
@@ -303,19 +276,18 @@ void StackMapStream::FillIn(MemoryRegion region) {
// Write the CodeInfo header.
region.CopyFrom(0, MemoryRegion(code_info_encoding_.data(), code_info_encoding_.size()));
- MemoryRegion dex_register_locations_region = region.Subregion(
- dex_register_maps_start_, dex_register_maps_size_);
-
- MemoryRegion inline_infos_region = region.Subregion(
- inline_infos_start_, inline_info_size_);
-
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
- DCHECK_EQ(code_info.GetStackMapsSize(encoding), stack_maps_size_);
+ DCHECK_EQ(encoding.stack_map.num_entries, stack_maps_.size());
+
+ MemoryRegion dex_register_locations_region = region.Subregion(
+ encoding.dex_register_map.byte_offset,
+ encoding.dex_register_map.num_bytes);
// Set the Dex register location catalog.
MemoryRegion dex_register_location_catalog_region = region.Subregion(
- dex_register_location_catalog_start_, dex_register_location_catalog_size_);
+ encoding.location_catalog.byte_offset,
+ encoding.location_catalog.num_bytes);
DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
// Offset in `dex_register_location_catalog` where to store the next
// register location.
@@ -329,27 +301,27 @@ void StackMapStream::FillIn(MemoryRegion region) {
ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
uintptr_t next_dex_register_map_offset = 0;
- uintptr_t next_inline_info_offset = 0;
+ uintptr_t next_inline_info_index = 0;
for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
StackMap stack_map = code_info.GetStackMapAt(i, encoding);
StackMapEntry entry = stack_maps_[i];
- stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
- stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset);
- stack_map.SetRegisterMaskIndex(stack_map_encoding_, entry.register_mask_index);
- stack_map.SetStackMaskIndex(stack_map_encoding_, entry.stack_mask_index);
+ stack_map.SetDexPc(encoding.stack_map.encoding, entry.dex_pc);
+ stack_map.SetNativePcCodeOffset(encoding.stack_map.encoding, entry.native_pc_code_offset);
+ stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
+ stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
// No dex map available.
- stack_map.SetDexRegisterMapOffset(stack_map_encoding_, StackMap::kNoDexRegisterMap);
+ stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, StackMap::kNoDexRegisterMap);
} else {
// Search for an entry with the same dex map.
if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
// If we have a hit reuse the offset.
stack_map.SetDexRegisterMapOffset(
- stack_map_encoding_,
+ encoding.stack_map.encoding,
code_info.GetStackMapAt(entry.same_dex_register_map_as_, encoding)
- .GetDexRegisterMapOffset(stack_map_encoding_));
+ .GetDexRegisterMapOffset(encoding.stack_map.encoding));
} else {
// New dex registers maps should be added to the stack map.
MemoryRegion register_region = dex_register_locations_region.Subregion(
@@ -358,7 +330,8 @@ void StackMapStream::FillIn(MemoryRegion region) {
next_dex_register_map_offset += register_region.size();
DexRegisterMap dex_register_map(register_region);
stack_map.SetDexRegisterMapOffset(
- stack_map_encoding_, register_region.begin() - dex_register_locations_region.begin());
+ encoding.stack_map.encoding,
+ register_region.begin() - dex_register_locations_region.begin());
// Set the dex register location.
FillInDexRegisterMap(dex_register_map,
@@ -370,37 +343,37 @@ void StackMapStream::FillIn(MemoryRegion region) {
// Set the inlining info.
if (entry.inlining_depth != 0) {
- MemoryRegion inline_region = inline_infos_region.Subregion(
- next_inline_info_offset,
- entry.inlining_depth * inline_info_encoding_.GetEntrySize());
- next_inline_info_offset += inline_region.size();
- InlineInfo inline_info(inline_region);
+ InlineInfo inline_info = code_info.GetInlineInfo(next_inline_info_index, encoding);
- // Currently relative to the dex register map.
- stack_map.SetInlineDescriptorOffset(
- stack_map_encoding_, inline_region.begin() - dex_register_locations_region.begin());
+ // Fill in the index.
+ stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, next_inline_info_index);
+ DCHECK_EQ(next_inline_info_index, entry.inline_infos_start_index);
+ next_inline_info_index += entry.inlining_depth;
- inline_info.SetDepth(inline_info_encoding_, entry.inlining_depth);
+ inline_info.SetDepth(encoding.inline_info.encoding, entry.inlining_depth);
DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
+
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
if (inline_entry.method != nullptr) {
inline_info.SetMethodIndexAtDepth(
- inline_info_encoding_,
+ encoding.inline_info.encoding,
depth,
High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
inline_info.SetExtraDataAtDepth(
- inline_info_encoding_,
+ encoding.inline_info.encoding,
depth,
Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
} else {
- inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
- inline_info.SetExtraDataAtDepth(inline_info_encoding_, depth, 1);
+ inline_info.SetMethodIndexAtDepth(encoding.inline_info.encoding,
+ depth,
+ inline_entry.method_index);
+ inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
}
- inline_info.SetDexPcAtDepth(inline_info_encoding_, depth, inline_entry.dex_pc);
+ inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
if (inline_entry.num_dex_registers == 0) {
// No dex map available.
- inline_info.SetDexRegisterMapOffsetAtDepth(inline_info_encoding_,
+ inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
depth,
StackMap::kNoDexRegisterMap);
DCHECK(inline_entry.live_dex_registers_mask == nullptr);
@@ -412,8 +385,9 @@ void StackMapStream::FillIn(MemoryRegion region) {
next_dex_register_map_offset += register_region.size();
DexRegisterMap dex_register_map(register_region);
inline_info.SetDexRegisterMapOffsetAtDepth(
- inline_info_encoding_,
- depth, register_region.begin() - dex_register_locations_region.begin());
+ encoding.inline_info.encoding,
+ depth,
+ register_region.begin() - dex_register_locations_region.begin());
FillInDexRegisterMap(dex_register_map,
inline_entry.num_dex_registers,
@@ -421,30 +395,28 @@ void StackMapStream::FillIn(MemoryRegion region) {
inline_entry.dex_register_locations_start_index);
}
}
- } else {
- if (inline_info_size_ != 0) {
- stack_map.SetInlineDescriptorOffset(stack_map_encoding_, StackMap::kNoInlineInfo);
- }
+ } else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+ stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
}
}
// Write stack masks table.
- size_t stack_mask_bits = encoding.stack_mask_size_in_bits;
+ const size_t stack_mask_bits = encoding.stack_mask.encoding.BitSize();
if (stack_mask_bits > 0) {
size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte;
- for (size_t i = 0; i < encoding.number_of_stack_masks; ++i) {
+ for (size_t i = 0; i < encoding.stack_mask.num_entries; ++i) {
MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes);
- BitMemoryRegion stack_mask = code_info.GetStackMask(encoding, i);
- for (size_t bit_index = 0; bit_index < encoding.stack_mask_size_in_bits; ++bit_index) {
+ BitMemoryRegion stack_mask = code_info.GetStackMask(i, encoding);
+ for (size_t bit_index = 0; bit_index < stack_mask_bits; ++bit_index) {
stack_mask.StoreBit(bit_index, source.LoadBit(bit_index));
}
}
}
// Write register masks table.
- for (size_t i = 0; i < encoding.number_of_register_masks; ++i) {
- BitMemoryRegion register_mask = code_info.GetRegisterMask(encoding, i);
- register_mask.StoreBits(0, register_masks_[i], encoding.register_mask_size_in_bits);
+ for (size_t i = 0; i < encoding.register_mask.num_entries; ++i) {
+ BitMemoryRegion register_mask = code_info.GetRegisterMask(i, encoding);
+ register_mask.StoreBits(0, register_masks_[i], encoding.register_mask.encoding.BitSize());
}
// Verify all written data in debug build.
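The stack-mask and register-mask tables written above hold deduplicated entries: PrepareStackMasks and PrepareRegisterMasks collapse identical masks, and each stack map stores only a small table index. A self-contained sketch of that dedupe-into-a-table pattern (MaskTable is hypothetical, not ART code):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

class MaskTable {
 public:
  // Returns the table index for mask, inserting it on first sight.
  uint32_t Dedupe(uint32_t mask) {
    auto it = index_of_.find(mask);
    if (it != index_of_.end()) {
      return it->second;
    }
    uint32_t index = static_cast<uint32_t>(masks_.size());
    masks_.push_back(mask);
    index_of_.emplace(mask, index);
    return index;
  }

  size_t NumEntries() const { return masks_.size(); }

 private:
  std::vector<uint32_t> masks_;            // Packed table that gets emitted.
  std::map<uint32_t, uint32_t> index_of_;  // Reverse lookup for dedup.
};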
@@ -546,7 +518,8 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info,
}
// Compare to the seen location.
if (expected.GetKind() == DexRegisterLocation::Kind::kNone) {
- DCHECK(!dex_register_map.IsValid() || !dex_register_map.IsDexRegisterLive(reg));
+ DCHECK(!dex_register_map.IsValid() || !dex_register_map.IsDexRegisterLive(reg))
+ << dex_register_map.IsValid() << " " << dex_register_map.IsDexRegisterLive(reg);
} else {
DCHECK(dex_register_map.IsDexRegisterLive(reg));
DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(
@@ -599,7 +572,7 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
DCHECK_EQ(code_info.GetNumberOfStackMaps(encoding), stack_maps_.size());
for (size_t s = 0; s < stack_maps_.size(); ++s) {
const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
- const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
+ const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
StackMapEntry entry = stack_maps_[s];
// Check main stack map fields.
@@ -633,18 +606,18 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
if (entry.inlining_depth != 0) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- DCHECK_EQ(inline_info.GetDepth(encoding.inline_info_encoding), entry.inlining_depth);
+ DCHECK_EQ(inline_info.GetDepth(encoding.inline_info.encoding), entry.inlining_depth);
for (size_t d = 0; d < entry.inlining_depth; ++d) {
size_t inline_info_index = entry.inline_infos_start_index + d;
DCHECK_LT(inline_info_index, inline_infos_.size());
InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
- DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, d),
+ DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, d),
inline_entry.dex_pc);
- if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, d)) {
- DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info_encoding, d),
+ if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, d)) {
+ DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
inline_entry.method);
} else {
- DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
+ DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info.encoding, d),
inline_entry.method_index);
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index b1069a17be..08c1d3e3c0 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -79,13 +79,6 @@ class StackMapStream : public ValueObject {
current_entry_(),
current_inline_info_(),
code_info_encoding_(allocator->Adapter(kArenaAllocStackMapStream)),
- inline_info_size_(0),
- dex_register_maps_size_(0),
- stack_maps_size_(0),
- dex_register_location_catalog_size_(0),
- dex_register_location_catalog_start_(0),
- dex_register_maps_start_(0),
- inline_infos_start_(0),
needed_size_(0),
current_dex_register_(0),
in_inline_frame_(false) {
@@ -160,7 +153,8 @@ class StackMapStream : public ValueObject {
size_t ComputeDexRegisterMapSize(uint32_t num_dex_registers,
const BitVector* live_dex_registers_mask) const;
size_t ComputeDexRegisterMapsSize() const;
- void ComputeInlineInfoEncoding();
+ void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
+ size_t dex_register_maps_bytes);
CodeOffset ComputeMaxNativePcCodeOffset() const;
@@ -214,16 +208,7 @@ class StackMapStream : public ValueObject {
StackMapEntry current_entry_;
InlineInfoEntry current_inline_info_;
- StackMapEncoding stack_map_encoding_;
- InlineInfoEncoding inline_info_encoding_;
ArenaVector<uint8_t> code_info_encoding_;
- size_t inline_info_size_;
- size_t dex_register_maps_size_;
- size_t stack_maps_size_;
- size_t dex_register_location_catalog_size_;
- size_t dex_register_location_catalog_start_;
- size_t dex_register_maps_start_;
- size_t inline_infos_start_;
size_t needed_size_;
uint32_t current_dex_register_;
bool in_inline_frame_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index ce6d5c2b22..bd0aa6dea7 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -32,10 +32,10 @@ static bool CheckStackMask(
const StackMap& stack_map,
const BitVector& bit_vector) {
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
- if (bit_vector.GetNumberOfBits() > encoding.stack_mask_size_in_bits) {
+ if (bit_vector.GetNumberOfBits() > encoding.stack_mask.encoding.BitSize()) {
return false;
}
- for (size_t i = 0; i < encoding.stack_mask_size_in_bits; ++i) {
+ for (size_t i = 0; i < encoding.stack_mask.encoding.BitSize(); ++i) {
if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
return false;
}
@@ -78,13 +78,13 @@ TEST(StackMapTest, Test1) {
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -123,7 +123,7 @@ TEST(StackMapTest, Test1) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
TEST(StackMapTest, Test2) {
@@ -193,13 +193,13 @@ TEST(StackMapTest, Test2) {
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -238,13 +238,13 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info.encoding));
+ ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
}
// Second stack map.
@@ -252,13 +252,13 @@ TEST(StackMapTest, Test2) {
StackMap stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
- ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -298,7 +298,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(18, location0.GetValue());
ASSERT_EQ(3, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
// Third stack map.
@@ -306,13 +306,13 @@ TEST(StackMapTest, Test2) {
StackMap stack_map = code_info.GetStackMapAt(2, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
- ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -352,7 +352,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(6, location0.GetValue());
ASSERT_EQ(8, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
// Fourth stack map.
@@ -360,13 +360,13 @@ TEST(StackMapTest, Test2) {
StackMap stack_map = code_info.GetStackMapAt(3, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
- ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -406,7 +406,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(3, location0.GetValue());
ASSERT_EQ(1, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
}
@@ -442,11 +442,11 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
@@ -483,7 +483,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
// Generate a stack map whose dex register offset is
@@ -543,13 +543,13 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ASSERT_EQ(255u, dex_register_map0.Size());
StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
- ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map.encoding));
// ...the offset of the second Dex register map (relative to the
// beginning of the Dex register maps region) is 255 (i.e.,
// kNoDexRegisterMapSmallEncoding).
- ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+ ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
StackMap::kNoDexRegisterMap);
- ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding), 0xFFu);
+ ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding), 0xFFu);
}
TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -602,12 +602,12 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
// Verify dex register map offsets.
- ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
- sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding));
- ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
- sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
- ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
- sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+ sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+ ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+ sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+ ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+ sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
}
TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -641,22 +641,22 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
- ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
}
TEST(StackMapTest, InlineTest) {
@@ -743,11 +743,11 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
- ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info.encoding));
+ ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -767,13 +767,13 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
- ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info.encoding));
+ ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -783,7 +783,7 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci, encoding));
ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
- ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 2));
}
{
@@ -793,7 +793,7 @@ TEST(StackMapTest, InlineTest) {
DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
- ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map_encoding));
+ ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map.encoding));
}
{
@@ -805,15 +805,15 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
- ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info.encoding));
+ ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+ ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+ ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
- ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 0));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, encoding, 1);
ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci, encoding));
@@ -865,8 +865,8 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding);
StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding);
- EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map_encoding),
- stack_map2.GetStackMaskIndex(encoding.stack_map_encoding));
+ EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map.encoding),
+ stack_map2.GetStackMaskIndex(encoding.stack_map.encoding));
}
} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 196d8d4220..192fc270f9 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1644,6 +1644,12 @@ class Dex2Oat FINAL {
dex_caches_.push_back(soa.AddLocalReference<jobject>(
class_linker->RegisterDexFile(*dex_file,
soa.Decode<mirror::ClassLoader>(class_loader_).Ptr())));
+ if (dex_caches_.back() == nullptr) {
+ soa.Self()->AssertPendingException();
+ soa.Self()->ClearException();
+ PLOG(ERROR) << "Failed to register dex file.";
+ return false;
+ }
// Pre-register dex files so that we can access verification results without locks during
// compilation and verification.
verification_results_->AddDexFile(dex_file);
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index c2275aca95..90b49556b7 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -231,7 +231,7 @@ class Dex2oatSwapTest : public Dex2oatTest {
}
virtual std::string GetTestDexFileName() {
- return GetDexSrc1();
+ return Dex2oatEnvironmentTest::GetTestDexFileName("VerifierDeps");
}
virtual void CheckResult(bool expect_use) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 0f02da77a1..147be4afa7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -586,20 +586,26 @@ class OatDumper {
kByteKindQuickMethodHeader,
kByteKindCodeInfoLocationCatalog,
kByteKindCodeInfoDexRegisterMap,
- kByteKindCodeInfoInlineInfo,
kByteKindCodeInfoEncoding,
- kByteKindCodeInfoOther,
kByteKindCodeInfoStackMasks,
kByteKindCodeInfoRegisterMasks,
kByteKindStackMapNativePc,
kByteKindStackMapDexPc,
kByteKindStackMapDexRegisterMap,
- kByteKindStackMapInlineInfo,
+ kByteKindStackMapInlineInfoIndex,
kByteKindStackMapRegisterMaskIndex,
kByteKindStackMapStackMaskIndex,
+ kByteKindInlineInfoMethodIndex,
+ kByteKindInlineInfoDexPc,
+ kByteKindInlineInfoExtraData,
+ kByteKindInlineInfoDexRegisterMap,
+ kByteKindInlineInfoIsLast,
kByteKindCount,
- kByteKindStackMapFirst = kByteKindCodeInfoOther,
+ // Special ranges for std::accumulate convenience.
+ kByteKindStackMapFirst = kByteKindStackMapNativePc,
kByteKindStackMapLast = kByteKindStackMapStackMaskIndex,
+ kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndex,
+ kByteKindInlineInfoLast = kByteKindInlineInfoIsLast,
};
int64_t bits[kByteKindCount] = {};
// Since code has deduplication, seen tracks already seen pointers to avoid double counting
@@ -624,17 +630,17 @@ class OatDumper {
const int64_t sum = std::accumulate(bits, bits + kByteKindCount, 0u);
os.Stream() << "Dumping cumulative use of " << sum / kBitsPerByte << " accounted bytes\n";
if (sum > 0) {
- const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
- bits + kByteKindStackMapLast + 1,
- 0u);
Dump(os, "Code ", bits[kByteKindCode], sum);
Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum);
Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum);
Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum);
Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum);
- Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum);
Dump(os, "CodeInfoStackMasks ", bits[kByteKindCodeInfoStackMasks], sum);
Dump(os, "CodeInfoRegisterMasks ", bits[kByteKindCodeInfoRegisterMasks], sum);
+ // Stack map section.
+ const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
+ bits + kByteKindStackMapLast + 1,
+ int64_t{0});
Dump(os, "CodeInfoStackMap ", stack_map_bits, sum);
{
ScopedIndentation indent1(&os);
@@ -654,8 +660,8 @@ class OatDumper {
stack_map_bits,
"stack map");
Dump(os,
- "StackMapInlineInfo ",
- bits[kByteKindStackMapInlineInfo],
+ "StackMapInlineInfoIndex ",
+ bits[kByteKindStackMapInlineInfoIndex],
stack_map_bits,
"stack map");
Dump(os,
@@ -669,6 +675,39 @@ class OatDumper {
stack_map_bits,
"stack map");
}
+ // Inline info section.
+ const int64_t inline_info_bits = std::accumulate(bits + kByteKindInlineInfoFirst,
+ bits + kByteKindInlineInfoLast + 1,
+ int64_t{0});
+ Dump(os, "CodeInfoInlineInfo ", inline_info_bits, sum);
+ {
+ ScopedIndentation indent1(&os);
+ Dump(os,
+ "InlineInfoMethodIndex ",
+ bits[kByteKindInlineInfoMethodIndex],
+ inline_info_bits,
+ "inline info");
+ Dump(os,
+ "InlineInfoDexPc ",
+ bits[kByteKindInlineInfoDexPc],
+ inline_info_bits,
+ "inline info");
+ Dump(os,
+ "InlineInfoExtraData ",
+ bits[kByteKindInlineInfoExtraData],
+ inline_info_bits,
+ "inline info");
+ Dump(os,
+ "InlineInfoDexRegisterMap ",
+ bits[kByteKindInlineInfoDexRegisterMap],
+ inline_info_bits,
+ "inline info");
+ Dump(os,
+ "InlineInfoIsLast ",
+ bits[kByteKindInlineInfoIsLast],
+ inline_info_bits,
+ "inline info");
+ }
}
os.Stream() << "\n" << std::flush;
}
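The kByteKind*First/Last aliases turn each stats section into a contiguous enum subrange that std::accumulate can sum directly. A minimal sketch of the idiom; the explicit int64_t seed (as in the accumulate calls above) matters because an unsigned literal would make accumulate do its arithmetic in unsigned int:

#include <cstdint>
#include <numeric>

enum ByteKind {
  kKindA,
  kKindB,
  kKindC,
  kKindCount,
  // Subrange markers alias existing enumerators, adding no storage.
  kKindFirst = kKindB,
  kKindLast = kKindC,
};

inline int64_t SumRange(const int64_t (&bits)[kKindCount]) {
  // +1 because Last is inclusive; int64_t{0} keeps the sum 64-bit.
  return std::accumulate(bits + kKindFirst, bits + kKindLast + 1, int64_t{0});
}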
@@ -1434,6 +1473,7 @@ class OatDumper {
Runtime* const runtime = Runtime::Current();
Handle<mirror::DexCache> dex_cache(
hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file, nullptr)));
+ CHECK(dex_cache.Get() != nullptr);
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1461,8 +1501,8 @@ class OatDumper {
StackMap last = code_info_.GetStackMapAt(0u, encoding_);
for (size_t i = 1; i != number_of_stack_maps_; ++i) {
StackMap current = code_info_.GetStackMapAt(i, encoding_);
- if (last.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set) >
- current.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set)) {
+ if (last.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set) >
+ current.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set)) {
ordered = false;
break;
}
@@ -1478,16 +1518,16 @@ class OatDumper {
indexes_.end(),
[this](size_t lhs, size_t rhs) {
StackMap left = code_info_.GetStackMapAt(lhs, encoding_);
- uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map_encoding,
+ uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map.encoding,
instruction_set_);
StackMap right = code_info_.GetStackMapAt(rhs, encoding_);
- uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map_encoding,
+ uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map.encoding,
instruction_set_);
// If the PCs are the same, compare indexes to preserve the original order.
return (left_pc < right_pc) || (left_pc == right_pc && lhs < rhs);
});
}
- offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map_encoding,
+ offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map.encoding,
instruction_set_);
}
}
@@ -1512,7 +1552,7 @@ class OatDumper {
++stack_map_index_;
offset_ = (stack_map_index_ == number_of_stack_maps_)
? static_cast<uint32_t>(-1)
- : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map_encoding,
+ : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map.encoding,
instruction_set_);
}
@@ -1550,14 +1590,14 @@ class OatDumper {
StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
{
CodeInfoEncoding encoding(helper.GetEncoding());
- StackMapEncoding stack_map_encoding(encoding.stack_map_encoding);
- // helper.GetCodeInfo().GetStackMapAt(0, encoding).;
- const size_t num_stack_maps = encoding.number_of_stack_maps;
+ StackMapEncoding stack_map_encoding(encoding.stack_map.encoding);
+ const size_t num_stack_maps = encoding.stack_map.num_entries;
std::vector<uint8_t> size_vector;
encoding.Compress(&size_vector);
if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfoEncoding,
size_vector.size() * kBitsPerByte,
oat_method.GetVmapTable())) {
+ // Stack maps
stats_.AddBits(
Stats::kByteKindStackMapNativePc,
stack_map_encoding.GetNativePcEncoding().BitSize() * num_stack_maps);
@@ -1568,7 +1608,7 @@ class OatDumper {
Stats::kByteKindStackMapDexRegisterMap,
stack_map_encoding.GetDexRegisterMapEncoding().BitSize() * num_stack_maps);
stats_.AddBits(
- Stats::kByteKindStackMapInlineInfo,
+ Stats::kByteKindStackMapInlineInfoIndex,
stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
stats_.AddBits(
Stats::kByteKindStackMapRegisterMaskIndex,
@@ -1576,30 +1616,47 @@ class OatDumper {
stats_.AddBits(
Stats::kByteKindStackMapStackMaskIndex,
stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps);
+
+ // Stack masks
stats_.AddBits(
Stats::kByteKindCodeInfoStackMasks,
- helper.GetCodeInfo().GetNumberOfStackMaskBits(encoding) *
- encoding.number_of_stack_masks);
+ encoding.stack_mask.encoding.BitSize() * encoding.stack_mask.num_entries);
+
+ // Register masks
stats_.AddBits(
Stats::kByteKindCodeInfoRegisterMasks,
- encoding.register_mask_size_in_bits * encoding.number_of_stack_masks);
- const size_t stack_map_bytes = helper.GetCodeInfo().GetStackMapsSize(encoding);
+ encoding.register_mask.encoding.BitSize() * encoding.register_mask.num_entries);
+
+ // Location catalog
const size_t location_catalog_bytes =
helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
kBitsPerByte * location_catalog_bytes);
+ // Dex register bytes.
const size_t dex_register_bytes =
helper.GetCodeInfo().GetDexRegisterMapsSize(encoding, code_item->registers_size_);
stats_.AddBits(
Stats::kByteKindCodeInfoDexRegisterMap,
kBitsPerByte * dex_register_bytes);
- const size_t inline_info_bytes =
- encoding.non_header_size -
- stack_map_bytes -
- location_catalog_bytes -
- dex_register_bytes;
- stats_.AddBits(Stats::kByteKindCodeInfoInlineInfo,
- inline_info_bytes * kBitsPerByte);
+
+ // Inline infos.
+ const size_t num_inline_infos = encoding.inline_info.num_entries;
+ if (num_inline_infos > 0u) {
+ stats_.AddBits(
+ Stats::kByteKindInlineInfoMethodIndex,
+ encoding.inline_info.encoding.GetMethodIndexEncoding().BitSize() * num_inline_infos);
+ stats_.AddBits(
+ Stats::kByteKindInlineInfoDexPc,
+ encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos);
+ stats_.AddBits(
+ Stats::kByteKindInlineInfoExtraData,
+ encoding.inline_info.encoding.GetExtraDataEncoding().BitSize() * num_inline_infos);
+ stats_.AddBits(
+ Stats::kByteKindInlineInfoDexRegisterMap,
+ encoding.inline_info.encoding.GetDexRegisterMapEncoding().BitSize() *
+ num_inline_infos);
+ stats_.AddBits(Stats::kByteKindInlineInfoIsLast, num_inline_infos);
+ }
}
}
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
@@ -2178,9 +2235,14 @@ class ImageDumper {
ScopedIndentation indent2(&state->vios_);
auto* resolved_types = dex_cache->GetResolvedTypes();
for (size_t i = 0; i < num_types; ++i) {
- auto* elem = resolved_types[i].Read();
+ auto pair = resolved_types[i].load(std::memory_order_relaxed);
size_t run = 0;
- for (size_t j = i + 1; j != num_types && elem == resolved_types[j].Read(); ++j) {
+ for (size_t j = i + 1; j != num_types; ++j) {
+ auto other_pair = resolved_types[j].load(std::memory_order_relaxed);
+ if (pair.index != other_pair.index ||
+ pair.object.Read() != other_pair.object.Read()) {
+ break;
+ }
++run;
}
if (run == 0) {
@@ -2190,12 +2252,13 @@ class ImageDumper {
i = i + run;
}
std::string msg;
+ auto* elem = pair.object.Read();
if (elem == nullptr) {
msg = "null";
} else {
msg = elem->PrettyClass();
}
- os << StringPrintf("%p %s\n", elem, msg.c_str());
+ os << StringPrintf("%p %u %s\n", elem, pair.index, msg.c_str());
}
}
}
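The ImageDumper hunk above collapses runs of identical (index, class) pairs into a single "i to j" line instead of repeating them. The same run-length grouping, reduced to a self-contained sketch over plain pairs:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

using Entry = std::pair<unsigned, const void*>;  // (type index, class ptr)

inline void DumpWithRuns(const std::vector<Entry>& entries) {
  for (size_t i = 0; i < entries.size(); ++i) {
    size_t run = 0;
    while (i + run + 1 < entries.size() && entries[i + run + 1] == entries[i]) {
      ++run;
    }
    if (run == 0) {
      std::printf("%zu: ", i);
    } else {
      std::printf("%zu to %zu: ", i, i + run);
      i += run;  // Skip the rest of the run.
    }
    std::printf("%p %u\n", entries[i].second, entries[i].first);
  }
}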
@@ -2690,7 +2753,9 @@ static jobject InstallOatFile(Runtime* runtime,
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
- class_linker->RegisterDexFile(*dex_file, nullptr);
+ ObjPtr<mirror::DexCache> dex_cache =
+ class_linker->RegisterDexFile(*dex_file, nullptr);
+ CHECK(dex_cache != nullptr);
class_path->push_back(dex_file);
}
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 503cd4d581..c7c3ddd7cc 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -104,6 +104,7 @@ class OatDumpTest : public CommonRuntimeTest {
expected_prefixes.push_back("DEX CODE:");
expected_prefixes.push_back("CODE:");
expected_prefixes.push_back("CodeInfoEncoding");
+ expected_prefixes.push_back("CodeInfoInlineInfo");
}
if (mode == kModeArt) {
exec_argv.push_back("--image=" + core_art_location_);
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 9a73830f99..2546822613 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -643,8 +643,8 @@ void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots
if (orig_strings != nullptr) {
orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this));
}
- GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
- GcRoot<mirror::Class>* relocated_types = RelocatedAddressOfPointer(orig_types);
+ mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types);
copy_dex_cache->SetField64<false>(
mirror::DexCache::ResolvedTypesOffset(),
static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types)));
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index a6c3cf067b..d395c170bf 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -94,6 +94,54 @@ class ProfileAssistantTest : public CommonRuntimeTest {
std::string error;
return ExecAndReturnCode(argv_str, &error);
}
+
+ bool CreateProfile(const std::string& class_file_contents, const std::string& filename) {
+ ScratchFile class_names_file;
+ File* file = class_names_file.GetFile();
+ EXPECT_TRUE(file->WriteFully(class_file_contents.c_str(), class_file_contents.length()));
+ EXPECT_EQ(0, file->Flush());
+ EXPECT_TRUE(file->ResetOffset());
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--create-profile-from=" + class_names_file.GetFilename());
+ argv_str.push_back("--reference-profile-file=" + filename);
+ argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
+ argv_str.push_back("--dex-location=classes.dex");
+ std::string error;
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
+ return true;
+ }
+
+ bool DumpClasses(const std::string& filename, std::string* file_contents) {
+ ScratchFile class_names_file;
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--dump-classes");
+ argv_str.push_back("--profile-file=" + filename);
+ argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
+ argv_str.push_back("--dex-location=classes.dex");
+ argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(class_names_file)));
+ std::string error;
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
+ File* file = class_names_file.GetFile();
+ EXPECT_EQ(0, file->Flush());
+ EXPECT_TRUE(file->ResetOffset());
+ int64_t length = file->GetLength();
+ std::unique_ptr<char[]> buf(new char[length]);
+ EXPECT_EQ(file->Read(buf.get(), length, 0), length);
+ *file_contents = std::string(buf.get(), length);
+ return true;
+ }
+
+ bool CreateAndDump(const std::string& input_file_contents, std::string* output_file_contents) {
+ ScratchFile profile_file;
+ EXPECT_TRUE(CreateProfile(input_file_contents, profile_file.GetFilename()));
+ profile_file.GetFile()->ResetOffset();
+ EXPECT_TRUE(DumpClasses(profile_file.GetFilename(), output_file_contents));
+ return true;
+ }
};
TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -307,4 +355,55 @@ TEST_F(ProfileAssistantTest, TestProfileGeneration) {
ASSERT_TRUE(info.Load(GetFd(profile)));
}
+TEST_F(ProfileAssistantTest, TestProfileCreationAllMatch) {
+ // Class names put here need to be in sorted order.
+ std::vector<std::string> class_names = {
+ "java.lang.Comparable",
+ "java.lang.Math",
+ "java.lang.Object"
+ };
+ std::string input_file_contents;
+ for (std::string& class_name : class_names) {
+ input_file_contents += class_name + std::string("\n");
+ }
+ std::string output_file_contents;
+ ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
+ ASSERT_EQ(output_file_contents, input_file_contents);
+}
+
+TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
+ // Class names put here need to be in sorted order.
+ std::vector<std::string> class_names = {
+ "doesnt.match.this.one",
+ "java.lang.Comparable",
+ "java.lang.Object"
+ };
+ std::string input_file_contents;
+ for (std::string& class_name : class_names) {
+ input_file_contents += class_name + std::string("\n");
+ }
+ std::string output_file_contents;
+ ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
+ std::string expected_contents =
+ class_names[1] + std::string("\n") + class_names[2] + std::string("\n");
+ ASSERT_EQ(output_file_contents, expected_contents);
+}
+
+TEST_F(ProfileAssistantTest, TestProfileCreationNoneMatched) {
+ // Class names put here need to be in sorted order.
+ std::vector<std::string> class_names = {
+ "doesnt.match.this.one",
+ "doesnt.match.this.one.either",
+ "nor.this.one"
+ };
+ std::string input_file_contents;
+ for (std::string& class_name : class_names) {
+ input_file_contents += class_name + std::string("\n");
+ }
+ std::string output_file_contents;
+ ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
+ std::string expected_contents("");
+ ASSERT_EQ(output_file_contents, expected_contents);
+}
+
} // namespace art
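The three TestProfileCreation* tests pin down a round-trip property: --dump-classes emits a sorted, de-duplicated class list, so sorted input whose classes all resolve comes back unchanged. The property, modeled as a pure function for illustration (not profman code):

#include <set>
#include <sstream>
#include <string>

// Sort and de-duplicate newline-separated names, the way the class dump does.
inline std::string DumpSorted(const std::string& newline_separated_names) {
  std::set<std::string> names;  // std::set keeps the output sorted and unique.
  std::istringstream in(newline_separated_names);
  std::string line;
  while (std::getline(in, line)) {
    if (!line.empty()) {
      names.insert(line);
    }
  }
  std::string dump;
  for (const std::string& name : names) {
    dump += name + "\n";
  }
  return dump;
}

For input that is already sorted and fully matched, DumpSorted(input) == input, which is what TestProfileCreationAllMatch asserts; unmatched names simply drop out, as in the other two tests.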
diff --git a/profman/profman.cc b/profman/profman.cc
index ffebb6a2ea..f6b145aa76 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -21,8 +21,11 @@
#include <sys/stat.h>
#include <unistd.h>
+#include <fstream>
#include <iostream>
+#include <set>
#include <string>
+#include <unordered_set>
#include <vector>
#include "android-base/stringprintf.h"
@@ -84,8 +87,10 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --dump-only: dumps the content of the specified profile files");
UsageError(" to standard output (default) in a human readable form.");
UsageError("");
- UsageError(" --dump-output-to-fd=<number>: redirects --dump-info-for output to a file");
- UsageError(" descriptor.");
+ UsageError(" --dump-output-to-fd=<number>: redirects --dump-only output to a file descriptor.");
+ UsageError("");
+ UsageError(" --dump-classes: dumps a sorted list of classes that are in the specified profile");
+ UsageError(" file to standard output (default) in a human readable form.");
UsageError("");
UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation.");
UsageError(" Can be specified multiple time, in which case the data from the different");
@@ -103,6 +108,7 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --reference-profile-file-fd=<number>: same as --reference-profile-file but");
UsageError(" accepts a file descriptor. Cannot be used together with");
UsageError(" --reference-profile-file.");
+ UsageError("");
UsageError(" --generate-test-profile=<filename>: generates a random profile file for testing.");
UsageError(" --generate-test-profile-num-dex=<number>: number of dex files that should be");
UsageError(" included in the generated profile. Defaults to 20.");
@@ -111,12 +117,15 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --generate-test-profile-class-ratio=<number>: the percentage from the maximum");
UsageError(" number of classes that should be generated. Defaults to 5.");
UsageError("");
+ UsageError(" --create-profile-from=<filename>: creates a profile from a list of classes.");
+ UsageError("");
UsageError("");
UsageError(" --dex-location=<string>: location string to use with corresponding");
UsageError(" apk-fd to find dex files");
UsageError("");
UsageError(" --apk-fd=<number>: file descriptor containing an open APK to");
UsageError(" search for dex files");
+ UsageError(" --apk-=<filename>: an APK to search for dex files");
UsageError("");
exit(EXIT_FAILURE);
@@ -132,6 +141,7 @@ class ProfMan FINAL {
ProfMan() :
reference_profile_file_fd_(kInvalidFd),
dump_only_(false),
+ dump_classes_(false),
dump_output_to_fd_(kInvalidFd),
test_profile_num_dex_(kDefaultTestProfileNumDex),
test_profile_method_ratio_(kDefaultTestProfileMethodRatio),
@@ -164,6 +174,10 @@ class ProfMan FINAL {
}
if (option == "--dump-only") {
dump_only_ = true;
+ } else if (option == "--dump-classes") {
+ dump_classes_ = true;
+ } else if (option.starts_with("--create-profile-from=")) {
+ create_profile_from_file_ = option.substr(strlen("--create-profile-from=")).ToString();
} else if (option.starts_with("--dump-output-to-fd=")) {
ParseUintOption(option, "--dump-output-to-fd", &dump_output_to_fd_, Usage);
} else if (option.starts_with("--profile-file=")) {
@@ -178,6 +192,8 @@ class ProfMan FINAL {
dex_locations_.push_back(option.substr(strlen("--dex-location=")).ToString());
} else if (option.starts_with("--apk-fd=")) {
ParseFdForCollection(option, "--apk-fd", &apks_fd_);
+ } else if (option.starts_with("--apk=")) {
+ apk_files_.push_back(option.substr(strlen("--apk=")).ToString());
} else if (option.starts_with("--generate-test-profile=")) {
test_profile_ = option.substr(strlen("--generate-test-profile=")).ToString();
} else if (option.starts_with("--generate-test-profile-num-dex=")) {
@@ -213,14 +229,34 @@ class ProfMan FINAL {
}
return;
}
- // --dump-only may be specified with only --reference-profiles present.
- if (!dump_only_ && !has_profiles) {
+ if (!apk_files_.empty() && !apks_fd_.empty()) {
+ Usage("APK files should not be specified with both --apk-fd and --apk");
+ }
+ if (!create_profile_from_file_.empty()) {
+ if (apk_files_.empty() && apks_fd_.empty()) {
+ Usage("APK files must be specified");
+ }
+ if (dex_locations_.empty()) {
+ Usage("DEX locations must be specified");
+ }
+ if (reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
+ Usage("Reference profile must be specified with --reference-profile-file or "
+ "--reference-profile-file-fd");
+ }
+ if (has_profiles) {
+ Usage("Profile must be specified with --reference-profile-file or "
+ "--reference-profile-file-fd");
+ }
+ return;
+ }
+ // --dump-only and --dump-classes may be specified with only --reference-profiles present.
+ if (!dump_only_ && !dump_classes_ && !has_profiles) {
Usage("No profile files specified.");
}
if (!profile_files_.empty() && !profile_files_fd_.empty()) {
Usage("Profile files should not be specified with both --profile-file-fd and --profile-file");
}
- if (!dump_only_ && !has_reference_profile) {
+ if (!dump_only_ && !dump_classes_ && !has_reference_profile) {
Usage("No reference profile file specified.");
}
if (!reference_profile_file_.empty() && FdIsValid(reference_profile_file_fd_)) {
@@ -248,18 +284,61 @@ class ProfMan FINAL {
return result;
}
- int DumpOneProfile(const std::string& banner, const std::string& filename, int fd,
- const std::vector<const DexFile*>* dex_files, std::string* dump) {
+ void OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ bool use_apk_fd_list = !apks_fd_.empty();
+ if (use_apk_fd_list) {
+ CHECK(apk_files_.empty());
+ CHECK_EQ(dex_locations_.size(), apks_fd_.size());
+ } else {
+ CHECK_EQ(dex_locations_.size(), apk_files_.size());
+ CHECK(!apk_files_.empty());
+ }
+ static constexpr bool kVerifyChecksum = true;
+ for (size_t i = 0; i < dex_locations_.size(); ++i) {
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
+ if (use_apk_fd_list) {
+ if (!DexFile::OpenZip(apks_fd_[i],
+ dex_locations_[i],
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
+ LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
+ continue;
+ }
+ } else {
+ if (!DexFile::Open(apk_files_[i].c_str(),
+ dex_locations_[i],
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
+ LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
+ continue;
+ }
+ }
+ for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
+ dex_files->emplace_back(std::move(dex_file));
+ }
+ }
+ }
+
+ int DumpOneProfile(const std::string& banner,
+ const std::string& filename,
+ int fd,
+ const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* dump) {
if (!filename.empty()) {
fd = open(filename.c_str(), O_RDWR);
if (fd < 0) {
- std::cerr << "Cannot open " << filename << strerror(errno);
+ LOG(ERROR) << "Cannot open " << filename << strerror(errno);
return -1;
}
}
ProfileCompilationInfo info;
if (!info.Load(fd)) {
- std::cerr << "Cannot load profile info from fd=" << fd << "\n";
+ LOG(ERROR) << "Cannot load profile info from fd=" << fd << "\n";
return -1;
}
std::string this_dump = banner + "\n" + info.DumpInfo(dex_files) + "\n";
@@ -277,26 +356,8 @@ class ProfMan FINAL {
// Open apk/zip files and read dex files.
MemMap::Init(); // for ZipArchive::OpenFromFd
- std::vector<const DexFile*> dex_files;
- assert(dex_locations_.size() == apks_fd_.size());
- static constexpr bool kVerifyChecksum = true;
- for (size_t i = 0; i < dex_locations_.size(); ++i) {
- std::string error_msg;
- std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
- if (DexFile::OpenZip(apks_fd_[i],
- dex_locations_[i],
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
- } else {
- LOG(WARNING) << "OpenFromZip failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
- }
- for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
- dex_files.push_back(dex_file.release());
- }
- }
-
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ OpenApkFilesFromLocations(&dex_files);
std::string dump;
// Dump individual profile files.
if (!profile_files_fd_.empty()) {
@@ -355,17 +416,207 @@ class ProfMan FINAL {
return dump_only_;
}
+ bool GetClassNames(int fd,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* class_names) {
+ ProfileCompilationInfo profile_info;
+ if (!profile_info.Load(fd)) {
+ LOG(ERROR) << "Cannot load profile info";
+ return false;
+ }
+ profile_info.GetClassNames(dex_files, class_names);
+ return true;
+ }
+
+ bool GetClassNames(std::string profile_file,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* class_names) {
+ int fd = open(profile_file.c_str(), O_RDONLY);
+ if (!FdIsValid(fd)) {
+      LOG(ERROR) << "Cannot open " << profile_file << ": " << strerror(errno);
+ return false;
+ }
+ if (!GetClassNames(fd, dex_files, class_names)) {
+ return false;
+ }
+ if (close(fd) < 0) {
+ PLOG(WARNING) << "Failed to close descriptor";
+ }
+ return true;
+ }
+
+ int DumpClasses() {
+    // Open apk/zip files and read dex files.
+ MemMap::Init(); // for ZipArchive::OpenFromFd
+ // Open the dex files to get the names for classes.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ OpenApkFilesFromLocations(&dex_files);
+ // Build a vector of class names from individual profile files.
+ std::set<std::string> class_names;
+ if (!profile_files_fd_.empty()) {
+ for (int profile_file_fd : profile_files_fd_) {
+ if (!GetClassNames(profile_file_fd, &dex_files, &class_names)) {
+ return -1;
+ }
+ }
+ }
+ if (!profile_files_.empty()) {
+ for (const std::string& profile_file : profile_files_) {
+ if (!GetClassNames(profile_file, &dex_files, &class_names)) {
+ return -1;
+ }
+ }
+ }
+ // Concatenate class names from reference profile file.
+ if (FdIsValid(reference_profile_file_fd_)) {
+ if (!GetClassNames(reference_profile_file_fd_, &dex_files, &class_names)) {
+ return -1;
+ }
+ }
+ if (!reference_profile_file_.empty()) {
+ if (!GetClassNames(reference_profile_file_, &dex_files, &class_names)) {
+ return -1;
+ }
+ }
+ // Dump the class names.
+ std::string dump;
+ for (const std::string& class_name : class_names) {
+ dump += class_name + std::string("\n");
+ }
+ if (!FdIsValid(dump_output_to_fd_)) {
+ std::cout << dump;
+ } else {
+ unix_file::FdFile out_fd(dump_output_to_fd_, false /*check_usage*/);
+ if (!out_fd.WriteFully(dump.c_str(), dump.length())) {
+ return -1;
+ }
+ }
+ return 0;
+ }
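
The container above is a std::set on purpose: class names collected from the individual profile files plus the reference profile are deduplicated and dumped in a stable, sorted order. A minimal standalone demonstration of that design choice (the names are illustrative):

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
      std::set<std::string> class_names;
      for (const char* name : {"Lcom/example/B;", "Lcom/example/A;", "Lcom/example/B;"}) {
        class_names.insert(name);  // The duplicate collapses into one entry.
      }
      std::string dump;
      for (const std::string& class_name : class_names) {
        dump += class_name + "\n";
      }
      std::cout << dump;  // Prints A then B, each exactly once.
      return 0;
    }
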
+
+ bool ShouldOnlyDumpClasses() {
+ return dump_classes_;
+ }
+
+ // Read lines from the given file, dropping comments and empty lines. Post-process each line with
+ // the given function.
+ template <typename T>
+ static T* ReadCommentedInputFromFile(
+ const char* input_filename, std::function<std::string(const char*)>* process) {
+ std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
+    if (!input_file->is_open()) {
+ LOG(ERROR) << "Failed to open input file " << input_filename;
+ return nullptr;
+ }
+ std::unique_ptr<T> result(
+ ReadCommentedInputStream<T>(*input_file, process));
+ input_file->close();
+ return result.release();
+ }
+
+ // Read lines from the given stream, dropping comments and empty lines. Post-process each line
+ // with the given function.
+ template <typename T>
+ static T* ReadCommentedInputStream(
+ std::istream& in_stream,
+ std::function<std::string(const char*)>* process) {
+ std::unique_ptr<T> output(new T());
+ while (in_stream.good()) {
+      std::string line;
+      std::getline(in_stream, line);
+      if (android::base::StartsWith(line, "#") || line.empty()) {
+        continue;
+      }
+      if (process != nullptr) {
+        std::string processed((*process)(line.c_str()));
+        output->insert(output->end(), processed);
+      } else {
+        output->insert(output->end(), line);
+ }
+ }
+ return output.release();
+ }
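
For reference, a standalone sketch of what the commented-input reader does; it mirrors the logic above but drops the android::base::StartsWith dependency so it compiles on its own: skip '#' comments and blank lines, optionally post-process each line, and insert into any container that supports insert(hint, value):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <set>
    #include <sstream>
    #include <string>

    template <typename T>
    static T* ReadCommentedStream(std::istream& in,
                                  std::function<std::string(const char*)>* process) {
      std::unique_ptr<T> output(new T());
      std::string line;
      while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#') {
          continue;  // Drop comments and blank lines.
        }
        output->insert(output->end(),
                       process != nullptr ? (*process)(line.c_str()) : line);
      }
      return output.release();
    }

    int main() {
      std::stringstream in("# classes to profile\njava.lang.String\n\njava.util.List\n");
      std::unique_ptr<std::set<std::string>> classes(
          ReadCommentedStream<std::set<std::string>>(in, nullptr));
      for (const std::string& c : *classes) {
        std::cout << c << "\n";  // java.lang.String, java.util.List
      }
      return 0;
    }
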
+
+ int CreateProfile() {
+ MemMap::Init(); // for ZipArchive::OpenFromFd
+ // Open the profile output file if needed.
+ int fd = reference_profile_file_fd_;
+ if (!FdIsValid(fd)) {
+ CHECK(!reference_profile_file_.empty());
+ fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ if (fd < 0) {
+      LOG(ERROR) << "Cannot open " << reference_profile_file_ << ": " << strerror(errno);
+ return -1;
+ }
+ }
+ // Read the user-specified list of classes (dot notation rather than descriptors).
+ std::unique_ptr<std::unordered_set<std::string>>
+ user_class_list(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ create_profile_from_file_.c_str(), nullptr)); // No post-processing.
+ std::unordered_set<std::string> matched_user_classes;
+ // Open the dex files to look up class names.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ OpenApkFilesFromLocations(&dex_files);
+ // Iterate over the dex files looking for class names in the input stream.
+ std::set<DexCacheResolvedClasses> resolved_class_set;
+ for (auto& dex_file : dex_files) {
+ // Compute the set of classes to be added for this dex file first. This
+ // avoids creating an entry in the profile information for dex files that
+ // contribute no classes.
+ std::unordered_set<dex::TypeIndex> classes_to_be_added;
+ for (const auto& klass : *user_class_list) {
+ std::string descriptor = DotToDescriptor(klass.c_str());
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor.c_str());
+ if (type_id == nullptr) {
+ continue;
+ }
+ classes_to_be_added.insert(dex_file->GetIndexForTypeId(*type_id));
+ matched_user_classes.insert(klass);
+ }
+ if (classes_to_be_added.empty()) {
+ continue;
+ }
+      // Insert the DexCacheResolvedClasses into the set expected by
+      // AddMethodsAndClasses.
+ std::set<DexCacheResolvedClasses>::iterator dex_resolved_classes =
+ resolved_class_set.emplace(dex_file->GetLocation(),
+ dex_file->GetBaseLocation(),
+ dex_file->GetLocationChecksum()).first;
+ dex_resolved_classes->AddClasses(classes_to_be_added.begin(), classes_to_be_added.end());
+ }
+ // Warn the user if we didn't find matches for every class.
+ for (const auto& klass : *user_class_list) {
+ if (matched_user_classes.find(klass) == matched_user_classes.end()) {
+ LOG(WARNING) << "requested class '" << klass << "' was not matched in any dex file";
+ }
+ }
+ // Generate the profile data structure.
+ ProfileCompilationInfo info;
+ std::vector<MethodReference> methods; // No methods for now.
+ info.AddMethodsAndClasses(methods, resolved_class_set);
+ // Write the profile file.
+ CHECK(info.Save(fd));
+ if (close(fd) < 0) {
+ PLOG(WARNING) << "Failed to close descriptor";
+ }
+ return 0;
+ }
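
CreateProfile() converts each dot-notation name to a JVM type descriptor before calling FindTypeId(). A simplified sketch of that conversion (ART's real DotToDescriptor() also handles array types, which this omits):

    #include <iostream>
    #include <string>

    // "java.lang.String" -> "Ljava/lang/String;", the form FindTypeId() expects.
    std::string DotToDescriptorSketch(const char* class_name) {
      std::string descriptor(class_name);
      for (char& c : descriptor) {
        if (c == '.') {
          c = '/';
        }
      }
      return "L" + descriptor + ";";
    }

    int main() {
      std::cout << DotToDescriptorSketch("java.lang.String") << "\n";
      return 0;
    }
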
+
+ bool ShouldCreateProfile() {
+ return !create_profile_from_file_.empty();
+ }
+
int GenerateTestProfile() {
int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (profile_test_fd < 0) {
- std::cerr << "Cannot open " << test_profile_ << strerror(errno);
+      LOG(ERROR) << "Cannot open " << test_profile_ << ": " << strerror(errno);
return -1;
}
bool result = ProfileCompilationInfo::GenerateTestProfile(profile_test_fd,
- test_profile_num_dex_,
- test_profile_method_ratio_,
- test_profile_class_ratio_);
+ test_profile_num_dex_,
+ test_profile_method_ratio_,
+ test_profile_class_ratio_);
close(profile_test_fd); // ignore close result.
return result ? 0 : -1;
}
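
GenerateTestProfile() itself is not part of this diff; given the num_dex and method/class ratio parameters, a plausible (purely hypothetical) reading is percentage-driven sampling along these lines:

    #include <cstdint>
    #include <iostream>
    #include <random>

    // Keep roughly ratio_percent% of candidate methods or classes.
    static bool KeepWithRatio(uint16_t ratio_percent, std::mt19937& rng) {
      std::uniform_int_distribution<int> dist(0, 99);
      return dist(rng) < ratio_percent;
    }

    int main() {
      std::mt19937 rng(42);
      int kept = 0;
      for (int i = 0; i < 1000; ++i) {
        kept += KeepWithRatio(25, rng) ? 1 : 0;
      }
      std::cout << kept << " of 1000 kept (~25%)\n";
      return 0;
    }
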
@@ -402,12 +653,15 @@ class ProfMan FINAL {
std::vector<std::string> profile_files_;
std::vector<int> profile_files_fd_;
std::vector<std::string> dex_locations_;
+ std::vector<std::string> apk_files_;
std::vector<int> apks_fd_;
std::string reference_profile_file_;
int reference_profile_file_fd_;
bool dump_only_;
+ bool dump_classes_;
int dump_output_to_fd_;
std::string test_profile_;
+ std::string create_profile_from_file_;
uint16_t test_profile_num_dex_;
uint16_t test_profile_method_ratio_;
uint16_t test_profile_class_ratio_;
@@ -427,6 +681,12 @@ static int profman(int argc, char** argv) {
if (profman.ShouldOnlyDumpProfile()) {
return profman.DumpProfileInfo();
}
+ if (profman.ShouldOnlyDumpClasses()) {
+ return profman.DumpClasses();
+ }
+ if (profman.ShouldCreateProfile()) {
+ return profman.CreateProfile();
+ }
// Process profile information and assess if we need to do a profile guided compilation.
// This operation involves I/O.
return profman.ProcessProfiles();
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 276f3043d9..9585ba2d8e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -208,6 +208,7 @@ cc_defaults {
"verifier/reg_type_cache.cc",
"verifier/register_line.cc",
"verifier/verifier_deps.cc",
+ "verify_object.cc",
"well_known_classes.cc",
"zip_archive.cc",
@@ -379,6 +380,10 @@ cc_defaults {
},
cflags: ["-DBUILDING_LIBART=1"],
generated_sources: ["art_operator_srcs"],
+ // asm_support_gen.h (used by asm_support.h) is generated with cpp-define-generator
+ generated_headers: ["cpp-define-generator-asm-support"],
+ // export our headers so the libart-gtest targets can use it as well.
+ export_generated_headers: ["cpp-define-generator-asm-support"],
clang: true,
include_dirs: [
"art/cmdline",
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index ed36436120..cfe8406fbf 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -965,9 +965,27 @@ ENTRY \name
END \name
.endm
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Macro for string and type resolution and initialization.
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_EVERYTHING_FRAME r1 @ save everything in case of GC
+ mov r1, r9 @ pass Thread::Current
+ bl \entrypoint @ (uint32_t index, Thread*)
+ cbz r0, 1f @ If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
+ bx lr
+ .cfi_restore_state
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END \name
+.endm
+
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
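
In C terms, the stub emitted by ONE_ARG_SAVE_EVERYTHING_DOWNCALL amounts to the control flow below: call the runtime entrypoint with (index, Thread*), and treat a null result as a pending exception to be delivered instead of returning. The save-everything frame exists so a GC triggered inside the call can inspect and update every register. All names here are illustrative stand-ins, not ART declarations:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    struct Thread {};

    static int* FakeEntrypoint(uint32_t index, Thread* /*self*/) {
      static int obj = 42;
      return index != 0 ? &obj : nullptr;  // Null emulates a resolution failure/OOME.
    }

    [[noreturn]] static void DeliverPendingException() {
      std::puts("DELIVER_PENDING_EXCEPTION_FRAME_READY");
      std::exit(1);
    }

    static int* Stub(uint32_t index, Thread* self) {
      // SETUP_SAVE_EVERYTHING_FRAME
      int* result = FakeEntrypoint(index, self);  // bl \entrypoint
      if (result == nullptr) {                    // cbz r0, 1f
        DeliverPendingException();
      }
      return result;  // RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0; bx lr
    }

    int main() {
      Thread t;
      std::printf("%d\n", *Stub(7, &t));
      return 0;
    }
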
/*
* Called by managed code to resolve a static field and load a non-wide value.
@@ -1066,31 +1084,36 @@ ENTRY art_quick_set64_static
DELIVER_PENDING_EXCEPTION
END art_quick_set64_static
- /*
- * Entry from managed code to resolve a string, this stub will
- * check the dex cache for a matching string (the fast path), and if not found,
- * it will allocate a String and deliver an exception on error.
- * On success the String is returned. R0 holds the string index.
- */
-
-ENTRY art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME r1 @ save everything in case of GC
- mov r1, r9 @ pass Thread::Current
- bl artResolveStringFromCode @ (uint32_t type_idx, Thread*)
- cbz r0, 1f @ If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
- bx lr
- .cfi_restore_state
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END art_quick_resolve_string
-
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+// Comment out allocators that have arm specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
// Fast path rosalloc allocation.
// r0: type/return value, r9: Thread::Current
// r1, r2, r3, r12: free.
@@ -1099,13 +1122,13 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1119,7 +1142,7 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ cbz r3, .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1164,16 +1187,20 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
mov r0, r3 // Set the return value and return.
bx lr
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
- bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*)
+ bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
+END \c_name
+.endm
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
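
A minimal C++ rendering of the rosalloc fast path this macro emits (not ART code; the types are illustrative): map the already-aligned object size to a size-bracket run, pop the head of that run's free list, and write the class pointer over the slot's next field (the MIPS variants below assert via #error that the two share an offset):

    #include <cstddef>
    #include <cstdint>

    struct Slot { Slot* next; };
    struct Run {
      Slot* free_list_head;
      uint32_t free_list_size;
    };

    void* AllocRosallocFastPath(Run** runs, void* klass,
                                size_t aligned_size, unsigned quantum_shift) {
      // The asm folds the -1 into the runs-array offset
      // (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__ in the MIPS variants).
      size_t bracket = (aligned_size >> quantum_shift) - 1;
      Run* run = runs[bracket];
      Slot* head = run->free_list_head;
      if (head == nullptr) {
        return nullptr;  // Slow path: artAllocObjectFromCode*RosAlloc.
      }
      run->free_list_head = head->next;         // Pop the list head.
      run->free_list_size--;
      *reinterpret_cast<void**>(head) = klass;  // Class pointer overwrites "next".
      return head;  // The asm also pushes this on the thread-local alloc stack.
    }

    int main() {
      Slot slots[2] = {{&slots[1]}, {nullptr}};
      Run run{&slots[0], 2};
      Run* runs[1] = {&run};
      int fake_class = 0;
      return AllocRosallocFastPath(runs, &fake_class, 8, 3) == &slots[0] ? 0 : 1;
    }
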
+
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
// Need to preserve r0 to the slow path.
@@ -1212,41 +1239,173 @@ END art_quick_alloc_object_resolved_rosalloc
bx lr
.endm
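
The object fast path referenced above is a thread-local bump allocation. A minimal C++ sketch (not ART code; field names follow the THREAD_LOCAL_* offsets used in these files): if the object fits in the remaining thread-local buffer, bump thread_local_pos, count the allocation, and publish the class pointer; otherwise fall back to the slow-path C++ entrypoint:

    #include <cstddef>
    #include <cstdint>

    struct Thread {
      uint8_t* thread_local_pos;
      uint8_t* thread_local_end;
      size_t thread_local_objects;
    };

    void* AllocObjectTlabFastPath(Thread* self, void* klass, uint32_t object_size) {
      size_t remaining =
          static_cast<size_t>(self->thread_local_end - self->thread_local_pos);
      if (object_size > remaining) {
        return nullptr;  // Take .Lslow_path and call artAllocObjectFromCode*TLAB.
      }
      uint8_t* obj = self->thread_local_pos;
      self->thread_local_pos += object_size;
      self->thread_local_objects++;
      *reinterpret_cast<void**>(obj) = klass;  // The asm also issues a dmb fence.
      return obj;
    }

    int main() {
      alignas(8) uint8_t buffer[64];
      Thread t{buffer, buffer + sizeof(buffer), 0};
      int fake_class = 0;
      return AllocObjectTlabFastPath(&t, &fake_class, 16) == buffer ? 0 : 1;
    }
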
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+ENTRY \name
// Fast path tlab allocation.
// r0: type, r9: Thread::Current
// r1, r2, r3, r12: free.
-#if defined(USE_READ_BARRIER)
- mvn r0, #0 // Read barrier not supported here.
- bx lr // Return -1.
-#endif
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name
+.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
mov r1, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*)
+ bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
+END \name
+.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_resolved_region_tlab
- // Fast path tlab allocation.
- // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
-#if !defined(USE_READ_BARRIER)
- eor r0, r0, r0 // Read barrier must be enabled here.
- sub r0, r0, #1 // Return -1.
- bx lr
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+
+
+// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
+// and art_quick_alloc_array_resolved/initialized_region_tlab.
+//
+// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// Need to preserve r0 and r1 to the slow path.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
+    and    r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED             // Apply alignment mask
+ // (addr + 7) & ~7.
+
+ // Load thread_local_pos (r3) and
+ // thread_local_end (r12) with ldrd.
+ // Check constraints for ldrd.
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
+#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r1, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*)
+ ldrd r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+ sub r12, r12, r3 // Compute the remaining buf size.
+ cmp r2, r12 // Check if the total_size fits.
+ bhi \slowPathLabel
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ add r2, r2, r3
+ str r2, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add r2, r2, #1
+ str r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF r0
+ str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // Alternatively we could use "ishst"
+ // if we use load-acquire for the
+                                                              // object size load.
+ mov r0, r3
+ dmb ish
+ bx lr
+.endm
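
The ldrd precondition guarded by the #if/#error above can be restated as static_asserts over an illustrative 32-bit layout: the two fields must be adjacent and the pair 8-byte aligned for a single ldrd to load both.

    #include <cstddef>
    #include <cstdint>

    struct Thread32 {
      alignas(8) uint32_t thread_local_pos;
      uint32_t thread_local_end;
    };
    static_assert(offsetof(Thread32, thread_local_end) ==
                      offsetof(Thread32, thread_local_pos) + 4,
                  "pos and end must be consecutive");
    static_assert(offsetof(Thread32, thread_local_pos) % 8 == 0,
                  "the pair must be 8-byte aligned");

    int main() { return 0; }
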
+
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
+ENTRY \name
+ // Fast path array allocation for region tlab allocation.
+ // r0: mirror::Class* type
+ // r1: int32_t component_count
+ // r9: thread
+ // r2, r3, r12: free.
+ \size_setup .Lslow_path\name
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
+.Lslow_path\name:
+ // r0: mirror::Class* klass
+ // r1: int32_t component_count
+ // r2: Thread* self
+ SETUP_SAVE_REFS_ONLY_FRAME r2 // save callee saves in case of GC
+ mov r2, r9 // pass Thread::Current
+ bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_region_tlab
+END \name
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
+ bkpt // We should never enter here.
+ // Code below is for reference.
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
+ cmp r1, r2
+ bhi \slow_path
+ // Array classes are never finalizable
+ // or uninitialized, no need to check.
+ ldr r3, [r0, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+ UNPOISON_HEAP_REF r3
+ ldr r3, [r3, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+ lsr r3, r3, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+ // bits.
+ lsl r2, r1, r3 // Calculate data size
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+    add    r3, r3, #1                                         // Add 4 to the size only if the
+ // component size shift is 3
+ // (for 64 bit alignment).
+ and r3, r3, #4
+ add r2, r2, r3
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_8 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
+ cmp r1, r2
+ bhi \slow_path
+ // Add array data offset and alignment.
+ add r2, r1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_16 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #1
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_32 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #2
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_64 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #3
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
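
All of the size_setup macros compute the same quantity for different element widths: the data size (count << shift) plus the array data offset plus the alignment mask, with the mask ANDed off at the top of ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE. In C++, with illustrative constants:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    constexpr size_t kObjectAlignment = 8;
    constexpr size_t kIntArrayDataOffset = 12;   // Illustrative header size.
    constexpr size_t kWideArrayDataOffset = 16;  // 64-bit elements need 8-byte alignment.

    size_t ComputeArraySize(uint32_t component_count, unsigned component_shift) {
      size_t data_size = static_cast<size_t>(component_count) << component_shift;
      size_t data_offset =
          component_shift == 3 ? kWideArrayDataOffset : kIntArrayDataOffset;
      size_t size = data_size + data_offset + (kObjectAlignment - 1);
      return size & ~(kObjectAlignment - 1);  // The mask applied in the fast path.
    }

    int main() {
      std::cout << ComputeArraySize(10, 2) << "\n";  // 10 ints -> 56 bytes.
      return 0;
    }
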
+
+# TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm, remove
+# the entrypoint once all backends have been updated to use the size variants.
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -1895,7 +2054,9 @@ ENTRY \name
beq .Lret_forwarding_address\name
.Lslow_rb_\name:
- // Save IP: the kSaveEverything entrypoint art_quick_resolve_string makes a tail call here.
+ // Save IP: The kSaveEverything entrypoint art_quick_resolve_string used to
+    // make a tail call here. Currently, saving IP serves only to keep the stack
+    // aligned, but we may reintroduce kSaveEverything calls here in the future.
push {r0-r4, r9, ip, lr} @ save return address, core caller-save registers and ip
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r0, 0
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6a2034fd68..bfbe4816ba 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1553,6 +1553,24 @@ ENTRY \name
END \name
.endm
+// Macro for string and type resolution and initialization.
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_EVERYTHING_FRAME // save everything for stack crawl
+ mov x1, xSELF // pass Thread::Current
+ bl \entrypoint // (int32_t index, Thread* self)
+ cbz w0, 1f // If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+ ret // return
+ .cfi_restore_state
+ .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END \name
+.endm
+
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
cbz w0, 1f // result zero branch over
ret // return
@@ -1571,10 +1589,11 @@ TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode,
* initializer and deliver the exception on error. On success the static storage base is
* returned.
*/
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
@@ -1604,29 +1623,8 @@ THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCod
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
- /*
- * Entry from managed code to resolve a string, this stub will
- * check the dex cache for a matching string (the fast path), and if not found,
- * it will allocate a String and deliver an exception on error.
- * On success the String is returned. R0 holds the string index.
- */
-
-ENTRY art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME // save everything for stack crawl
- mov x1, xSELF // pass Thread::Current
- bl artResolveStringFromCode // (int32_t string_idx, Thread* self)
- cbz w0, 1f // If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
- ret // return
- .cfi_restore_state
- .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END art_quick_resolve_string
-
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
@@ -1640,8 +1638,20 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
+
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
// Fast path rosalloc allocation.
// x0: type, xSELF(x19): Thread::Current
// x1-x7: free.
@@ -1650,13 +1660,13 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization
// checks.
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1669,7 +1679,7 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ cbz x3, .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1713,59 +1723,19 @@ ENTRY art_quick_alloc_object_resolved_rosalloc
mov x0, x3 // Set the return value and return.
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x1, xSELF // pass Thread::Current
- bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*)
+ bl \cxx_name
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
-
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
- and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignemnt mask
- // (addr + 7) & ~7. The mask must
- // be 64 bits to keep high bits in
- // case of overflow.
- // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value.
- // Negative ints become large 64 bit unsigned ints which will always be larger than max signed
- // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
- cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow
- bhs \slowPathLabel // path.
-
- ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that
- // we use (end - begin) to handle
- // negative size arrays. It is
- // assumed that a negative size will
- // always be greater unsigned than
- // region size.
- ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
- sub \xTemp2, \xTemp2, \xTemp0
- cmp \xTemp1, \xTemp2
- bhi \slowPathLabel
- // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- // Move old thread_local_pos to x0
- // for the return value.
- mov x0, \xTemp0
- add \xTemp0, \xTemp0, \xTemp1
- str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
- ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
- add \xTemp0, \xTemp0, #1
- str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF \wClass
- str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
- str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
- // Fence.
- dmb ishst
- ret
+END \c_name
.endm
-// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
-// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
- ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
-.endm
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
@@ -1792,36 +1762,13 @@ END art_quick_alloc_object_resolved_rosalloc
ret
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
- // Fast path tlab allocation.
- // x0: type, xSELF(x19): Thread::Current
- // x1-x7: free.
-#if defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier not supported here.
- ret // Return -1.
-#endif
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x1, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
-
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
ENTRY \name
// Fast path region tlab allocation.
// x0: type, xSELF(x19): Thread::Current
// x1-x7: free.
-#if !defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier must be enabled here.
- ret // Return -1.
-#endif
-.Ldo_allocation\name:
- \fast_path .Lslow_path\name
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x1, xSELF // Pass Thread::Current.
@@ -1831,21 +1778,55 @@ ENTRY \name
END \name
.endm
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
-// TODO: We could use this macro for the normal tlab allocator too.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+    and   \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
+ // (addr + 7) & ~7. The mask must
+ // be 64 bits to keep high bits in
+ // case of overflow.
+ // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value.
+ // Negative ints become large 64 bit unsigned ints which will always be larger than max signed
+ // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
+ cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow
+ bhs \slowPathLabel // path.
+
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that
+ // we use (end - begin) to handle
+ // negative size arrays. It is
+ // assumed that a negative size will
+ // always be greater unsigned than
+ // region size.
+ ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
+ sub \xTemp2, \xTemp2, \xTemp0
+ cmp \xTemp1, \xTemp2
+ bhi \slowPathLabel
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+ // Move old thread_local_pos to x0
+ // for the return value.
+ mov x0, \xTemp0
+ add \xTemp0, \xTemp0, \xTemp1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add \xTemp0, \xTemp0, #1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF \wClass
+ str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
+ // Fence.
+ dmb ishst
+ ret
+.endm
-.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, size_setup
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
ENTRY \name
// Fast path array allocation for region tlab allocation.
// x0: mirror::Class* type
// x1: int32_t component_count
// x2-x7: free.
-#if !defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier must be enabled here.
- ret // Return -1.
-#endif
mov x3, x0
\size_setup x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
@@ -1904,17 +1885,21 @@ END \name
.macro COMPUTE_ARRAY_SIZE_64 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
lsl \xTemp1, \xCount, #3
// Add array data offset and alignment.
- // Add 4 to the size for 64 bit alignment.
- add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK + 4)
+ add \xTemp1, \xTemp1, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm
# TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm64, remove
# the entrypoint once all backends have been updated to use the size variants.
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
/*
* Called by managed code when the thread has been asked to suspend.
@@ -2393,13 +2378,6 @@ END art_quick_indexof
ENTRY \name
// Reference is null, no work to do at all.
cbz \wreg, .Lret_rb_\name
- /*
- * Allocate 46 stack slots * 8 = 368 bytes:
- * - 20 slots for core registers X0-X19
- * - 24 slots for floating-point registers D0-D7 and D16-D31
- * - 1 slot for return address register XLR
- * - 1 padding slot for 16-byte stack alignment
- */
// Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lnot_marked_rb_\name
@@ -2411,10 +2389,15 @@ ENTRY \name
cmp wzr, wIP0, lsr #30
beq .Lret_forwarding_address\name
.Lslow_rb_\name:
- // We must not clobber IP0 since art_quick_resolve_string makes a tail call here and relies on
- // IP0 being restored.
+ /*
+ * Allocate 44 stack slots * 8 = 352 bytes:
+     * - 20 slots for core registers X0-X15, X17-X19, LR
+ * - 24 slots for floating-point registers D0-D7 and D16-D31
+ */
+ // We must not clobber IP1 since code emitted for HLoadClass and HLoadString
+ // relies on IP1 being preserved.
// Save all potentially live caller-save core registers.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 352
SAVE_TWO_REGS x2, x3, 16
SAVE_TWO_REGS x4, x5, 32
SAVE_TWO_REGS x6, x7, 48
@@ -2422,8 +2405,8 @@ ENTRY \name
SAVE_TWO_REGS x10, x11, 80
SAVE_TWO_REGS x12, x13, 96
SAVE_TWO_REGS x14, x15, 112
- SAVE_TWO_REGS x16, x17, 128
- SAVE_TWO_REGS x18, x19, 144
+ SAVE_TWO_REGS x17, x18, 128 // Skip x16, i.e. IP0.
+ SAVE_TWO_REGS x19, xLR, 144 // Save also return address.
// Save all potentially live caller-save floating-point registers.
stp d0, d1, [sp, #160]
stp d2, d3, [sp, #176]
@@ -2437,9 +2420,6 @@ ENTRY \name
stp d26, d27, [sp, #304]
stp d28, d29, [sp, #320]
stp d30, d31, [sp, #336]
- // Save return address.
- // (sp + #352 is a padding slot)
- SAVE_REG xLR, 360
.ifnc \wreg, w0
mov w0, \wreg // Pass arg1 - obj from `wreg`
@@ -2459,8 +2439,8 @@ ENTRY \name
POP_REGS_NE x10, x11, 80, \xreg
POP_REGS_NE x12, x13, 96, \xreg
POP_REGS_NE x14, x15, 112, \xreg
- POP_REGS_NE x16, x17, 128, \xreg
- POP_REGS_NE x18, x19, 144, \xreg
+ POP_REGS_NE x17, x18, 128, \xreg
+ POP_REGS_NE x19, xLR, 144, \xreg // Restore also return address.
// Restore floating-point registers.
ldp d0, d1, [sp, #160]
ldp d2, d3, [sp, #176]
@@ -2474,9 +2454,8 @@ ENTRY \name
ldp d26, d27, [sp, #304]
ldp d28, d29, [sp, #320]
ldp d30, d31, [sp, #336]
- // Restore return address and remove padding.
- RESTORE_REG xLR, 360
- DECREASE_FRAME 368
+ // Remove frame and return.
+ DECREASE_FRAME 352
ret
.Lret_forwarding_address\name:
mvn wIP0, wIP0
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 2d5eca003d..ec8ae85722 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1576,8 +1576,87 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+// A hand-written override for:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
+ # Fast path rosalloc allocation
+ # a0: type
+ # s1: Thread::Current
+ # -----------------------------
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # t4: thread stack bottom offset
+ # v0: free list head
+ #
+ # t5, t6 : temps
+ lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
+ lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
+ bgeu $t3, $t4, .Lslow_path_\c_name
+
+ lw $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
+ li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation. Also does the
+ # initialized and finalizable checks.
+ bgtu $t1, $t5, .Lslow_path_\c_name
+
+ # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
+ # combine the two shifts together.
+ srl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+
+ addu $t2, $t1, $s1
+ lw $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+ lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqz $v0, .Lslow_path_\c_name
+ nop
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+ lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
+ # asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $a0
+ sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+ sw $v0, 0($t3)
+ addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+ lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $t5, $t5, -1
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ nop
+
+ .Lslow_path_\c_name:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ la $t9, \cxx_name
+ jalr $t9
+ move $a1, $s1 # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \c_name
+.endm
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
@@ -1963,67 +2042,158 @@ ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
/* $a1 holds "ch" */
/* $a2 holds "fromIndex" */
- lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- slt $t1, $a2, $zero # if fromIndex < 0
+#if (STRING_COMPRESSION_FEATURE)
+ lw $a3, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
+#else
+ lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+#endif
+ slt $t1, $a2, $zero # if fromIndex < 0
#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
- seleqz $a2, $a2, $t1 # fromIndex = 0;
+ seleqz $a2, $a2, $t1 # fromIndex = 0;
#else
- movn $a2, $zero, $t1 # fromIndex = 0;
+ movn $a2, $zero, $t1 # fromIndex = 0;
#endif
- subu $t0, $t0, $a2 # this.length() - fromIndex
- blez $t0, 6f # if this.length()-fromIndex <= 0
- li $v0, -1 # return -1;
-
- sll $v0, $a2, 1 # $a0 += $a2 * 2
- addu $a0, $a0, $v0 # " ditto "
- move $v0, $a2 # Set i to fromIndex.
+#if (STRING_COMPRESSION_FEATURE)
+ srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
+#endif
+ subu $t0, $t0, $a2 # this.length() - fromIndex
+ blez $t0, 6f # if this.length()-fromIndex <= 0
+ li $v0, -1 # return -1;
+
+#if (STRING_COMPRESSION_FEATURE)
+ sll $a3, $a3, 31 # Extract compression flag.
+ beqz $a3, .Lstring_indexof_compressed
+ move $t2, $a0 # Save a copy in $t2 to later compute result (in branch delay slot).
+#endif
+ sll $v0, $a2, 1 # $a0 += $a2 * 2
+ addu $a0, $a0, $v0 # " ditto "
+ move $v0, $a2 # Set i to fromIndex.
1:
- lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
- beq $t3, $a1, 6f # return i;
- addu $a0, $a0, 2 # i++
- subu $t0, $t0, 1 # this.length() - i
- bnez $t0, 1b # while this.length() - i > 0
- addu $v0, $v0, 1 # i++
+ lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3, $a1, 6f # return i;
+ addu $a0, $a0, 2 # i++
+ subu $t0, $t0, 1 # this.length() - i
+ bnez $t0, 1b # while this.length() - i > 0
+ addu $v0, $v0, 1 # i++
- li $v0, -1 # if this.length() - i <= 0
- # return -1;
+ li $v0, -1 # if this.length() - i <= 0
+ # return -1;
6:
- j $ra
- nop
+ j $ra
+ nop
+
+#if (STRING_COMPRESSION_FEATURE)
+.Lstring_indexof_compressed:
+ addu $a0, $a0, $a2 # $a0 += $a2
+
+.Lstring_indexof_compressed_loop:
+ lbu $t3, MIRROR_STRING_VALUE_OFFSET($a0)
+ beq $t3, $a1, .Lstring_indexof_compressed_matched
+ subu $t0, $t0, 1
+ bgtz $t0, .Lstring_indexof_compressed_loop
+ addu $a0, $a0, 1
+
+.Lstring_indexof_nomatch:
+ jalr $zero, $ra
+ li $v0, -1 # return -1;
+
+.Lstring_indexof_compressed_matched:
+ jalr $zero, $ra
+ subu $v0, $a0, $t2 # return (current - start);
+#endif
END art_quick_indexof
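
The compressed-string paths above rely on how String.count is encoded when STRING_COMPRESSION_FEATURE is enabled: the length is stored shifted left by one, with the low bit as a compression flag (0 means 8-bit compressed data, 1 means UTF-16), hence the srl/sll pairs in the assembly. As a sketch:

    #include <cassert>
    #include <cstdint>

    inline int32_t LengthFromCount(int32_t count) { return count >> 1; }
    inline bool IsCompressed(int32_t count) { return (count & 1) == 0; }

    int main() {
      int32_t count = (5 << 1) | 0;  // A compressed string of length 5.
      assert(LengthFromCount(count) == 5);
      assert(IsCompressed(count));
      return 0;
    }
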
/* java.lang.String.compareTo(String anotherString) */
ENTRY_NO_GP art_quick_string_compareto
/* $a0 holds address of "this" */
/* $a1 holds address of "anotherString" */
- beq $a0, $a1, 9f # this and anotherString are the same object
- move $v0, $zero
+ beq $a0, $a1, .Lstring_compareto_length_diff # this and anotherString are the same object
+ move $a3, $a2 # trick to return 0 (it returns a2 - a3)
+
+#if (STRING_COMPRESSION_FEATURE)
+ lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
+ lw $t1, MIRROR_STRING_COUNT_OFFSET($a1) # 'count' field of anotherString
+ sra $a2, $t0, 1 # this.length()
+ sra $a3, $t1, 1 # anotherString.length()
+#else
+ lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+#endif
- lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
- MINu $t2, $a2, $a3
-# $t2 now holds min(this.length(),anotherString.length())
+ MINu $t2, $a2, $a3
+ # $t2 now holds min(this.length(),anotherString.length())
- beqz $t2, 9f # while min(this.length(),anotherString.length())-i != 0
- subu $v0, $a2, $a3 # if $t2==0 return
- # (this.length() - anotherString.length())
-1:
- lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
- lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0, $t1, 9f # if this.charAt(i) != anotherString.charAt(i)
- subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
- addiu $a0, $a0, 2 # point at this.charAt(i++)
- subu $t2, $t2, 1 # new value of
- # min(this.length(),anotherString.length())-i
- bnez $t2, 1b
- addiu $a1, $a1, 2 # point at anotherString.charAt(i++)
- subu $v0, $a2, $a3
-
-9:
- j $ra
- nop
+ # while min(this.length(),anotherString.length())-i != 0
+ beqz $t2, .Lstring_compareto_length_diff # if $t2==0
+ nop # return (this.length() - anotherString.length())
+
+#if (STRING_COMPRESSION_FEATURE)
+    # Distinguish the compression cases:
+ sll $t3, $t0, 31
+ beqz $t3, .Lstring_compareto_this_is_compressed
+ sll $t3, $t1, 31 # In branch delay slot.
+ beqz $t3, .Lstring_compareto_that_is_compressed
+ nop
+ b .Lstring_compareto_both_not_compressed
+ nop
+
+.Lstring_compareto_this_is_compressed:
+ beqz $t3, .Lstring_compareto_both_compressed
+ nop
+ /* If (this->IsCompressed() && that->IsCompressed() == false) */
+.Lstring_compareto_loop_comparison_this_compressed:
+ lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0, $t1, .Lstring_compareto_char_diff
+ addiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_loop_comparison_this_compressed
+ addiu $a1, $a1, 2 # point at anotherString.charAt(i++) - uncompressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_that_is_compressed:
+ lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0, $t1, .Lstring_compareto_char_diff
+ addiu $a0, $a0, 2 # point at this.charAt(i++) - uncompressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_that_is_compressed
+ addiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_both_compressed:
+ lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0, $t1, .Lstring_compareto_char_diff
+ addiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_both_compressed
+ addiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+#endif
+
+.Lstring_compareto_both_not_compressed:
+ lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
+ # return (this.charAt(i) - anotherString.charAt(i))
+ addiu $a0, $a0, 2 # point at this.charAt(i++)
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_both_not_compressed
+ addiu $a1, $a1, 2 # point at anotherString.charAt(i++)
+
+.Lstring_compareto_length_diff:
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_char_diff:
+ jalr $zero, $ra
+ subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
END art_quick_string_compareto
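
All four comparison loops above (both compressed, both uncompressed, and the two mixed cases) implement the same algorithm over different element widths: return the first character difference, otherwise the length difference. The shared logic, sketched over UTF-16 data only:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <string>

    int32_t CompareTo(const std::u16string& lhs, const std::u16string& rhs) {
      size_t n = std::min(lhs.size(), rhs.size());
      for (size_t i = 0; i < n; ++i) {
        if (lhs[i] != rhs[i]) {
          // .Lstring_compareto_char_diff
          return static_cast<int32_t>(lhs[i]) - static_cast<int32_t>(rhs[i]);
        }
      }
      // .Lstring_compareto_length_diff
      return static_cast<int32_t>(lhs.size()) - static_cast<int32_t>(rhs.size());
    }

    int main() {
      std::cout << CompareTo(u"abc", u"abd") << " "
                << CompareTo(u"ab", u"abc") << "\n";  // -1 -1
      return 0;
    }
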
.extern artInvokePolymorphic
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index f3629d90d3..28d7c77938 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1533,7 +1533,85 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// A hand-written override for:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
+ # Fast path rosalloc allocation
+ # a0: type
+ # s1: Thread::Current
+ # -----------------------------
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # a4: thread stack bottom offset
+ # v0: free list head
+ #
+ # a5, a6 : temps
+ ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
+ ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
+ bgeuc $t3, $a4, .Lslow_path_\c_name
+
+ lwu $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
+ li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation. Also does the initialized
+ # and finalizable checks.
+ bltuc $a5, $t1, .Lslow_path_\c_name
+
+ # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
+ # combine the two shifts together.
+ dsrl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+
+ daddu $t2, $t1, $s1
+ ld $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+ ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqzc $v0, .Lslow_path_\c_name
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+ ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
+ # asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $a0
+ sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+ sd $v0, 0($t3)
+ daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+ lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $a5, $a5, -1
+ sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ .cpreturn # Restore gp from t8 in branch delay slot.
+
+.Lslow_path_\c_name:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ jal \cxx_name
+    move $a1, $s1                                     # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \c_name
+.endm
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
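
As a rough model of the fast path the macro above emits: pop the head of the current run's free list, decrement the list size, and let the class-pointer store turn the slot into an object. Run and Slot below are simplified stand-ins for ART's RosAlloc types, used only for illustration; the real entrypoint also checks the thread-local allocation stack and the size bracket before reaching this point.

    struct Slot { Slot* next; };
    struct Run {
      Slot* free_list_head;
      int free_list_size;
    };

    // Returns nullptr when the caller must fall back to the slow-path C++ call.
    void* RosAllocFastPath(Run* run) {
      Slot* head = run->free_list_head;
      if (head == nullptr) {
        return nullptr;                    // Empty run: slow path.
      }
      run->free_list_head = head->next;    // Pop the head off the free list.
      run->free_list_size -= 1;            // Mirrors the lw/addiu/sw triple above.
      // The class pointer is then stored over 'next' (the offsets are asserted
      // equal by the #if above), turning the raw slot into an object header.
      return head;
    }
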
@@ -1822,32 +1900,91 @@ END art_quick_deoptimize_from_compiled_code
ENTRY_NO_GP art_quick_string_compareto
/* $a0 holds address of "this" */
/* $a1 holds address of "anotherString" */
- beq $a0,$a1,9f # this and anotherString are the same object
- move $v0,$zero
+ move $a2, $zero
+ beq $a0, $a1, .Lstring_compareto_length_diff # this and anotherString are the same object
+ move $a3, $zero # return 0 (it returns a2 - a3)
+
+#if (STRING_COMPRESSION_FEATURE)
+ lw $a4, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
+ lw $a5, MIRROR_STRING_COUNT_OFFSET($a1) # 'count' field of anotherString
+ sra $a2, $a4, 1 # this.length()
+ sra $a3, $a5, 1 # anotherString.length()
+#else
+ lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+#endif
- lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
- MINu $t2, $a2, $a3
-# $t2 now holds min(this.length(),anotherString.length())
+ MINu $t2, $a2, $a3
+ # $t2 now holds min(this.length(),anotherString.length())
- beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0
- subu $v0,$a2,$a3 # if $t2==0 return
- # (this.length() - anotherString.length())
-1:
- lhu $t0,MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
- lhu $t1,MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0,$t1,9f # if this.charAt(i) != anotherString.charAt(i)
- subu $v0,$t0,$t1 # return (this.charAt(i) - anotherString.charAt(i))
- daddiu $a0,$a0,2 # point at this.charAt(i++)
- subu $t2,$t2,1 # new value of
- # min(this.length(),anotherString.length())-i
- bnez $t2,1b
- daddiu $a1,$a1,2 # point at anotherString.charAt(i++)
- subu $v0,$a2,$a3
-
-9:
- j $ra
- nop
+ # while min(this.length(),anotherString.length())-i != 0
+ beqzc $t2, .Lstring_compareto_length_diff # if $t2==0
+ # return (this.length() - anotherString.length())
+
+#if (STRING_COMPRESSION_FEATURE)
+    # The compression cases differ:
+ dext $a6, $a4, 0, 1
+ beqz $a6, .Lstring_compareto_this_is_compressed
+ dext $a6, $a5, 0, 1 # In branch delay slot.
+ beqz $a6, .Lstring_compareto_that_is_compressed
+ nop
+ b .Lstring_compareto_both_not_compressed
+ nop
+
+.Lstring_compareto_this_is_compressed:
+ beqzc $a6, .Lstring_compareto_both_compressed
+    /* Case: this->IsCompressed() && !that->IsCompressed() */
+.Lstring_compareto_loop_comparison_this_compressed:
+ lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bnec $t0, $t1, .Lstring_compareto_char_diff
+ daddiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_loop_comparison_this_compressed
+ daddiu $a1, $a1, 2 # point at anotherString.charAt(i++) - uncompressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_that_is_compressed:
+ lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bnec $t0, $t1, .Lstring_compareto_char_diff
+ daddiu $a0, $a0, 2 # point at this.charAt(i++) - uncompressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_that_is_compressed
+ daddiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_both_compressed:
+ lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
+ lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bnec $t0, $t1, .Lstring_compareto_char_diff
+ daddiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_both_compressed
+ daddiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+#endif
+
+.Lstring_compareto_both_not_compressed:
+ lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bnec $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
+ # return (this.charAt(i) - anotherString.charAt(i))
+ daddiu $a0, $a0, 2 # point at this.charAt(i++)
+ subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
+ bnez $t2, .Lstring_compareto_both_not_compressed
+ daddiu $a1, $a1, 2 # point at anotherString.charAt(i++)
+
+.Lstring_compareto_length_diff:
+ jalr $zero, $ra
+ subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
+
+.Lstring_compareto_char_diff:
+ jalr $zero, $ra
+ subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
END art_quick_string_compareto
/* java.lang.String.indexOf(int ch, int fromIndex=0) */
@@ -1855,31 +1992,64 @@ ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
/* $a1 holds "ch" */
/* $a2 holds "fromIndex" */
- lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- slt $at, $a2, $zero # if fromIndex < 0
- seleqz $a2, $a2, $at # fromIndex = 0;
- subu $t0,$t0,$a2 # this.length() - fromIndex
- blez $t0,6f # if this.length()-fromIndex <= 0
- li $v0,-1 # return -1;
+#if (STRING_COMPRESSION_FEATURE)
+ lw $a3, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
+#else
+ lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+#endif
+ slt $at, $a2, $zero # if fromIndex < 0
+ seleqz $a2, $a2, $at # fromIndex = 0;
+#if (STRING_COMPRESSION_FEATURE)
+ srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
+#endif
+ subu $t0, $t0, $a2 # this.length() - fromIndex
+ blez $t0, 6f # if this.length()-fromIndex <= 0
+ li $v0, -1 # return -1;
- sll $v0,$a2,1 # $a0 += $a2 * 2
- daddu $a0,$a0,$v0 # " ditto "
- move $v0,$a2 # Set i to fromIndex.
+#if (STRING_COMPRESSION_FEATURE)
+ dext $a3, $a3, 0, 1 # Extract compression flag.
+ beqzc $a3, .Lstring_indexof_compressed
+#endif
+
+ sll $v0, $a2, 1 # $a0 += $a2 * 2
+ daddu $a0, $a0, $v0 # " ditto "
+ move $v0, $a2 # Set i to fromIndex.
1:
- lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
- beq $t3,$a1,6f # return i;
- daddu $a0,$a0,2 # i++
- subu $t0,$t0,1 # this.length() - i
- bnez $t0,1b # while this.length() - i > 0
- addu $v0,$v0,1 # i++
+ lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3, $a1, 6f # return i;
+ daddu $a0, $a0, 2 # i++
+ subu $t0, $t0, 1 # this.length() - i
+ bnez $t0, 1b # while this.length() - i > 0
+ addu $v0, $v0, 1 # i++
- li $v0,-1 # if this.length() - i <= 0
- # return -1;
+ li $v0, -1 # if this.length() - i <= 0
+ # return -1;
6:
- j $ra
- nop
+ j $ra
+ nop
+
+#if (STRING_COMPRESSION_FEATURE)
+.Lstring_indexof_compressed:
+ move $a4, $a0 # Save a copy in $a4 to later compute result.
+ daddu $a0, $a0, $a2 # $a0 += $a2
+
+.Lstring_indexof_compressed_loop:
+ lbu $t3, MIRROR_STRING_VALUE_OFFSET($a0)
+ beq $t3, $a1, .Lstring_indexof_compressed_matched
+ subu $t0, $t0, 1
+ bgtz $t0, .Lstring_indexof_compressed_loop
+ daddu $a0, $a0, 1
+
+.Lstring_indexof_nomatch:
+ jalr $zero, $ra
+ li $v0, -1 # return -1;
+
+.Lstring_indexof_compressed_matched:
+ jalr $zero, $ra
+ dsubu $v0, $a0, $a4 # return (current - start);
+#endif
END art_quick_indexof
.extern artInvokePolymorphic
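
A compact C++ rendering of the compressed indexOf loop added above, with illustrative parameter names rather than ART's real accessors. Because compressed elements are one byte wide, the assembly's closing current - start subtraction is exactly the character index that the loop counter below tracks explicitly.

    #include <cstdint>

    int32_t IndexOfCompressed(const uint8_t* value, int32_t length,
                              uint16_t ch, int32_t from_index) {
      if (from_index < 0) {
        from_index = 0;                 // The slt/seleqz clamp in the asm.
      }
      for (int32_t i = from_index; i < length; ++i) {
        if (value[i] == ch) {
          return i;                     // Match found.
        }
      }
      return -1;                        // No match.
    }
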
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 9204d85279..2b3525b189 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -145,7 +145,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMa
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_rosalloc, RosAlloc)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9e75cbabd7..0bf08a6d97 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1051,7 +1051,7 @@ TEST_F(StubTest, AllocObjectArray) {
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
reinterpret_cast<size_t>(nullptr),
- StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
self);
EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1071,7 +1071,7 @@ TEST_F(StubTest, AllocObjectArray) {
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
GB, // that should fail...
reinterpret_cast<size_t>(nullptr),
- StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
self);
EXPECT_TRUE(self->IsExceptionPending());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 47dc34a355..8c907e0790 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -922,6 +922,31 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
+// Macro for string and type resolution and initialization.
+MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ subl MACRO_LITERAL(8), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(8)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX // restore frame up to return address
+ ret // return
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
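
The control flow this macro emits, rendered as a hedged C++ sketch for readability only. RuntimeCallStub is a toy stand-in for whichever cxx_name the macro is instantiated with, and the frame setup/teardown steps exist only as comments because they have no C++ equivalent.

    #include <cstdint>

    // Toy stand-in for the runtime entrypoint (artResolveStringFromCode etc.).
    static void* RuntimeCallStub(uint32_t idx, void* self) {
      return idx != 0u ? self : nullptr;
    }

    void* OneArgSaveEverythingDowncall(uint32_t arg, void* self) {
      // SETUP_SAVE_EVERYTHING_FRAME: spill every register so a GC triggered
      // inside the call can visit all live references.
      void* result = RuntimeCallStub(arg, self);   // cxx_name(arg1, Thread*)
      if (result == nullptr) {
        // DELIVER_PENDING_EXCEPTION_FRAME_READY: the callee left an exception
        // pending on the thread (e.g. an OOME); unwind instead of returning.
      }
      // RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX: the result stays in EAX.
      return result;
    }
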
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
@@ -947,10 +972,37 @@ MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
END_MACRO
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+
+// Comment out allocators that have x86-specific asm.
+// Region TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+// Normal TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path rosalloc allocation.
// eax: type/return value
// ecx, ebx, edx: free
@@ -959,14 +1011,14 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// stack has room
movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
- jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jae .Lslow_path\c_name
movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx)
// Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization check.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
- ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ja .Lslow_path\c_name
shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
// from object size.
// Load thread local rosalloc run (ebx)
@@ -977,7 +1029,7 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Load free_list head (edi),
// this will be the return value.
movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
- jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jecxz .Lslow_path\c_name
// Point of no slow path. Won't go to
// the slow path from here on.
// Load the next pointer of the head
@@ -1008,7 +1060,7 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// No fence needed for x86.
movl %ecx, %eax // Move object to return register
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
@@ -1020,10 +1072,14 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_resolved_rosalloc
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
// EAX: type/return_value
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
@@ -1047,8 +1103,8 @@ MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+// The common slow path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
POP edi
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
@@ -1065,59 +1121,159 @@ MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
-// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
+MACRO2(ART_QUICK_ALLOC_OBJECT_TLAB, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path tlab allocation.
// EAX: type
// EBX, ECX, EDX: free.
PUSH edi
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
- ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
-END_FUNCTION art_quick_alloc_object_resolved_tlab
-
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
- // Fast path region tlab allocation.
- // EAX: type/return value
- // EBX, ECX, EDX: free.
-#if !defined(USE_READ_BARRIER)
- int3
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\c_name
+.Lslow_path\c_name:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH RAW_VAR(cxx_name)
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+
+// The fast path code for art_quick_alloc_array_region_tlab.
+// Inputs: EAX: the class, ECX: int32_t component_count, EDX: total_size
+// Free temp: EBX
+// Output: EAX: return value.
+MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE, slowPathLabel)
+ mov %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
+ // Mask out the unaligned part to make sure we are 8 byte aligned.
+ andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %edx
+ movl THREAD_LOCAL_END_OFFSET(%ebx), %edi
+ subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+ cmpl %edi, %edx // Check if it fits.
+ ja RAW_VAR(slowPathLabel)
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+ addl %edi, %edx // Add the object size.
+ movl %edx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos_
+ addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
+ // Store the class pointer in the
+ // header.
+ // No fence needed for x86.
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edi)
+ movl %ecx, MIRROR_ARRAY_LENGTH_OFFSET(%edi)
+ movl %edi, %eax
+ POP edi
+ ret // Fast path succeeded.
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_UNKNOWN, slow_path)
+ // We should never enter here. Code is provided for reference.
int3
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+ ja RAW_VAR(slow_path)
+ PUSH ecx
+ movl %ecx, %edx
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%eax), %ecx // Load component type.
+ UNPOISON_HEAP_REF ecx
+ movl MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(%ecx), %ecx // Load primitive type.
+ shr MACRO_LITERAL(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT), %ecx // Get component size shift.
+ sall %cl, %edx // Calculate array count shifted.
+ // Add array header + alignment rounding.
+ add MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %edx
+ // Add 4 extra bytes if we are doing a long array.
+ add MACRO_LITERAL(1), %ecx
+ and MACRO_LITERAL(4), %ecx
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
#endif
- PUSH edi
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
- ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
-END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+ addl %ecx, %edx
+ POP ecx
+END_MACRO
+MACRO1(COMPUTE_ARRAY_SIZE_8, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)(%ecx), %edx
+END_MACRO
-DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
+MACRO1(COMPUTE_ARRAY_SIZE_16, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 2)(%ecx), %edx
+ sall MACRO_LITERAL(1), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_32, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 4)(%ecx), %edx
+ sall MACRO_LITERAL(2), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_64, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 8)(%ecx), %edx
+ sall MACRO_LITERAL(3), %edx
+END_MACRO
+
+MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
+ DEFINE_FUNCTION VAR(c_entrypoint)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ PUSH edi
+ CALL_MACRO(size_setup) .Lslow_path\c_entrypoint
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\c_entrypoint
+.Lslow_path\c_entrypoint:
+ POP edi
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl LITERAL(8), %esp // push padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // alignment padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg1
- call SYMBOL(artResolveStringFromCode)
- addl LITERAL(16), %esp // pop arguments
+ PUSH ecx
+ PUSH eax
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- testl %eax, %eax // If result is null, deliver the OOME.
- jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX
- ret
- CFI_RESTORE_STATE
- CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_FUNCTION art_quick_resolve_string
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ END_FUNCTION VAR(c_entrypoint)
+END_MACRO
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
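
All of the COMPUTE_ARRAY_SIZE_* variants above reduce to the same arithmetic: shift the component count by the element-size shift, add the array header, and round up to the object alignment (wide arrays use a data offset 4 bytes larger so 64-bit elements stay 8-byte aligned). A standalone sketch, with illustrative constants standing in for the generated MIRROR_* and OBJECT_ALIGNMENT_MASK defines:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kObjectAlignment = 8;    // Assumed; matches common ART configs.
    constexpr size_t kArrayHeaderSize = 12;   // class + monitor + length (illustrative).

    size_t ComputeArrayAllocSize(uint32_t component_count, size_t component_shift) {
      size_t data_size = static_cast<size_t>(component_count) << component_shift;
      // header + OBJECT_ALIGNMENT_MASK, then mask off the low bits: the same
      // add-then-and that the macros and the TLAB fast path perform between them.
      return (data_size + kArrayHeaderSize + (kObjectAlignment - 1)) &
             ~(kObjectAlignment - 1);
    }
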
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 10f9047bf3..f1be52eeb6 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -950,6 +950,26 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
+// Macro for string and type resolution and initialization.
+MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME // save everything for GC
+ // Outgoing argument set up
+ movl %eax, %edi // pass string index
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
+ ret
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
@@ -1006,7 +1026,8 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path rosalloc allocation.
// RDI: mirror::Class*, RAX: return value
// RSI, RDX, RCX, R8, R9: free.
@@ -1015,14 +1036,14 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
- jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jae .Lslow_path\c_name
// Load the object size
movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
// Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
- ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ja .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size.
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1036,7 +1057,7 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
- jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jz .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
// Push the new object onto the thread
// local allocation stack and
@@ -1063,25 +1084,19 @@ DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
-
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
- jz RAW_VAR(slowPathLabel)
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
+ END_FUNCTION VAR(c_name)
END_MACRO
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
// The common fast path code for art_quick_alloc_object_resolved_region_tlab.
// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
@@ -1220,12 +1235,7 @@ MACRO0(COMPUTE_ARRAY_SIZE_64)
movq %rsi, %r9
salq MACRO_LITERAL(3), %r9
// Add array header + alignment rounding.
- // Add 4 extra bytes for array data alignment
- addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK + 4), %r9
-END_MACRO
-
-// The slow path code for art_quick_alloc_array_*tlab.
-MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name)
+ addq MACRO_LITERAL(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
END_MACRO
MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
@@ -1280,27 +1290,10 @@ DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
-DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME
- // Outgoing argument set up
- movl %eax, %edi // pass string index
- movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, Thread*)
-
- testl %eax, %eax // If result is null, deliver the OOME.
- jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
- ret
- CFI_RESTORE_STATE
- CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_FUNCTION art_quick_resolve_string
-
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 80af8e7bde..16b73c681f 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -311,6 +311,8 @@ inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
template <bool kResolve>
inline ObjPtr<mirror::Class> ArtField::GetType() {
+ // TODO: Refactor this function into two functions, ResolveType() and LookupType()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
const uint32_t field_index = GetDexFieldIndex();
ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
@@ -320,9 +322,16 @@ inline ObjPtr<mirror::Class> ArtField::GetType() {
const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_);
- if (kResolve && UNLIKELY(type == nullptr)) {
- type = ResolveGetType(field_id.type_idx_);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (UNLIKELY(type == nullptr)) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (kResolve) {
+ type = class_linker->ResolveType(*dex_file, field_id.type_idx_, declaring_class);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_file, field_id.type_idx_, dex_cache, declaring_class->GetClassLoader());
+ DCHECK(!Thread::Current()->IsExceptionPending());
+ }
}
return type;
}
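
The new branch encodes the resolve/lookup split: LookupResolvedType() only consults existing caches and class tables, so it cannot suspend or throw, while ResolveType() may perform class loading and signals failure through a pending exception. A toy model of that contract, with an unordered_map standing in for the dex cache; SketchLinker and its members are assumed names, not ART API.

    #include <cstdint>
    #include <unordered_map>

    struct Class {};

    struct SketchLinker {
      std::unordered_map<uint32_t, Class*> cache;

      // Lookup: side-effect free; returns nullptr without throwing.
      Class* LookupResolvedType(uint32_t type_idx) {
        auto it = cache.find(type_idx);
        return it == cache.end() ? nullptr : it->second;
      }

      // Resolve: may "load" the class and populate the cache on the way.
      Class* ResolveType(uint32_t type_idx) {
        Class*& slot = cache[type_idx];
        if (slot == nullptr) {
          slot = new Class();  // Stands in for actual class loading.
        }
        return slot;
      }
    };
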
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index a4a6e5a4fb..7e131040be 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -48,10 +48,6 @@ ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
}
-ObjPtr<mirror::Class> ArtField::ResolveGetType(dex::TypeIndex type_idx) {
- return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
-}
-
ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 427e103749..75dd981136 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -217,8 +217,6 @@ class ArtField FINAL {
private:
ObjPtr<mirror::Class> ProxyFindSystemClass(const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_);
- ObjPtr<mirror::Class> ResolveGetType(dex::TypeIndex type_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<mirror::String> ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 950f1aa9f4..473d9cf74e 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -175,12 +175,19 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerS
}
inline mirror::Class* ArtMethod::GetClassFromTypeIndex(dex::TypeIndex type_idx, bool resolve) {
+ // TODO: Refactor this function into two functions, Resolve...() and Lookup...()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
ObjPtr<mirror::DexCache> dex_cache = GetDexCache();
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
- if (UNLIKELY(type == nullptr) && resolve) {
+ if (UNLIKELY(type == nullptr)) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- type = class_linker->ResolveType(type_idx, this);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (resolve) {
+ type = class_linker->ResolveType(type_idx, this);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, GetClassLoader());
+ }
}
return type.Ptr();
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c796bd1c09..4902ad42d7 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -445,6 +445,8 @@ static const OatFile::OatMethod FindOatMethodFor(ArtMethod* method,
PointerSize pointer_size,
bool* found)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We shouldn't be calling this with obsolete methods.
+ DCHECK(!method->IsObsolete());
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
diff --git a/runtime/art_method.h b/runtime/art_method.h
index d4a65c8c38..383630363e 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -201,6 +201,10 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccCompileDontBother) == 0;
}
+ void SetDontCompile() {
+ AddAccessFlags(kAccCompileDontBother);
+ }
+
// A default conflict method is a special sentinel method that stands for a conflict between
// multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one
// attempts to do so.
@@ -226,7 +230,7 @@ class ArtMethod FINAL {
void SetIsObsolete() {
// TODO We should really support redefining intrinsic if possible.
DCHECK(!IsIntrinsic());
- SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
+ AddAccessFlags(kAccObsoleteMethod);
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 46f2c08663..c7a94a90dc 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -72,7 +72,7 @@ ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
// Import platform-independent constant defines from our autogenerated list.
// Export new defines (for assembly use) by editing cpp-define-generator def files.
#define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "generated/asm_support_gen.h"
+#include "asm_support_gen.h"
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e2a7259784..45c3165b18 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -235,6 +235,11 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
this->store(desired, std::memory_order_seq_cst);
}
+ // Atomically replace the value with desired value.
+ T ExchangeRelaxed(T desired_value) {
+ return this->exchange(desired_value, std::memory_order_relaxed);
+ }
+
// Atomically replace the value with desired value if it matches the expected value.
// Participates in total ordering of atomic operations.
bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -283,6 +288,10 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
}
+ T FetchAndSubRelaxed(const T value) {
+ return this->fetch_sub(value, std::memory_order_relaxed); // Return old value.
+ }
+
T FetchAndOrSequentiallyConsistent(const T value) {
return this->fetch_or(value, std::memory_order_seq_cst); // Return old_value.
}
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 9fdb0cc9d0..db433194d3 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -146,7 +146,9 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
}
#pragma GCC diagnostic push
+#if __clang_major__ >= 4
#pragma GCC diagnostic ignored "-Winstantiation-after-specialization"
+#endif
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
#pragma GCC diagnostic pop
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e05a85a116..7bba944ca8 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -49,7 +49,6 @@ Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
@@ -73,6 +72,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -147,7 +147,10 @@ class ScopedContentionRecorder FINAL : public ValueObject {
const uint64_t start_nano_time_;
};
-BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
+BaseMutex::BaseMutex(const char* name, LockLevel level)
+ : level_(level),
+ name_(name),
+ should_respond_to_empty_checkpoint_request_(false) {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
@@ -378,6 +381,9 @@ void Mutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
@@ -520,6 +526,18 @@ std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
return os;
}
+void Mutex::WakeupToRespondToEmptyCheckpoint() {
+#if ART_USE_FUTEXES
+  // Wake up all the waiters so they will respond to the empty checkpoint.
+ DCHECK(should_respond_to_empty_checkpoint_request_);
+ if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ }
+#else
+  LOG(FATAL) << "Non-futex case isn't supported.";
+#endif
+}
+
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
: BaseMutex(name, level)
#if ART_USE_FUTEXES
@@ -564,6 +582,9 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
@@ -640,6 +661,9 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
@@ -678,6 +702,9 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN && errno != EINTR) {
PLOG(FATAL) << "futex wait failed for " << name_;
@@ -750,6 +777,19 @@ std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
return os;
}
+void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
+#if ART_USE_FUTEXES
+  // Wake up all the waiters so they will respond to the empty checkpoint.
+ DCHECK(should_respond_to_empty_checkpoint_request_);
+ if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
+ num_pending_writers_.LoadRelaxed() > 0)) {
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ }
+#else
+  LOG(FATAL) << "Non-futex case isn't supported.";
+#endif
+}
+
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
: name_(name), guard_(guard) {
#if ART_USE_FUTEXES
@@ -1116,16 +1156,18 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
- UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
- DCHECK(mem_maps_lock_ == nullptr);
- mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
#undef UPDATE_CURRENT_LOCK_LEVEL
+ // List of mutexes that we may hold when accessing a weak ref.
+ dex_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
+ classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
+
InitConditions();
}
}
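
The shape of the mechanism these mutex.cc hunks add, as a hedged sketch: a mutex flagged via SetShouldRespondToEmptyCheckpointRequest(true) makes a blocked acquirer acknowledge an empty checkpoint before sleeping, and WakeupToRespondToEmptyCheckpoint() kicks already-sleeping waiters so they re-run that check. The stand-in below compresses the futex details into comments:

    #include <atomic>

    struct SketchMutex {
      std::atomic<bool> respond_to_empty_checkpoint{false};
      std::atomic<int> contenders{0};

      void BlockOnContention() {
        contenders.fetch_add(1, std::memory_order_relaxed);
        if (respond_to_empty_checkpoint.load(std::memory_order_relaxed)) {
          // CheckEmptyCheckpointFromMutex(): acknowledge the checkpoint now,
          // so the requesting thread never waits on a sleeping contender.
        }
        // futex(FUTEX_WAIT, ...) would sleep here until the holder releases.
        contenders.fetch_sub(1, std::memory_order_relaxed);
      }

      void WakeupToRespondToEmptyCheckpoint() {
        if (contenders.load(std::memory_order_relaxed) > 0) {
          // futex(FUTEX_WAKE, -1): wake everyone; they loop and re-check.
        }
      }
    };
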
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 21dd437711..9b6938f9bf 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -55,7 +55,6 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
- kMemMapsLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
@@ -153,6 +152,16 @@ class BaseMutex {
static void DumpAll(std::ostream& os);
+ bool ShouldRespondToEmptyCheckpointRequest() const {
+ return should_respond_to_empty_checkpoint_request_;
+ }
+
+ void SetShouldRespondToEmptyCheckpointRequest(bool value) {
+ should_respond_to_empty_checkpoint_request_ = value;
+ }
+
+ virtual void WakeupToRespondToEmptyCheckpoint() = 0;
+
protected:
friend class ConditionVariable;
@@ -169,6 +178,7 @@ class BaseMutex {
const LockLevel level_; // Support for lock hierarchy.
const char* const name_;
+ bool should_respond_to_empty_checkpoint_request_;
// A log entry that records contention but makes no guarantee that either tid will be held live.
struct ContentionLogEntry {
@@ -267,6 +277,8 @@ class LOCKABLE Mutex : public BaseMutex {
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
+ void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+
private:
#if ART_USE_FUTEXES
// 0 is unheld, 1 is held.
@@ -387,6 +399,8 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
+ void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+
private:
#if ART_USE_FUTEXES
// Out-of-inline path for handling contention for a SharedLock.
@@ -712,11 +726,14 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
- // Guards the maps in mem_map.
- static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+ // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
+ // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
+ // encounter an unexpected mutex on accessing weak refs,
+ // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
+ static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
};
class Roles {
diff --git a/runtime/bit_memory_region.h b/runtime/bit_memory_region.h
index c3b5be458e..3a696f1969 100644
--- a/runtime/bit_memory_region.h
+++ b/runtime/bit_memory_region.h
@@ -40,6 +40,10 @@ class BitMemoryRegion FINAL : public ValueObject {
return region_.size_in_bits();
}
+ ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_size) const {
+ return BitMemoryRegion(region_, bit_start_ + bit_offset, bit_size);
+ }
+
// Load a single bit in the region. The bit at offset 0 is the least
// significant bit in the first byte.
ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
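
A minimal stand-in showing what the new Subregion() helper buys when decoding tightly bit-packed tables such as stack maps. The real class wraps a MemoryRegion; BitView below keeps just enough state to demonstrate the offset arithmetic.

    #include <cstddef>
    #include <cstdint>

    struct BitView {
      const uint8_t* data;
      size_t bit_start;
      size_t bit_size;

      BitView Subregion(size_t bit_offset, size_t size) const {
        return BitView{data, bit_start + bit_offset, size};
      }

      bool LoadBit(size_t i) const {
        size_t bit = bit_start + i;
        return (data[bit / 8] >> (bit % 8)) & 1;  // Bit 0 is the LSB of byte 0.
      }
    };

    // Carving a 5-bit field out of a larger view, no byte alignment required:
    //   BitView field = table.Subregion(/*bit_offset=*/ 17, /*size=*/ 5);
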
diff --git a/runtime/cha.cc b/runtime/cha.cc
index e6681d559c..eaba01b2ce 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -16,6 +16,7 @@
#include "cha.h"
+#include "art_method-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "runtime.h"
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 3438810069..bd510ca0e1 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -78,6 +78,18 @@ inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
return string.Ptr();
}
+inline ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(
+ dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
+ if (type == nullptr) {
+ type = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, class_loader);
+ }
+ return type;
+}
+
inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
if (kIsDebugBuild) {
@@ -91,25 +103,6 @@ inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMetho
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
- }
- return resolved_type.Ptr();
-}
-
-inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtField* referrer) {
- Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ObjPtr<mirror::DexCache> dex_cache_ptr = declaring_class->GetDexCache();
- ObjPtr<mirror::Class> resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == nullptr)) {
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type.Ptr();
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index edd6e3b522..9380588576 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1171,6 +1171,23 @@ static void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& pred
}
}
+template <typename T>
+static void CopyDexCachePairs(const std::atomic<mirror::DexCachePair<T>>* src,
+ size_t count,
+ std::atomic<mirror::DexCachePair<T>>* dst) {
+ DCHECK_NE(count, 0u);
+ DCHECK(!src[0].load(std::memory_order_relaxed).object.IsNull() ||
+ src[0].load(std::memory_order_relaxed).index != 0u);
+ for (size_t i = 0; i < count; ++i) {
+ DCHECK_EQ(dst[i].load(std::memory_order_relaxed).index, 0u);
+ DCHECK(dst[i].load(std::memory_order_relaxed).object.IsNull());
+ mirror::DexCachePair<T> source = src[i].load(std::memory_order_relaxed);
+ if (source.index != 0u || !source.object.IsNull()) {
+ dst[i].store(source, std::memory_order_relaxed);
+ }
+ }
+}
+
bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
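
A condensed model of CopyDexCachePairs() above: each slot holds an <index, object> pair and an all-zero pair means "empty", so only populated slots are copied. The DCHECK on slot 0 appears to verify that the source array was initialized with a non-empty first entry, since an all-zero slot 0 would be indistinguishable from a cached id 0 with a null object. Pair and CopyPairs below are illustrative, not ART types.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Pair {
      uint32_t index;
      void* object;
    };

    void CopyPairs(const std::atomic<Pair>* src, size_t count,
                   std::atomic<Pair>* dst) {
      for (size_t i = 0; i < count; ++i) {
        Pair p = src[i].load(std::memory_order_relaxed);
        if (p.index != 0u || p.object != nullptr) {
          dst[i].store(p, std::memory_order_relaxed);  // Copy populated slots only.
        }
      }
    }
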
@@ -1224,7 +1241,10 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
- const size_t num_types = dex_file->NumTypeIds();
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
const size_t num_methods = dex_file->NumMethodIds();
const size_t num_fields = dex_file->NumFieldIds();
size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
@@ -1243,28 +1263,14 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings();
mirror::StringDexCacheType* const strings =
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- for (size_t j = 0; j < num_strings; ++j) {
- DCHECK_EQ(strings[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(strings[j].load(std::memory_order_relaxed).object.IsNull());
- strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
- mirror::StringDexCachePair::Initialize(strings);
+ CopyDexCachePairs(image_resolved_strings, num_strings, strings);
dex_cache->SetStrings(strings);
}
if (num_types != 0u) {
- GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes();
- GcRoot<mirror::Class>* const types =
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
- for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) {
- DCHECK(types[j].IsNull());
- }
- CopyNonNull(image_resolved_types,
- num_types,
- types,
- [](const GcRoot<mirror::Class>& elem) {
- return elem.IsNull();
- });
+ mirror::TypeDexCacheType* const image_resolved_types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types =
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
+ CopyDexCachePairs(image_resolved_types, num_types, types);
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1305,15 +1311,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::MethodTypeDexCacheType* const method_types =
reinterpret_cast<mirror::MethodTypeDexCacheType*>(
raw_arrays + layout.MethodTypesOffset());
- for (size_t j = 0; j < num_method_types; ++j) {
- DCHECK_EQ(method_types[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(method_types[j].load(std::memory_order_relaxed).object.IsNull());
- method_types[j].store(
- image_resolved_method_types[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
-
- mirror::MethodTypeDexCachePair::Initialize(method_types);
+ CopyDexCachePairs(image_resolved_method_types, num_method_types, method_types);
dex_cache->SetResolvedMethodTypes(method_types);
}
}
@@ -1322,24 +1320,16 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
// Make sure to do this after we update the arrays since we store the resolved types array
// in DexCacheData in RegisterDexFileLocked. We need the array pointer to be the one in the
// BSS.
- ObjPtr<mirror::DexCache> existing_dex_cache = FindDexCacheLocked(self,
- *dex_file,
- /*allow_failure*/true);
- CHECK(existing_dex_cache == nullptr);
- StackHandleScope<1> hs3(self);
- Handle<mirror::DexCache> h_dex_cache = hs3.NewHandle(dex_cache);
- RegisterDexFileLocked(*dex_file, h_dex_cache);
- if (kIsDebugBuild) {
- dex_cache.Assign(h_dex_cache.Get()); // Update dex_cache, used below in debug build.
- }
+ CHECK(!FindDexCacheDataLocked(*dex_file).IsValid());
+ RegisterDexFileLocked(*dex_file, dex_cache, class_loader.Get());
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
const size_t num_types = dex_cache->NumResolvedTypes();
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
+ for (size_t j = 0; j != num_types; ++j) {
// The image space is not yet added to the heap, avoid read barriers.
- ObjPtr<mirror::Class> klass = types[j].Read();
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
auto it = new_class_set->Find(ClassTable::TableSlot(klass));
@@ -1675,11 +1665,9 @@ bool ClassLinker::AddImageSpace(
return false;
}
- StackHandleScope<1> hs2(self);
- MutableHandle<mirror::DexCache> h_dex_cache(hs2.NewHandle<mirror::DexCache>(nullptr));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- h_dex_cache.Assign(dex_caches->Get(i));
- std::string dex_file_location(h_dex_cache->GetLocation()->ToModifiedUtf8());
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+ std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
// TODO: Only store qualified paths.
// If non qualified, qualify it.
if (dex_file_location.find('/') == std::string::npos) {
@@ -1699,10 +1687,10 @@ bool ClassLinker::AddImageSpace(
if (app_image) {
// The current dex file field is bogus, overwrite it so that we can get the dex file in the
// loop below.
- h_dex_cache->SetDexFile(dex_file.get());
- GcRoot<mirror::Class>* const types = h_dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) {
- ObjPtr<mirror::Class> klass = types[j].Read();
+ dex_cache->SetDexFile(dex_file.get());
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
+ for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (klass != nullptr) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
}
@@ -1711,11 +1699,11 @@ bool ClassLinker::AddImageSpace(
if (kSanityCheckObjects) {
ImageSanityChecks::CheckPointerArray(heap,
this,
- h_dex_cache->GetResolvedMethods(),
- h_dex_cache->NumResolvedMethods());
+ dex_cache->GetResolvedMethods(),
+ dex_cache->NumResolvedMethods());
}
// Register dex files, keep track of existing ones that are conflicts.
- AppendToBootClassPath(*dex_file.get(), h_dex_cache);
+ AppendToBootClassPath(*dex_file.get(), dex_cache);
}
out_dex_files->push_back(std::move(dex_file));
}
@@ -2656,7 +2644,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self,
}
ObjPtr<mirror::DexCache> dex_cache = RegisterDexFile(*new_dex_file, class_loader.Get());
if (dex_cache == nullptr) {
- self->AssertPendingOOMException();
+ self->AssertPendingException();
return nullptr;
}
klass->SetDexCache(dex_cache);
@@ -3264,28 +3252,27 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
}
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
- StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocAndInitializeDexCache(
+ ObjPtr<mirror::DexCache> dex_cache = AllocAndInitializeDexCache(
self,
dex_file,
- Runtime::Current()->GetLinearAlloc())));
- CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
- << dex_file.GetLocation();
+ Runtime::Current()->GetLinearAlloc());
+ CHECK(dex_cache != nullptr) << "Failed to allocate dex cache for " << dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
}
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) {
- CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
+ ObjPtr<mirror::DexCache> dex_cache) {
+ CHECK(dex_cache != nullptr) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
- RegisterDexFile(dex_file, dex_cache);
+ RegisterBootClassPathDexFile(dex_file, dex_cache);
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) {
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
Thread* const self = Thread::Current();
Locks::dex_lock_->AssertExclusiveHeld(self);
- CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
+ CHECK(dex_cache != nullptr) << dex_file.GetLocation();
// For app images, the dex cache location may be a suffix of the dex file location since the
// dex file location is an absolute path.
const std::string dex_cache_location = dex_cache->GetLocation()->ToModifiedUtf8();
@@ -3313,25 +3300,49 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
++it;
}
}
- jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache.Get());
+ jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache);
dex_cache->SetDexFile(&dex_file);
DexCacheData data;
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.resolved_methods = dex_cache->GetResolvedMethods();
+ data.class_table = ClassTableForClassLoader(class_loader);
+ DCHECK(data.class_table != nullptr);
dex_caches_.push_back(data);
}
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
- ObjPtr<mirror::ClassLoader> class_loader) {
+ObjPtr<mirror::DexCache> ClassLinker::DecodeDexCache(Thread* self, const DexCacheData& data) {
+ return data.IsValid()
+ ? ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root))
+ : nullptr;
+}
+
+ObjPtr<mirror::DexCache> ClassLinker::EnsureSameClassLoader(
+ Thread* self,
+ ObjPtr<mirror::DexCache> dex_cache,
+ const DexCacheData& data,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ DCHECK_EQ(dex_cache->GetDexFile(), data.dex_file);
+ if (data.class_table != ClassTableForClassLoader(class_loader)) {
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+ "Attempt to register dex file %s with multiple class loaders",
+ data.dex_file->GetLocation().c_str());
+ return nullptr;
+ }
+ return dex_cache;
+}
+
+ObjPtr<mirror::DexCache> ClassLinker::RegisterDexFile(const DexFile& dex_file,
+ ObjPtr<mirror::ClassLoader> class_loader) {
Thread* self = Thread::Current();
+ DexCacheData old_data;
{
ReaderMutexLock mu(self, *Locks::dex_lock_);
- ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
- if (dex_cache != nullptr) {
- // TODO: Check if the dex file was registered with the same class loader. Bug: 34193123
- return dex_cache.Ptr();
- }
+ old_data = FindDexCacheDataLocked(dex_file);
+ }
+ ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCache(self, old_data);
+ if (old_dex_cache != nullptr) {
+ return EnsureSameClassLoader(self, old_dex_cache, old_data, class_loader);
}
LinearAlloc* const linear_alloc = GetOrCreateAllocatorForClassLoader(class_loader);
DCHECK(linear_alloc != nullptr);
@@ -3343,7 +3354,8 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
// Don't alloc while holding the lock, since allocation may need to
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
- StackHandleScope<2> hs(self);
+ StackHandleScope<3> hs(self);
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
ObjPtr<mirror::String> location;
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(/*out*/&location,
self,
@@ -3351,75 +3363,92 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
Handle<mirror::String> h_location(hs.NewHandle(location));
{
WriterMutexLock mu(self, *Locks::dex_lock_);
- ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
- if (dex_cache != nullptr) {
- // Another thread managed to initialize the dex cache faster, so use that DexCache.
- // If this thread encountered OOME, ignore it.
- DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
- self->ClearException();
- return dex_cache.Ptr();
- }
- if (h_dex_cache.Get() == nullptr) {
- self->AssertPendingOOMException();
- return nullptr;
- }
- // Do InitializeDexCache while holding dex lock to make sure two threads don't call it at the
- // same time with the same dex cache. Since the .bss is shared this can cause failing DCHECK
- // that the arrays are null.
- mirror::DexCache::InitializeDexCache(self,
- h_dex_cache.Get(),
- h_location.Get(),
- &dex_file,
- linear_alloc,
- image_pointer_size_);
- RegisterDexFileLocked(dex_file, h_dex_cache);
+ old_data = FindDexCacheDataLocked(dex_file);
+ old_dex_cache = DecodeDexCache(self, old_data);
+ if (old_dex_cache == nullptr && h_dex_cache.Get() != nullptr) {
+ // Do InitializeDexCache while holding the dex lock to make sure two threads don't call it at
+ // the same time with the same dex cache. Since the .bss is shared, this can otherwise cause a
+ // failing DCHECK that the arrays are null.
+ mirror::DexCache::InitializeDexCache(self,
+ h_dex_cache.Get(),
+ h_location.Get(),
+ &dex_file,
+ linear_alloc,
+ image_pointer_size_);
+ RegisterDexFileLocked(dex_file, h_dex_cache.Get(), h_class_loader.Get());
+ }
+ }
+ if (old_dex_cache != nullptr) {
+ // Another thread managed to initialize the dex cache faster, so use that DexCache.
+ // If this thread encountered OOME, ignore it.
+ DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
+ self->ClearException();
+ // We cannot call EnsureSameClassLoader() while holding the dex_lock_.
+ return EnsureSameClassLoader(self, old_dex_cache, old_data, h_class_loader.Get());
+ }
+ if (h_dex_cache.Get() == nullptr) {
+ self->AssertPendingOOMException();
+ return nullptr;
}
table->InsertStrongRoot(h_dex_cache.Get());
return h_dex_cache.Get();
}
-void ClassLinker::RegisterDexFile(const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) {
+void ClassLinker::RegisterBootClassPathDexFile(const DexFile& dex_file,
+ ObjPtr<mirror::DexCache> dex_cache) {
WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
- RegisterDexFileLocked(dex_file, dex_cache);
+ RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
+}
+
+bool ClassLinker::IsDexFileRegistered(Thread* self, const DexFile& dex_file) {
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
+ return DecodeDexCache(self, FindDexCacheDataLocked(dex_file)) != nullptr;
}
-mirror::DexCache* ClassLinker::FindDexCache(Thread* self,
- const DexFile& dex_file,
- bool allow_failure) {
+ObjPtr<mirror::DexCache> ClassLinker::FindDexCache(Thread* self, const DexFile& dex_file) {
ReaderMutexLock mu(self, *Locks::dex_lock_);
- return FindDexCacheLocked(self, dex_file, allow_failure);
+ ObjPtr<mirror::DexCache> dex_cache = DecodeDexCache(self, FindDexCacheDataLocked(dex_file));
+ if (dex_cache != nullptr) {
+ return dex_cache;
+ }
+ // Failure, dump diagnostic and abort.
+ std::string location(dex_file.GetLocation());
+ for (const DexCacheData& data : dex_caches_) {
+ if (DecodeDexCache(self, data) != nullptr) {
+ LOG(ERROR) << "Registered dex file " << data.dex_file->GetLocation();
+ }
+ }
+ LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
+ UNREACHABLE();
}
-mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
- const DexFile& dex_file,
- bool allow_failure) {
+ClassTable* ClassLinker::FindClassTable(Thread* self, ObjPtr<mirror::DexCache> dex_cache) {
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ DCHECK(dex_file != nullptr);
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
// Search assuming unique-ness of dex file.
for (const DexCacheData& data : dex_caches_) {
// Avoid decoding (and read barriers) other unrelated dex caches.
- if (data.dex_file == &dex_file) {
- ObjPtr<mirror::DexCache> dex_cache =
- ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
- if (dex_cache != nullptr) {
- return dex_cache.Ptr();
+ if (data.dex_file == dex_file) {
+ ObjPtr<mirror::DexCache> registered_dex_cache = DecodeDexCache(self, data);
+ if (registered_dex_cache != nullptr) {
+ CHECK_EQ(registered_dex_cache, dex_cache) << dex_file->GetLocation();
+ return data.class_table;
}
- break;
}
}
- if (allow_failure) {
- return nullptr;
- }
- std::string location(dex_file.GetLocation());
- // Failure, dump diagnostic and abort.
+ return nullptr;
+}
+
+ClassLinker::DexCacheData ClassLinker::FindDexCacheDataLocked(const DexFile& dex_file) {
+ // Search assuming uniqueness of the dex file.
for (const DexCacheData& data : dex_caches_) {
- ObjPtr<mirror::DexCache> dex_cache =
- ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
- if (dex_cache != nullptr) {
- LOG(ERROR) << "Registered dex file " << dex_cache->GetDexFile()->GetLocation();
+ // Avoid decoding (and read barriers) other unrelated dex caches.
+ if (data.dex_file == &dex_file) {
+ return data;
}
}
- LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
- UNREACHABLE();
+ return DexCacheData();
}
void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
@@ -5679,14 +5708,7 @@ class LinkVirtualHashTable {
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
-// b/30419309
-#if defined(__i386__)
-#define X86_OPTNONE __attribute__((optnone))
-#else
-#define X86_OPTNONE
-#endif
-
-X86_OPTNONE bool ClassLinker::LinkVirtualMethods(
+bool ClassLinker::LinkVirtualMethods(
Thread* self,
Handle<mirror::Class> klass,
/*out*/std::unordered_map<size_t, ClassLinker::MethodTranslation>* default_translations) {
@@ -7698,7 +7720,9 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
uint32_t utf16_length;
const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length);
ObjPtr<mirror::String> string = intern_table_->InternStrong(utf16_length, utf8_data);
- dex_cache->SetResolvedString(string_idx, string);
+ if (string != nullptr) {
+ dex_cache->SetResolvedString(string_idx, string);
+ }
return string.Ptr();
}
@@ -7739,11 +7763,16 @@ ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(const DexFile& dex_file,
// Find the class in the loaded classes table.
type = LookupClass(self, descriptor, hash, class_loader.Ptr());
}
+ if (type != nullptr) {
+ if (type->IsResolved()) {
+ dex_cache->SetResolvedType(type_idx, type);
+ } else {
+ type = nullptr;
+ }
+ }
}
- if (type != nullptr && type->IsResolved()) {
- return type.Ptr();
- }
- return nullptr;
+ DCHECK(type == nullptr || type->IsResolved());
+ return type;
}
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
@@ -7763,6 +7792,12 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == nullptr) {
+ // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here
+ // as a workaround for FastNative JNI to avoid AssertNoPendingException() when
+ // trying to resolve annotations while an exception may be pending. Bug: 34659969
+ resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get());
+ }
+ if (resolved == nullptr) {
Thread* self = Thread::Current();
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(self, descriptor, class_loader);
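The rewritten RegisterDexFile() is a double-checked registration: an optimistic lookup under the reader lock, allocation with no lock held (in ART, allocation may need to suspend all threads), then a re-check under the writer lock before publishing. A minimal sketch of that shape using assumed, simplified types (Registry, DexFile, and DexCache here are illustrative, not ART's):

#include <map>
#include <memory>
#include <shared_mutex>

struct DexFile {};
struct DexCache {};

class Registry {
 public:
  // Return the registered cache for a file, creating and publishing one if
  // needed. Losing a race simply discards the freshly allocated cache.
  std::shared_ptr<DexCache> Register(const DexFile* file) {
    {
      std::shared_lock<std::shared_mutex> lock(mutex_);
      auto it = caches_.find(file);
      if (it != caches_.end()) {
        return it->second;  // Fast path: already registered.
      }
    }
    // Allocate without holding the lock; this may block for a long time.
    auto fresh = std::make_shared<DexCache>();
    std::unique_lock<std::shared_mutex> lock(mutex_);
    auto it = caches_.find(file);
    if (it != caches_.end()) {
      return it->second;  // Another thread registered first; use its cache.
    }
    caches_.emplace(file, fresh);
    return fresh;
  }

 private:
  std::shared_mutex mutex_;
  std::map<const DexFile*, std::shared_ptr<DexCache>> caches_;
};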
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 5042fb7609..a880a10eb8 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -262,10 +262,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtField* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
-
// Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search
// for the type, since it may be referenced from but not contained within the given DexFile.
ObjPtr<mirror::Class> LookupResolvedType(const DexFile& dex_file,
@@ -273,6 +269,10 @@ class ClassLinker {
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
@@ -382,11 +382,11 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- mirror::DexCache* RegisterDexFile(const DexFile& dex_file,
- ObjPtr<mirror::ClassLoader> class_loader)
+ ObjPtr<mirror::DexCache> RegisterDexFile(const DexFile& dex_file,
+ ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+ void RegisterBootClassPathDexFile(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -413,9 +413,13 @@ class ClassLinker {
REQUIRES(!Locks::dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* FindDexCache(Thread* self,
- const DexFile& dex_file,
- bool allow_failure = false)
+ bool IsDexFileRegistered(Thread* self, const DexFile& dex_file)
+ REQUIRES(!Locks::dex_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::DexCache> FindDexCache(Thread* self, const DexFile& dex_file)
+ REQUIRES(!Locks::dex_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ClassTable* FindClassTable(Thread* self, ObjPtr<mirror::DexCache> dex_cache)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
@@ -655,6 +659,18 @@ class ClassLinker {
REQUIRES(!Locks::dex_lock_);
struct DexCacheData {
+ // Construct an invalid data object.
+ DexCacheData()
+ : weak_root(nullptr),
+ dex_file(nullptr),
+ resolved_methods(nullptr),
+ class_table(nullptr) { }
+
+ // Check if the data is valid.
+ bool IsValid() const {
+ return dex_file != nullptr;
+ }
+
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
jweak weak_root;
@@ -663,6 +679,11 @@ class ClassLinker {
// class unloading.)
const DexFile* dex_file;
ArtMethod** resolved_methods;
+ // Identify the associated class loader's class table. This is used to make sure that
+ // the Java call to native DexCache.setResolvedType() inserts the resolved type in that
+ // class table. It is also used to make sure we don't register the same dex cache with
+ // multiple class loaders.
+ ClassTable* class_table;
};
private:
@@ -749,7 +770,7 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+ void AppendToBootClassPath(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
@@ -810,12 +831,24 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+ void RegisterDexFileLocked(const DexFile& dex_file,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* FindDexCacheLocked(Thread* self, const DexFile& dex_file, bool allow_failure)
+ DexCacheData FindDexCacheDataLocked(const DexFile& dex_file)
REQUIRES(Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::DexCache> DecodeDexCache(Thread* self, const DexCacheData& data)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Called to ensure that the dex cache has been registered with the same class loader.
+ // If so, returns the dex cache; otherwise throws InternalError and returns null.
+ ObjPtr<mirror::DexCache> EnsureSameClassLoader(Thread* self,
+ ObjPtr<mirror::DexCache> dex_cache,
+ const DexCacheData& data,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES(!Locks::dex_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread* self,
Handle<mirror::Class> klass,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 17510bb598..de1cd6d807 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -439,7 +439,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
TestRootVisitor visitor;
class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
// Verify the dex cache has resolution methods in all resolved method slots
- mirror::DexCache* dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
+ ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, num_methods = dex_cache->NumResolvedMethods(); i != num_methods; ++i) {
EXPECT_TRUE(
@@ -914,7 +914,7 @@ TEST_F(ClassLinkerTest, LookupResolvedType) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
klass);
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
@@ -949,7 +949,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeArray) {
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
array_klass);
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(array_idx, nullptr);
+ dex_cache->ClearResolvedType(array_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(array_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
@@ -972,7 +972,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
@@ -990,7 +990,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
@@ -1454,7 +1454,7 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Check that inserting with a UTF16 name works.
- class_linker->RegisterDexFileLocked(*dex_file, dex_cache);
+ class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
}
}
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index ff846a718e..af4f998fdf 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -55,10 +55,6 @@ mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
return nullptr;
}
-// Bug: http://b/31104323 Ignore -Wunreachable-code from the for loop below
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-
mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
WriterMutexLock mu(Thread::Current(), lock_);
// Should only be updating latest table.
@@ -84,8 +80,6 @@ mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* kl
return existing;
}
-#pragma clang diagnostic pop // http://b/31104323
-
size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
const ClassSet& set) const {
size_t count = 0;
@@ -123,6 +117,19 @@ mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
return nullptr;
}
+ObjPtr<mirror::Class> ClassTable::TryInsert(ObjPtr<mirror::Class> klass) {
+ TableSlot slot(klass);
+ WriterMutexLock mu(Thread::Current(), lock_);
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.Find(slot);
+ if (it != class_set.end()) {
+ return it->Read();
+ }
+ }
+ classes_.back().Insert(slot);
+ return klass;
+}
+
void ClassTable::Insert(ObjPtr<mirror::Class> klass) {
const uint32_t hash = TableSlot::HashDescriptor(klass);
WriterMutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index c8ec28eca4..711eae45b8 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -192,6 +192,12 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Try to insert a class and return the inserted class if successful. If another class
+ // with the same descriptor is already in the table, return the existing entry.
+ ObjPtr<mirror::Class> TryInsert(ObjPtr<mirror::Class> klass)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void Insert(ObjPtr<mirror::Class> klass)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
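TryInsert() gives callers an atomic insert-or-return-existing primitive, so two threads racing to define the same class can be reconciled under one lock acquisition. A hypothetical minimal analogue keyed by descriptor (plain standard-library types; none of this is ART's API):

#include <mutex>
#include <string>
#include <unordered_map>

struct Class { std::string descriptor; };

class Table {
 public:
  // Return the class already stored under this descriptor, or insert the
  // candidate and return it. The caller detects a lost race by comparing the
  // result against the pointer it passed in.
  Class* TryInsert(Class* klass) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto result = classes_.emplace(klass->descriptor, klass);
    return result.first->second;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<std::string, Class*> classes_;
};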
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 28aca6c905..3bc49b8506 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -709,10 +709,10 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx,
return resolved_method;
} else if (type == kSuper) {
// TODO This lookup is rather slow.
- dex::TypeIndex method_type_idx =
- referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
- mirror::Class* method_reference_class =
- referrer->GetDexCache()->GetResolvedType(method_type_idx);
+ ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+ dex::TypeIndex method_type_idx = dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
+ ObjPtr<mirror::Class> method_reference_class = ClassLinker::LookupResolvedType(
+ method_type_idx, dex_cache, referrer->GetClassLoader());
if (method_reference_class == nullptr) {
// Need to do full type resolution...
return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 06c11f5101..fb8139b7c6 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -204,12 +204,12 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+ if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
caller = GetResolvedMethod(outer_method,
inline_info,
- encoding.inline_info_encoding,
- inline_info.GetDepth(encoding.inline_info_encoding) - 1);
+ encoding.inline_info.encoding,
+ inline_info.GetDepth(encoding.inline_info.encoding) - 1);
}
}
if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 5b1b2871c2..699cf91c70 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -53,13 +53,18 @@ static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Lock
}
}
+constexpr Runtime::CalleeSaveType kInitEntrypointSaveType =
+ // TODO: Change allocation entrypoints on MIPS and MIPS64 to kSaveEverything.
+ (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
+ : Runtime::kSaveEverything;
+
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
@@ -73,7 +78,7 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
@@ -88,7 +93,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
@@ -101,11 +106,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
- self,
- // TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
- (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
- : Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
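kInitEntrypointSaveType hoists the ISA-dependent ternary that was previously repeated at each entrypoint into one named constexpr, so the MIPS TODO lives in a single place. A sketch of the same hoisting with stand-in enums (kRuntimeIsa is assumed here; in ART it is fixed per build):

enum class Isa { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
enum class SaveType { kSaveRefsOnly, kSaveEverything };

constexpr Isa kRuntimeIsa = Isa::kArm64;  // Illustrative value only.
// MIPS and MIPS64 still lack kSaveEverything allocation entrypoints, so they
// fall back to kSaveRefsOnly; every other ISA uses kSaveEverything.
constexpr SaveType kInitEntrypointSaveType =
    (kRuntimeIsa == Isa::kMips || kRuntimeIsa == Isa::kMips64)
        ? SaveType::kSaveRefsOnly
        : SaveType::kSaveEverything;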
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 670dadcd4d..158c1d6348 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -20,7 +20,7 @@
#include "indirect_reference_table.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index bde9009f7b..3ef47c427e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -346,12 +346,12 @@ class QuickArgumentVisitor {
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+ if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding,
- inline_info.GetDepth(encoding.inline_info_encoding)-1);
+ return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
+ inline_info.GetDepth(encoding.inline_info.encoding)-1);
} else {
- return stack_map.GetDexPc(encoding.stack_map_encoding);
+ return stack_map.GetDexPc(encoding.stack_map.encoding);
}
} else {
return current_code->ToDexPc(*caller_sp, outer_pc);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index e18a955251..122f7799df 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -292,7 +292,7 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
new_record_condition_.WaitHoldingLocks(self);
}
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 7c649525e4..854d0a58ff 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,6 +22,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
+#include "mirror/object-inl.h"
#include "lock_word.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0819ba04f7..f18ffb4aef 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -835,65 +835,9 @@ void ConcurrentCopying::ProcessFalseGrayStack() {
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- Barrier* barrier = thread_list->EmptyCheckpointBarrier();
- barrier->Init(self, 0);
- std::vector<uint32_t> runnable_thread_ids; // Used in debug build only
- size_t barrier_count = thread_list->RunEmptyCheckpoint(runnable_thread_ids);
- // If there are no threads to wait which implys that all the checkpoint functions are finished,
- // then no need to release the mutator lock.
- if (barrier_count == 0) {
- return;
- }
// Release locks then wait for all mutator threads to pass the barrier.
Locks::mutator_lock_->SharedUnlock(self);
- {
- ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- if (kIsDebugBuild) {
- static constexpr uint64_t kEmptyCheckpointTimeoutMs = 600 * 1000; // 10 minutes.
- bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointTimeoutMs);
- if (timed_out) {
- std::ostringstream ss;
- ss << "Empty checkpoint timeout\n";
- ss << "Barrier count " << barrier->GetCount(self) << "\n";
- ss << "Runnable thread IDs";
- for (uint32_t tid : runnable_thread_ids) {
- ss << " " << tid;
- }
- ss << "\n";
- Locks::mutator_lock_->Dump(ss);
- ss << "\n";
- LOG(FATAL_WITHOUT_ABORT) << ss.str();
- // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
- // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
- {
- ScopedObjectAccess soa(self);
- MutexLock mu1(self, *Locks::thread_list_lock_);
- for (Thread* thread : thread_list->GetList()) {
- uint32_t tid = thread->GetThreadId();
- bool is_in_runnable_thread_ids =
- std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
- runnable_thread_ids.end();
- if (is_in_runnable_thread_ids &&
- thread->ReadFlag(kEmptyCheckpointRequest)) {
- // Found a runnable thread that hasn't responded to the empty checkpoint request.
- // Assume it's stuck and safe to dump its stack.
- thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
- /*dump_native_stack*/ true,
- /*backtrace_map*/ nullptr,
- /*force_dump_stack*/ true);
- }
- }
- }
- LOG(FATAL_WITHOUT_ABORT)
- << "Dumped runnable threads that haven't responded to empty checkpoint.";
- // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
- thread_list->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
- LOG(FATAL) << "Dumped all threads.";
- }
- } else {
- barrier->Increment(self, barrier_count);
- }
- }
+ thread_list->RunEmptyCheckpoint();
Locks::mutator_lock_->SharedLock(self);
}
@@ -1875,8 +1819,10 @@ class ConcurrentCopying::RefFieldsVisitor {
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
// Avoid all read barriers during visit references to help performance.
+ // Don't do this in transaction mode because we may read the old value of a field which may
+ // trigger read barriers.
Thread::Current()->ModifyDebugDisallowReadBarrier(1);
}
DCHECK(!region_space_->IsInFromSpace(to_ref));
@@ -1885,7 +1831,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 54f221056a..394e541fd8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -34,7 +34,7 @@
#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "utils.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index aa15714595..0a45fcedae 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,6 +78,7 @@
#include "scoped_thread_state_change-inl.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
+#include "verify_object-inl.h"
#include "well_known_classes.h"
namespace art {
@@ -127,8 +128,6 @@ static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
-static constexpr size_t kNativeAllocationHistogramBuckets = 16;
-
// Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
@@ -194,18 +193,12 @@ Heap::Heap(size_t initial_size,
capacity_(capacity),
growth_limit_(growth_limit),
max_allowed_footprint_(initial_size),
- native_footprint_gc_watermark_(initial_size),
- native_need_to_run_finalization_(false),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
- native_bytes_allocated_(0),
- native_histogram_lock_("Native allocation lock"),
- native_allocation_histogram_("Native allocation sizes",
- 1U,
- kNativeAllocationHistogramBuckets),
- native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
+ new_native_bytes_allocated_(0),
+ old_native_bytes_allocated_(0),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -544,6 +537,12 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
+ native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
+ *native_blocking_gc_lock_));
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_ = 0;
+
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
*thread_flip_lock_));
@@ -1111,19 +1110,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
rosalloc_space_->DumpStats(os);
}
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- if (native_allocation_histogram_.SampleSize() > 0u) {
- os << "Histogram of native allocation ";
- native_allocation_histogram_.DumpBins(os);
- os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
- }
- if (native_free_histogram_.SampleSize() > 0u) {
- os << "Histogram of native free ";
- native_free_histogram_.DumpBins(os);
- os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
- }
- }
+ os << "Registered native bytes allocated: "
+ << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
+ << "\n";
BaseMutex::DumpAll(os);
}
@@ -1208,6 +1197,7 @@ Heap::~Heap() {
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
+ delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2655,6 +2645,13 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// Approximate heap size.
ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
+ if (gc_type == NonStickyGcType()) {
+ // Move all bytes from new_native_bytes_allocated_ to
+ // old_native_bytes_allocated_ now that GC has been triggered, resetting
+ // new_native_bytes_allocated_ to zero in the process.
+ old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+ }
+
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -3514,18 +3511,6 @@ bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
return false;
}
-void Heap::UpdateMaxNativeFootprint() {
- size_t native_size = native_bytes_allocated_.LoadRelaxed();
- // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
- size_t target_size = native_size / GetTargetHeapUtilization();
- if (target_size > native_size + max_free_) {
- target_size = native_size + max_free_;
- } else if (target_size < native_size + min_free_) {
- target_size = native_size + min_free_;
- }
- native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
-}
-
collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
for (const auto& collector : garbage_collectors_) {
if (collector->GetCollectorType() == collector_type_ &&
@@ -3565,11 +3550,9 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
target_size = bytes_allocated + delta * multiplier;
target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
- native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- collector::GcType non_sticky_gc_type =
- HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ collector::GcType non_sticky_gc_type = NonStickyGcType();
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3720,7 +3703,7 @@ void Heap::ConcurrentGC(Thread* self, bool force_full) {
collector::GcType next_gc_type = next_gc_type_;
// If forcing full and next gc type is sticky, override with a non-sticky type.
if (force_full && next_gc_type == collector::kGcTypeSticky) {
- next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type = NonStickyGcType();
}
if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
collector::kGcTypeNone) {
@@ -3877,70 +3860,81 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- Thread* self = ThreadForEnv(env);
- {
- MutexLock mu(self, native_histogram_lock_);
- native_allocation_histogram_.AddValue(bytes);
- }
- if (native_need_to_run_finalization_) {
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- UpdateMaxNativeFootprint();
- native_need_to_run_finalization_ = false;
- }
- // Total number of native bytes allocated.
- size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
- new_native_bytes_allocated += bytes;
- if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
- collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
- collector::kGcTypeFull;
-
- // The second watermark is higher than the gc watermark. If you hit this it means you are
- // allocating native objects faster than the GC can keep up with.
- if (new_native_bytes_allocated > growth_limit_) {
- if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
- // Just finished a GC, attempt to run finalizers.
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
- // Native bytes allocated may be updated by finalization, refresh it.
- new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
- }
- // If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (new_native_bytes_allocated > growth_limit_) {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- native_need_to_run_finalization_ = false;
- CHECK(!env->ExceptionCheck());
+ // See the REDESIGN section of go/understanding-register-native-allocation
+ // for an explanation of how RegisterNativeAllocation works.
+ size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+ if (new_value > NativeAllocationBlockingGcWatermark()) {
+ // Wait for a new GC to finish and finalizers to run, because the
+ // allocation rate is too high.
+ Thread* self = ThreadForEnv(env);
+
+ bool run_gc = false;
+ {
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
+ if (native_blocking_gc_in_progress_) {
+ // A native blocking GC is in progress from the last time the native
+ // allocation blocking GC watermark was exceeded. Wait for that GC to
+ // finish before addressing the fact that we exceeded the blocking
+ // watermark again.
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+ initial_gcs_finished++;
}
- // We have just run finalizers, update the native watermark since it is very likely that
- // finalizers released native managed allocations.
- UpdateMaxNativeFootprint();
- } else if (!IsGCRequestPending()) {
- if (IsGcConcurrent()) {
- RequestConcurrentGC(self, true); // Request non-sticky type.
- } else {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
+
+ // It's possible multiple threads have seen that we exceeded the
+ // blocking watermark. Ensure that only one of those threads runs the
+ // blocking GC. The rest of the threads should instead wait for the
+ // blocking GC to complete.
+ if (native_blocking_gcs_finished_ == initial_gcs_finished) {
+ if (native_blocking_gc_in_progress_) {
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+ } else {
+ native_blocking_gc_in_progress_ = true;
+ run_gc = true;
+ }
}
}
+
+ if (run_gc) {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
+ CHECK(!env->ExceptionCheck());
+
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_++;
+ native_blocking_gc_cond_->Broadcast(self);
+ }
+ } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+ // Trigger another GC because there have been enough native bytes
+ // allocated since the last GC.
+ if (IsGcConcurrent()) {
+ RequestConcurrentGC(ThreadForEnv(env), /*force_full*/true);
+ } else {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ }
}
}
-void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
- size_t expected_size;
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- native_free_histogram_.AddValue(bytes);
- }
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+ // Take the bytes freed out of new_native_bytes_allocated_ first. If
+ // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
+ // out of old_native_bytes_allocated_ to ensure all freed bytes are
+ // accounted for.
+ size_t allocated;
+ size_t new_freed_bytes;
do {
- expected_size = native_bytes_allocated_.LoadRelaxed();
- if (UNLIKELY(bytes > expected_size)) {
- ScopedObjectAccess soa(env);
- env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
- StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
- "registered as allocated", bytes, expected_size).c_str());
- break;
- }
- } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
- expected_size - bytes));
+ allocated = new_native_bytes_allocated_.LoadRelaxed();
+ new_freed_bytes = std::min(allocated, bytes);
+ } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+ allocated - new_freed_bytes));
+ if (new_freed_bytes < bytes) {
+ old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
+ }
}
size_t Heap::GetTotalMemory() const {
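The blocking path of RegisterNativeAllocation() above coordinates through a generation counter: threads that crossed the watermark while a blocking GC was already running wait for native_blocking_gcs_finished_ to advance, and exactly one of the threads left over becomes the next runner. A slightly simplified sketch of that coordination, assuming a hypothetical run_blocking_gc callback:

#include <condition_variable>
#include <cstdint>
#include <mutex>

class BlockingGcCoordinator {
 public:
  void OnWatermarkExceeded(void (*run_blocking_gc)()) {
    bool run_gc = false;
    {
      std::unique_lock<std::mutex> lock(mutex_);
      uint32_t initial = gcs_finished_;
      if (gc_in_progress_) {
        // A GC from an earlier overflow is still running; wait it out first.
        cond_.wait(lock, [&] { return gcs_finished_ != initial; });
        initial = gcs_finished_;
      }
      if (!gc_in_progress_) {
        gc_in_progress_ = true;  // This thread runs the next blocking GC.
        run_gc = true;
      } else {
        // Another thread already claimed the next GC; wait for it instead.
        cond_.wait(lock, [&] { return gcs_finished_ != initial; });
      }
    }
    if (run_gc) {
      run_blocking_gc();  // Collect and run finalizers, outside the lock.
      std::lock_guard<std::mutex> lock(mutex_);
      gc_in_progress_ = false;
      ++gcs_finished_;
      cond_.notify_all();
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool gc_in_progress_ = false;
  uint32_t gcs_finished_ = 0;
};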
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3a8e29b08a..a4d300b110 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -260,9 +260,8 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
- void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -562,7 +561,7 @@ class Heap {
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -679,7 +678,7 @@ class Heap {
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os)
- REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -979,10 +978,6 @@ class Heap {
void PostGcVerificationPaused(collector::GarbageCollector* gc)
REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
- // Update the watermark for the native allocated bytes based on the current number of native
- // bytes allocated and the target utilization ratio.
- void UpdateMaxNativeFootprint();
-
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
@@ -1066,6 +1061,31 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
+ collector::GcType NonStickyGcType() const {
+ return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ }
+
+ // How large new_native_bytes_allocated_ can grow before we trigger a new
+ // GC.
+ ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
+ // Reuse max_free_ for the native allocation gc watermark, so that the
+ // native heap is treated in the same way as the Java heap in the case
+ // where the gc watermark update would exceed max_free_. Using max_free_
+ // instead of the target utilization means the watermark doesn't depend on
+ // the current number of registered native allocations.
+ return max_free_;
+ }
+
+ // How large new_native_bytes_allocated_ can grow while GC is in progress
+ // before we block the allocating thread to allow GC to catch up.
+ ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
+ // Historically the native allocations were bounded by growth_limit_. This
+ // uses that same value, dividing growth_limit_ by 2 to account for
+ // the fact that now the bound is relative to the number of retained
+ // registered native allocations rather than absolute.
+ return growth_limit_ / 2;
+ }
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
@@ -1184,12 +1204,6 @@ class Heap {
// a GC should be triggered.
size_t max_allowed_footprint_;
- // The watermark at which a concurrent GC is requested by registerNativeAllocation.
- size_t native_footprint_gc_watermark_;
-
- // Whether or not we need to run finalizers in the next native allocation.
- bool native_need_to_run_finalization_;
-
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
size_t concurrent_start_bytes_;
@@ -1203,13 +1217,25 @@ class Heap {
// Number of bytes allocated. Adjusted after each allocation and free.
Atomic<size_t> num_bytes_allocated_;
- // Bytes which are allocated and managed by native code but still need to be accounted for.
- Atomic<size_t> native_bytes_allocated_;
-
- // Native allocation stats.
- Mutex native_histogram_lock_;
- Histogram<uint64_t> native_allocation_histogram_;
- Histogram<uint64_t> native_free_histogram_;
+ // Number of registered native bytes allocated since the last time GC was
+ // triggered. Adjusted after each RegisterNativeAllocation and
+ // RegisterNativeFree. Used to determine when to trigger GC for native
+ // allocations.
+ // See the REDESIGN section of go/understanding-register-native-allocation.
+ Atomic<size_t> new_native_bytes_allocated_;
+
+ // Number of registered native bytes allocated prior to the last time GC was
+ // triggered, for debugging purposes. The current number of registered
+ // native bytes is determined by taking the sum of
+ // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+ Atomic<size_t> old_native_bytes_allocated_;
+
+ // Used for synchronization of blocking GCs triggered by
+ // RegisterNativeAllocation.
+ Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+ bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
+ uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
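// A minimal standalone sketch of the two-watermark policy the new heap members
// above implement. The real logic lives in Heap::RegisterNativeAllocation in
// runtime/gc/heap.cc and differs in detail (locking, GC plumbing); the names
// and constants below are illustrative stand-ins only.
#include <atomic>
#include <cstddef>
#include <cstdio>

namespace native_watermark_sketch {

std::atomic<size_t> new_native_bytes_allocated{0};
constexpr size_t kMaxFree = 8u * 1024 * 1024;        // NativeAllocationGcWatermark()
constexpr size_t kGrowthLimit = 256u * 1024 * 1024;  // growth_limit_
bool blocking_gc_in_progress = false;                // native_blocking_gc_in_progress_

void RegisterNativeAllocation(size_t bytes) {
  size_t total = new_native_bytes_allocated.fetch_add(bytes) + bytes;
  if (blocking_gc_in_progress && total > kGrowthLimit / 2) {
    // Past NativeAllocationBlockingGcWatermark(): the allocating thread would
    // wait on native_blocking_gc_cond_ so the GC can catch up.
    std::printf("block until the in-progress GC finishes\n");
  } else if (total > kMaxFree) {
    // Past NativeAllocationGcWatermark(): request a non-sticky GC (see
    // NonStickyGcType() above) and roll the counter into
    // old_native_bytes_allocated_.
    std::printf("request a non-sticky GC\n");
  }
}

}  // namespace native_watermark_sketch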
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c1548365c7..86b152211c 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -104,7 +104,7 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
}
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
condition_.WaitHoldingLocks(self);
}
return reference->GetReferent();
@@ -292,7 +292,7 @@ void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
condition_.WaitHoldingLocks(self);
}
}
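// The wait-loop pattern these hunks touch, modeled standalone: a thread that
// must block until weak-ref access is re-enabled first acknowledges any
// pending empty checkpoint, so a checkpoint request cannot deadlock against
// threads parked here. Names are hypothetical; the real helper now also takes
// the mutex being held so it can be released around the checkpoint work.
#include <condition_variable>
#include <mutex>

std::mutex weak_ref_lock;
std::condition_variable weak_ref_cond;
bool weak_ref_access_enabled = false;

void RunEmptyCheckpointIfRequested() {
  // Stands in for CheckEmptyCheckpointFromWeakRefAccess(lock).
}

void WaitForWeakRefAccess(std::unique_lock<std::mutex>& held) {
  while (!weak_ref_access_enabled) {
    RunEmptyCheckpointIfRequested();
    weak_ref_cond.wait(held);  // condition_.WaitHoldingLocks(self)
  }
}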
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ffbca525d9..4be4ef05b4 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -587,15 +587,18 @@ class ImageSpaceLoader {
}
std::unique_ptr<MemMap> map;
+
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
+ // If the image is PIC, we will retry below with error_msg for the failure case. Pass a null
+ // error_msg here to avoid reading proc maps for a mapping failure and slowing everything down.
map.reset(LoadImageFile(image_filename,
image_location,
*image_header,
image_header->GetImageBegin(),
file->Fd(),
logger,
- error_msg));
+ image_header->IsPic() ? nullptr : error_msg));
// If the header specifies PIC mode, we can also map at a random low_4gb address since we can
// relocate in-place.
if (map == nullptr && image_header->IsPic()) {
@@ -765,8 +768,10 @@ class ImageSpaceLoader {
if (storage_mode != ImageHeader::kStorageModeLZ4 &&
storage_mode != ImageHeader::kStorageModeLZ4HC) {
- *error_msg = StringPrintf("Invalid storage mode in image header %d",
- static_cast<int>(storage_mode));
+ if (error_msg != nullptr) {
+ *error_msg = StringPrintf("Invalid storage mode in image header %d",
+ static_cast<int>(storage_mode));
+ }
return nullptr;
}
@@ -790,7 +795,7 @@ class ImageSpaceLoader {
image_filename,
error_msg));
if (temp_map == nullptr) {
- DCHECK(!error_msg->empty());
+ DCHECK(error_msg == nullptr || !error_msg->empty());
return nullptr;
}
memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
@@ -802,12 +807,18 @@ class ImageSpaceLoader {
reinterpret_cast<char*>(map->Begin()) + decompress_offset,
stored_size,
map->Size() - decompress_offset);
- VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
+ const uint64_t time = NanoTime() - start;
+ // Add 1 ns to prevent a possible divide by 0.
+ VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
+ << PrettySize(static_cast<uint64_t>(map->Size()) * MsToNs(1000) / (time + 1))
+ << "/s)";
if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
- *error_msg = StringPrintf(
- "Decompressed size does not match expected image size %zu vs %zu",
- decompressed_size + sizeof(ImageHeader),
- image_header.GetImageSize());
+ if (error_msg != nullptr) {
+ *error_msg = StringPrintf(
+ "Decompressed size does not match expected image size %zu vs %zu",
+ decompressed_size + sizeof(ImageHeader),
+ image_header.GetImageSize());
+ }
return nullptr;
}
}
@@ -1226,9 +1237,9 @@ class ImageSpaceLoader {
}
dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
}
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
if (types != nullptr) {
- GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types);
+ mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
if (types != new_types) {
dex_cache->SetResolvedTypes(new_types);
}
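// The convention the image_space.cc hunks introduce, in miniature: every
// error sink becomes optional, so the first mapping attempt for a PIC image
// can pass nullptr and skip both building the error string and the expensive
// proc-maps read on an expected failure. Function names here are hypothetical.
#include <string>

bool TryMap(bool fail, std::string* error_msg /* may be null */) {
  if (fail) {
    if (error_msg != nullptr) {
      *error_msg = "only built when the caller asked for diagnostics";
    }
    return false;
  }
  return true;
}

bool LoadImage(bool is_pic, std::string* error_msg) {
  // First attempt at the preferred address: when a PIC retry exists, pass a
  // null error_msg so an expected failure stays cheap.
  if (TryMap(/*fail=*/true, is_pic ? nullptr : error_msg)) {
    return true;
  }
  // The PIC retry at an arbitrary low-4GB address reports errors for real.
  return is_pic && TryMap(/*fail=*/false, error_msg);
}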
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index e5cddfc6f9..60105f4e4f 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -82,7 +82,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(&allow_disallow_lock_);
new_weak_condition_.WaitHoldingLocks(self);
}
}
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index b212d095cb..077f45e8f3 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -23,7 +23,7 @@
#include "handle.h"
#include "obj_ptr-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index aab1d9c224..f888482ae5 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -17,10 +17,12 @@
#include <type_traits>
#include "base/enums.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gtest/gtest.h"
#include "handle.h"
#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3d3ad593b3..133502e6a3 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -224,12 +224,6 @@ class EndianOutput {
HandleU1List(values, count);
length_ += count;
}
- void AddU1AsU2List(const uint8_t* values, size_t count) {
- HandleU1AsU2List(values, count);
- // Array of char from compressed String (8-bit) is added as 16-bit blocks
- int ceil_count_to_even = count + ((count & 1) ? 1 : 0);
- length_ += ceil_count_to_even * sizeof(uint8_t);
- }
void AddU2List(const uint16_t* values, size_t count) {
HandleU2List(values, count);
length_ += count * sizeof(uint16_t);
@@ -1277,7 +1271,7 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), nullptr);
__ AddU1(t);
}
- // Add native value character array for strings.
+ // Add native value character array for strings / byte array for compressed strings.
if (klass->IsStringClass()) {
__ AddStringId(LookupStringId("value"));
__ AddU1(hprof_basic_object);
@@ -1359,8 +1353,16 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
case hprof_basic_short:
__ AddU2(f->GetShort(obj));
break;
- case hprof_basic_float:
case hprof_basic_int:
+ if (mirror::kUseStringCompression &&
+ klass->IsStringClass() &&
+ f->GetOffset().SizeValue() == mirror::String::CountOffset().SizeValue()) {
+ // Store the string length instead of the raw count field with compression flag.
+ __ AddU4(obj->AsString()->GetLength());
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case hprof_basic_float:
case hprof_basic_object:
__ AddU4(f->Get32(obj));
break;
@@ -1397,16 +1399,15 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
CHECK_EQ(obj->IsString(), string_value != nullptr);
if (string_value != nullptr) {
mirror::String* s = obj->AsString();
- // Compressed string's (8-bit) length is ceil(length/2) in 16-bit blocks
- int length_in_16_bit = (s->IsCompressed()) ? ((s->GetLength() + 1) / 2) : s->GetLength();
__ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
__ AddObjectId(string_value);
__ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(obj));
- __ AddU4(length_in_16_bit);
- __ AddU1(hprof_basic_char);
+ __ AddU4(s->GetLength());
if (s->IsCompressed()) {
- __ AddU1AsU2List(s->GetValueCompressed(), s->GetLength());
+ __ AddU1(hprof_basic_byte);
+ __ AddU1List(s->GetValueCompressed(), s->GetLength());
} else {
+ __ AddU1(hprof_basic_char);
__ AddU2List(s->GetValue(), s->GetLength());
}
}
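// Shape of the new string dump, standalone: the recorded length is now the
// true character count, and compressed strings go out as a byte array while
// uncompressed ones stay a char array. The tag values below are the standard
// HPROF basic types (byte = 8, char = 5); the vector stands in for the
// EndianOutput helpers named in the comments.
#include <cstdint>
#include <vector>

constexpr uint8_t kHprofBasicByte = 8;
constexpr uint8_t kHprofBasicChar = 5;

struct StringBody {
  bool compressed;
  std::vector<uint8_t> latin1;   // used when compressed
  std::vector<uint16_t> utf16;   // used otherwise
};

void DumpStringArray(const StringBody& s, std::vector<uint8_t>* out) {
  uint32_t length =
      static_cast<uint32_t>(s.compressed ? s.latin1.size() : s.utf16.size());
  for (int shift = 24; shift >= 0; shift -= 8) {  // AddU4(s->GetLength())
    out->push_back(static_cast<uint8_t>(length >> shift));
  }
  if (s.compressed) {
    out->push_back(kHprofBasicByte);                            // AddU1
    out->insert(out->end(), s.latin1.begin(), s.latin1.end());  // AddU1List
  } else {
    out->push_back(kHprofBasicChar);
    for (uint16_t c : s.utf16) {                                // AddU2List
      out->push_back(static_cast<uint8_t>(c >> 8));
      out->push_back(static_cast<uint8_t>(c));
    }
  }
}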
diff --git a/runtime/image.cc b/runtime/image.cc
index 54b099eb14..87f429568d 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '6', '\0' }; // Erroneous resolved class.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '7', '\0' }; // hash-based DexCache types
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 0e66ae96b5..24ee22759c 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -25,7 +25,7 @@
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
#include "runtime-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace mirror {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c7371191b7..9fbb2e9930 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -25,7 +25,6 @@
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils.h"
-#include "verify_object-inl.h"
#include <cstdlib>
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index f11e2cba10..d862ff2708 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1010,15 +1010,18 @@ void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_o
void Instrumentation::ExceptionCaughtEvent(Thread* thread,
mirror::Throwable* exception_object) const {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> h_exception(hs.NewHandle(exception_object));
if (HasExceptionCaughtListeners()) {
- DCHECK_EQ(thread->GetException(), exception_object);
+ DCHECK_EQ(thread->GetException(), h_exception.Get());
thread->ClearException();
for (InstrumentationListener* listener : exception_caught_listeners_) {
if (listener != nullptr) {
- listener->ExceptionCaught(thread, exception_object);
+ listener->ExceptionCaught(thread, h_exception.Get());
}
}
- thread->SetException(exception_object);
+ thread->SetException(h_exception.Get());
}
}
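// Why the hunk routes exception_object through a Handle: listeners may
// allocate and therefore trigger GC, and a moving collector would leave a raw
// mirror::Throwable* dangling. A handle registers the slot as a root the GC
// can update. A toy standalone model of that difference:
#include <vector>

struct Object { int payload; };

std::vector<Object**> g_handle_roots;  // stands in for the thread's HandleScope chain

struct ScopedHandle {
  explicit ScopedHandle(Object* o) : obj(o) { g_handle_roots.push_back(&obj); }
  ~ScopedHandle() { g_handle_roots.pop_back(); }
  Object* Get() const { return obj; }
  Object* obj;
};

void MovingGc(Object* from, Object* to) {
  *to = *from;
  for (Object** root : g_handle_roots) {
    if (*root == from) {
      *root = to;  // handles are fixed up; raw pointers held elsewhere now dangle
    }
  }
}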
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 371e2f1e65..545cc1ad42 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -21,6 +21,7 @@
#include <stdlib.h>
#include <cmath>
+#include <initializer_list>
#include <limits>
#include <locale>
#include <unordered_map>
@@ -883,43 +884,74 @@ void UnstartedRuntime::UnstartedSystemGetPropertyWithDefault(
GetSystemProperty(self, shadow_frame, result, arg_offset, true);
}
-void UnstartedRuntime::UnstartedThreadLocalGet(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
- std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- bool ok = false;
- if (caller == "void java.lang.FloatingDecimal.developLongDigits(int, long, long)" ||
- caller == "java.lang.String java.lang.FloatingDecimal.toJavaFormatString()") {
- // Allocate non-threadlocal buffer.
- result->SetL(mirror::CharArray::Alloc(self, 26));
- ok = true;
- } else if (caller ==
- "java.lang.FloatingDecimal java.lang.FloatingDecimal.getThreadLocalInstance()") {
- // Allocate new object.
- StackHandleScope<2> hs(self);
- Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
- Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
- h_real_to_string_class->AllocObject(self)));
- if (h_real_to_string_obj.Get() != nullptr) {
- auto* cl = Runtime::Current()->GetClassLinker();
- ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
- "<init>", "()V", cl->GetImagePointerSize());
- if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG_STREAM(FATAL), mirror::Class::kDumpClassFullDetail);
- } else {
- JValue invoke_result;
- EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
- nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_real_to_string_obj.Get());
- ok = true;
- }
+static std::string GetImmediateCaller(ShadowFrame* shadow_frame)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (shadow_frame->GetLink() == nullptr) {
+ return "<no caller>";
+ }
+ return ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+}
+
+static bool CheckCallers(ShadowFrame* shadow_frame,
+ std::initializer_list<std::string> allowed_call_stack)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (const std::string& allowed_caller : allowed_call_stack) {
+ if (shadow_frame->GetLink() == nullptr) {
+ return false;
+ }
+
+ std::string found_caller = ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+ if (allowed_caller != found_caller) {
+ return false;
+ }
+
+ shadow_frame = shadow_frame->GetLink();
+ }
+ return true;
+}
+
+static ObjPtr<mirror::Object> CreateInstanceOf(Thread* self, const char* class_descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Find the requested class.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::Class> klass =
+ class_linker->FindClass(self, class_descriptor, ScopedNullHandle<mirror::ClassLoader>());
+ if (klass == nullptr) {
+ AbortTransactionOrFail(self, "Could not load class %s", class_descriptor);
+ return nullptr;
+ }
+
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
+ if (h_obj.Get() != nullptr) {
+ ArtMethod* init_method = h_class->FindDirectMethod(
+ "<init>", "()V", class_linker->GetImagePointerSize());
+ if (init_method == nullptr) {
+ AbortTransactionOrFail(self, "Could not find <init> for %s", class_descriptor);
+ return nullptr;
+ } else {
+ JValue invoke_result;
+ EnterInterpreterFromInvoke(self, init_method, h_obj.Get(), nullptr, nullptr);
+ if (!self->IsExceptionPending()) {
+ return h_obj.Get();
}
+ AbortTransactionOrFail(self, "Could not run <init> for %s", class_descriptor);
}
}
+ AbortTransactionOrFail(self, "Could not allocate instance of %s", class_descriptor);
+ return nullptr;
+}
- if (!ok) {
- AbortTransactionOrFail(self, "Could not create RealToString object");
+void UnstartedRuntime::UnstartedThreadLocalGet(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+ if (CheckCallers(shadow_frame, { "sun.misc.FloatingDecimal$BinaryToASCIIBuffer "
+ "sun.misc.FloatingDecimal.getBinaryToASCIIBuffer()" })) {
+ result->SetL(CreateInstanceOf(self, "Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;"));
+ } else {
+ AbortTransactionOrFail(self,
+ "ThreadLocal.get() does not support %s",
+ GetImmediateCaller(shadow_frame).c_str());
}
}
@@ -1252,12 +1284,12 @@ void UnstartedRuntime::UnstartedReferenceGetReferent(
// initialization of other classes, so will *use* the value.
void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
- std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+ if (CheckCallers(shadow_frame, { "void java.util.concurrent.SynchronousQueue.<clinit>()" })) {
// SynchronousQueue really only separates between single- and multiprocessor case. Return
// 8 as a conservative upper approximation.
result->SetI(8);
- } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+ } else if (CheckCallers(shadow_frame,
+ { "void java.util.concurrent.ConcurrentHashMap.<clinit>()" })) {
// ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
// a good upper bound.
// TODO: Consider resetting in the zygote?
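// CheckCallers above matches its initializer_list entry by entry against
// successive shadow-frame links, so a multi-element list constrains a whole
// call-stack suffix, not just the immediate caller. A standalone model of the
// same walk, with plain stand-ins for the ART types:
#include <initializer_list>
#include <string>

struct Frame {
  Frame* link;                // caller frame, like ShadowFrame::GetLink()
  std::string pretty_method;  // like ArtMethod::PrettyMethod()
};

bool CheckCallers(const Frame* frame, std::initializer_list<std::string> allowed) {
  for (const std::string& caller : allowed) {
    if (frame->link == nullptr) {
      return false;  // ran out of stack before matching the whole list
    }
    if (caller != frame->link->pretty_method) {
      return false;
    }
    frame = frame->link;
  }
  return true;
}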
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index ae55f4c2ef..31be587e9c 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -944,5 +944,100 @@ TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
+TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ StackHandleScope<1> hs(self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ // Positive test. Check that we get something back for the float conversion.
+ {
+ Handle<mirror::Class> floating_decimal = hs.NewHandle(
+ class_linker->FindClass(self,
+ "Lsun/misc/FloatingDecimal;",
+ ScopedNullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(floating_decimal.Get() != nullptr);
+ ASSERT_TRUE(class_linker->EnsureInitialized(self, floating_decimal, true, true));
+
+ ArtMethod* caller_method = floating_decimal->FindDeclaredDirectMethod(
+ "getBinaryToASCIIBuffer",
+ "()Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;",
+ class_linker->GetImagePointerSize());
+ // floating_decimal->DumpClass(LOG_STREAM(ERROR), mirror::Class::kDumpClassFullDetail);
+ ASSERT_TRUE(caller_method != nullptr);
+ ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame);
+
+ UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ EXPECT_TRUE(result.GetL() != nullptr);
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ }
+
+ // Negative test.
+ PrepareForAborts();
+
+ {
+ // Just use a method in Class.
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ ArtMethod* caller_method =
+ &*class_class->GetDeclaredMethods(class_linker->GetImagePointerSize()).begin();
+ ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame);
+
+ Transaction transaction;
+ Runtime::Current()->EnterTransactionMode(&transaction);
+ UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ Runtime::Current()->ExitTransactionMode();
+ ASSERT_TRUE(self->IsExceptionPending());
+ ASSERT_TRUE(transaction.IsAborted());
+ self->ClearException();
+
+ ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, FloatConversion) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ StackHandleScope<1> hs(self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> double_class = hs.NewHandle(
+ class_linker->FindClass(self,
+ "Ljava/lang/Double;",
+ ScopedNullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(double_class.Get() != nullptr);
+ ASSERT_TRUE(class_linker->EnsureInitialized(self, double_class, true, true));
+
+ ArtMethod* method = double_class->FindDeclaredDirectMethod("toString",
+ "(D)Ljava/lang/String;",
+ class_linker->GetImagePointerSize());
+ ASSERT_TRUE(method != nullptr);
+
+ // Create instruction data for invoke-direct {v0, v1} of a method with a fake index.
+ uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
+ const Instruction* inst = Instruction::At(inst_data);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
+ shadow_frame->SetVRegDouble(0, 1.23);
+ interpreter::DoCall<false, false>(method, self, *shadow_frame, inst, inst_data[0], &result);
+ ObjPtr<mirror::String> string_result = reinterpret_cast<mirror::String*>(result.GetL());
+ ASSERT_TRUE(string_result != nullptr);
+
+ std::string mod_utf = string_result->ToModifiedUtf8();
+ EXPECT_EQ("1.23", mod_utf);
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index e0f28adc4f..a341cdb89f 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -572,7 +572,7 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
while (!kUseReadBarrier && UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
@@ -706,7 +706,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectR
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
return weak_globals_.Get(ref);
@@ -731,7 +731,7 @@ bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
// When just checking a weak ref has been cleared, avoid triggering the read barrier in decode
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index e5d34e1a2d..86af6d44db 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -22,6 +22,7 @@
#include "jdwp/jdwp_bits.h"
#include "jdwp/jdwp_constants.h"
#include "jdwp/jdwp_expand_buf.h"
+#include "obj_ptr.h"
#include <pthread.h>
#include <stddef.h>
@@ -286,6 +287,10 @@ struct JdwpState {
REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
+ REQUIRES(!event_list_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
/*
* Unregister all events.
*/
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 172f52a974..96249f9b58 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -251,6 +251,43 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
return ERR_NONE;
}
+void JdwpState::UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass) {
+ VLOG(jdwp) << "Removing events within " << klass->PrettyClass();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+ std::vector<JdwpEvent*> to_remove;
+ MutexLock mu(Thread::Current(), event_list_lock_);
+ for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
+ // Fill in the to_remove list.
+ bool found_event = false;
+ for (int i = 0; i < cur_event->modCount && !found_event; i++) {
+ JdwpEventMod& mod = cur_event->mods[i];
+ switch (mod.modKind) {
+ case MK_LOCATION_ONLY: {
+ JdwpLocation& loc = mod.locationOnly.loc;
+ JdwpError error;
+ ObjPtr<mirror::Class> breakpoint_class(
+ Dbg::GetObjectRegistry()->Get<art::mirror::Class*>(loc.class_id, &error));
+ DCHECK_EQ(error, ERR_NONE);
+ if (breakpoint_class == h_klass.Get()) {
+ to_remove.push_back(cur_event);
+ found_event = true;
+ }
+ break;
+ }
+ default:
+ // TODO Investigate how we should handle non-locationOnly events.
+ break;
+ }
+ }
+ }
+
+ for (JdwpEvent* event : to_remove) {
+ UnregisterEvent(event);
+ EventFree(event);
+ }
+}
+
/*
* Remove an event from the list. This will also remove the event from
* any optimization tables, e.g. breakpoints.
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 170887e397..4615574947 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -19,6 +19,7 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class.h"
+#include "mirror/throwable.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6deb03dc41..1ec4749146 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -145,7 +145,12 @@ Jit::Jit() : dump_info_on_shutdown_(false),
cumulative_timings_("JIT timings"),
memory_use_("Memory used for compilation", 16),
lock_("JIT memory use lock"),
- use_jit_compilation_(true) {}
+ use_jit_compilation_(true),
+ hot_method_threshold_(0),
+ warm_method_threshold_(0),
+ osr_method_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0) {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
@@ -289,7 +294,11 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
void Jit::CreateThreadPool() {
// There is a DCHECK in the 'AddSamples' method to ensure the thread pool
// is not null when we instrument.
- thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+
+ // We need peers as we may report the JIT thread, e.g., in the debugger.
+ constexpr bool kJitPoolNeedsPeers = true;
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
Start();
}
@@ -514,7 +523,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
}
}
- native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) +
+ native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f5151b588a..0ac388ac02 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -556,12 +556,13 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
// Flush data cache, as compiled code references literals in it.
FlushDataCache(reinterpret_cast<char*>(roots_data),
reinterpret_cast<char*>(roots_data + data_size));
- // Flush caches before we remove write permission because on some ARMv8 hardware,
- // flushing caches require write permissions.
+ // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+ // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+ // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+ // 6P) stop being supported or their kernels are fixed.
//
- // For reference, here are kernel patches discussing about this issue:
- // https://android.googlesource.com/kernel/msm/%2B/0e7f7bcc3fc87489cda5aa6aff8ce40eed912279
- // https://patchwork.kernel.org/patch/9047921/
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
DCHECK(!Runtime::Current()->IsAotCompiler());
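// The ordering constraint the updated comment describes, as a standalone
// sketch: do all cache maintenance while the code pages are still writable,
// and only then drop write permission. __builtin___clear_cache is the
// GCC/Clang builtin standing in for ART's FlushDataCache and
// FlushInstructionCache helpers; code is assumed page-aligned.
#include <stddef.h>
#include <sys/mman.h>

void MakeExecutable(char* code, size_t size) {
  // Flush while still writable: the affected kernels can segfault a cache
  // maintenance operation that page-faults on a non-writable page.
  __builtin___clear_cache(code, code + size);
  // Only now remove write permission.
  mprotect(code, size, PROT_READ | PROT_EXEC);
}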
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1405c40096..54fc0386e1 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -597,6 +597,24 @@ uint32_t ProfileCompilationInfo::GetNumberOfResolvedClasses() const {
return total;
}
+// Produce a non-owning vector from a vector.
+template<typename T>
+const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
+ auto non_owning_vector = new std::vector<T*>();
+ for (auto& element : *owning_vector) {
+ non_owning_vector->push_back(element.get());
+ }
+ return non_owning_vector;
+}
+
+std::string ProfileCompilationInfo::DumpInfo(
+ const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ bool print_full_dex_location) const {
+ std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
+ MakeNonOwningVector(dex_files));
+ return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
+}
+
std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location) const {
std::ostringstream os;
@@ -646,6 +664,38 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
return os.str();
}
+void ProfileCompilationInfo::GetClassNames(
+ const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* class_names) const {
+ std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
+ MakeNonOwningVector(dex_files));
+ GetClassNames(non_owning_dex_files.get(), class_names);
+}
+
+void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* dex_files,
+ std::set<std::string>* class_names) const {
+ if (info_.empty()) {
+ return;
+ }
+ for (const auto& it : info_) {
+ const std::string& location = it.first;
+ const DexFileData& dex_data = it.second;
+ const DexFile* dex_file = nullptr;
+ if (dex_files != nullptr) {
+ for (size_t i = 0; i < dex_files->size(); i++) {
+ if (location == (*dex_files)[i]->GetLocation()) {
+ dex_file = (*dex_files)[i];
+ }
+ }
+ }
+ for (const auto class_it : dex_data.class_set) {
+ if (dex_file != nullptr) {
+ class_names->insert(std::string(dex_file->PrettyType(class_it)));
+ }
+ }
+ }
+}
+
bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
return info_.Equals(other.info_);
}
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index f8061bcfd8..758b46d74a 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
#define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
+#include <memory>
#include <set>
#include <vector>
@@ -72,9 +73,16 @@ class ProfileCompilationInfo {
// If dex_files is not null then the method indices will be resolved to their
// names.
// This is intended for testing and debugging.
+ std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ bool print_full_dex_location = true) const;
std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location = true) const;
+ void GetClassNames(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* class_names) const;
+ void GetClassNames(const std::vector<const DexFile*>* dex_files,
+ std::set<std::string>* class_names) const;
+
bool Equals(const ProfileCompilationInfo& other);
static std::string GetProfileDexFileKey(const std::string& dex_location);
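// What the new overloads buy callers: most call sites hold the dex files in a
// unique_ptr-owning vector, and the raw-pointer copy is now made internally.
// A standalone version of the helper; returning by value rather than a new'd
// vector is an equivalent, slightly more idiomatic shape. The owning vector
// must outlive the result.
#include <memory>
#include <vector>

template <typename T>
std::vector<T*> MakeNonOwningVector(const std::vector<std::unique_ptr<T>>& owning) {
  std::vector<T*> result;
  result.reserve(owning.size());
  for (const std::unique_ptr<T>& element : owning) {
    result.push_back(element.get());
  }
  return result;
}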
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 19a65bb27e..93c212bafb 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,6 +70,7 @@ std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
return os;
}
+std::mutex* MemMap::mem_maps_lock_ = nullptr;
MemMap::Maps* MemMap::maps_ = nullptr;
#if USE_ART_LOW_4G_ALLOCATOR
@@ -139,7 +140,7 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string*
// There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
// further.
{
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
for (auto& pair : *maps_) {
MemMap* const map = pair.second;
if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
@@ -399,7 +400,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
-
+ DCHECK(error_msg != nullptr);
DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
<< ((error_msg != nullptr) ? *error_msg : std::string());
flags |= MAP_FIXED;
@@ -490,7 +491,7 @@ MemMap::~MemMap() {
}
// Remove it from maps_.
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
bool found = false;
DCHECK(maps_ != nullptr);
for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
@@ -518,7 +519,7 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
CHECK_NE(base_size_, 0U);
// Add it to maps_.
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DCHECK(maps_ != nullptr);
maps_->insert(std::make_pair(base_begin_, this));
}
@@ -637,7 +638,7 @@ bool MemMap::Protect(int prot) {
}
bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
CHECK(begin_map != nullptr);
CHECK(end_map != nullptr);
CHECK(HasMemMap(begin_map));
@@ -656,7 +657,7 @@ bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
}
void MemMap::DumpMaps(std::ostream& os, bool terse) {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DumpMapsLocked(os, terse);
}
@@ -747,17 +748,31 @@ MemMap* MemMap::GetLargestMemMapAt(void* address) {
}
void MemMap::Init() {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- if (maps_ == nullptr) {
+ if (mem_maps_lock_ != nullptr) {
// dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
- maps_ = new Maps;
+ return;
}
+ mem_maps_lock_ = new std::mutex();
+ // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(maps_ == nullptr);
+ maps_ = new Maps;
}
void MemMap::Shutdown() {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- delete maps_;
- maps_ = nullptr;
+ if (mem_maps_lock_ == nullptr) {
+ // Calling MemMap::Shutdown more than once has no effect.
+ return;
+ }
+ {
+ // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(maps_ != nullptr);
+ delete maps_;
+ maps_ = nullptr;
+ }
+ delete mem_maps_lock_;
+ mem_maps_lock_ = nullptr;
}
void MemMap::SetSize(size_t new_size) {
@@ -813,7 +828,7 @@ void* MemMap::MapInternal(void* addr,
if (low_4gb && addr == nullptr) {
bool first_run = true;
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
// Use maps_ as an optimization to skip over large maps.
// Find the first map which is address > ptr.
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 0fea1a52c9..71db3f7014 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -21,6 +21,7 @@
#include <string>
#include <map>
+#include <mutex>
#include <stddef.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
@@ -120,7 +121,7 @@ class MemMap {
std::string* error_msg);
// Releases the memory mapping.
- ~MemMap() REQUIRES(!Locks::mem_maps_lock_);
+ ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
const std::string& GetName() const {
return name_;
@@ -175,14 +176,17 @@ class MemMap {
bool use_ashmem = true);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
- static void Init() REQUIRES(!Locks::mem_maps_lock_);
- static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
+ // Init and Shutdown are NOT thread safe.
+ // Both may be called multiple times, and MemMap objects may be created any
+ // time after the first call to Init and before the first call to Shutdown.
+ static void Init() REQUIRES(!MemMap::mem_maps_lock_);
+ static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
// If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
// faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
@@ -197,16 +201,16 @@ class MemMap {
size_t base_size,
int prot,
bool reuse,
- size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
+ size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMapsLocked(std::ostream& os, bool terse)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static bool HasMemMap(MemMap* map)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
// Internal version of mmap that supports low 4gb emulation.
static void* MapInternal(void* addr,
@@ -236,8 +240,10 @@ class MemMap {
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
#endif
+ static std::mutex* mem_maps_lock_;
+
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
- static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
+ static Maps* maps_ GUARDED_BY(MemMap::mem_maps_lock_);
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
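// The contract stated in the header comment above, modeled standalone: Init
// and Shutdown are not thread safe, but repeated calls are harmless no-ops
// (dex2oat calls Init once before the runtime exists and again with it).
#include <map>
#include <mutex>

static std::mutex* g_maps_lock = nullptr;
static std::map<void*, int>* g_maps = nullptr;

void Init() {
  if (g_maps_lock != nullptr) {
    return;  // second Init: already initialized
  }
  g_maps_lock = new std::mutex();
  // Lock only to satisfy a GUARDED_BY-style discipline, as in the diff.
  std::lock_guard<std::mutex> mu(*g_maps_lock);
  g_maps = new std::map<void*, int>();
}

void Shutdown() {
  if (g_maps_lock == nullptr) {
    return;  // second Shutdown: nothing to do
  }
  {
    std::lock_guard<std::mutex> mu(*g_maps_lock);
    delete g_maps;
    g_maps = nullptr;
  }
  delete g_maps_lock;
  g_maps_lock = nullptr;
}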
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 6a65e1271f..2cff47e8b4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -65,6 +65,17 @@ inline Class* Class::GetSuperClass() {
OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
}
+inline void Class::SetSuperClass(ObjPtr<Class> new_super_class) {
+ // Super class is assigned once, except during class linker initialization.
+ if (kIsDebugBuild) {
+ ObjPtr<Class> old_super_class =
+ GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+ DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
+ }
+ DCHECK(new_super_class != nullptr);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
+}
+
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ClassLoader* Class::GetClassLoader() {
return GetFieldObject<ClassLoader, kVerifyFlags, kReadBarrierOption>(
@@ -635,23 +646,6 @@ inline void Class::SetClinitThreadId(pid_t new_clinit_thread_id) {
}
}
-template<VerifyObjectFlags kVerifyFlags>
-inline uint32_t Class::GetAccessFlags() {
- // Check class is loaded/retired or this is java.lang.String that has a
- // circularity issue during loading the names of its members
- DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
- this == String::GetJavaLangString())
- << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
- << " IsRetired=" << IsRetired<kVerifyFlags>()
- << " IsErroneous=" <<
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
- << " IsString=" << (this == String::GetJavaLangString())
- << " status= " << GetStatus<kVerifyFlags>()
- << " descriptor=" << PrettyDescriptor();
- return GetField32<kVerifyFlags>(AccessFlagsOffset());
-}
-
inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f08d4daf95..f7ff735fcc 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -951,7 +951,8 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3
return interfaces->Get(idx);
} else {
dex::TypeIndex type_idx = klass->GetDirectInterfaceTypeIdx(idx);
- ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx);
+ ObjPtr<Class> interface = ClassLinker::LookupResolvedType(
+ type_idx, klass->GetDexCache(), klass->GetClassLoader());
return interface;
}
}
@@ -1345,5 +1346,26 @@ std::string Class::PrettyClassAndClassLoader() {
return result;
}
+template<VerifyObjectFlags kVerifyFlags> void Class::GetAccessFlagsDCheck() {
+ // Check class is loaded/retired or this is java.lang.String that has a
+ // circularity issue during loading the names of its members
+ DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
+ this == String::GetJavaLangString())
+ << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
+ << " IsRetired=" << IsRetired<kVerifyFlags>()
+ << " IsErroneous=" <<
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
+ << " IsString=" << (this == String::GetJavaLangString())
+ << " status= " << GetStatus<kVerifyFlags>()
+ << " descriptor=" << PrettyDescriptor();
+}
+// Instantiate the common cases.
+template void Class::GetAccessFlagsDCheck<kVerifyNone>();
+template void Class::GetAccessFlagsDCheck<kVerifyThis>();
+template void Class::GetAccessFlagsDCheck<kVerifyReads>();
+template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
+template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c9f27ad53f..d34f09c721 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -231,7 +231,13 @@ class MANAGED Class FINAL : public Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ GetAccessFlagsDCheck<kVerifyFlags>();
+ }
+ return GetField32<kVerifyFlags>(AccessFlagsOffset());
+ }
+
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
@@ -683,14 +689,7 @@ class MANAGED Class FINAL : public Object {
// `This` and `klass` must be classes.
ObjPtr<Class> GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Super class is assigned once, except during class linker initialization.
- ObjPtr<Class> old_super_class =
- GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
- DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
- DCHECK(new_super_class != nullptr);
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
- }
+ void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
@@ -1397,6 +1396,9 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags>
+ void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index a59bb7b880..bef3ad29a3 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -40,14 +40,22 @@ inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
-inline mirror::String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
- return StringDexCachePair::Lookup(GetStrings(), string_idx.index_, NumStrings()).Read();
+ const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize;
+ DCHECK_LT(slot_idx, NumStrings());
+ return slot_idx;
}
-inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
- ObjPtr<mirror::String> resolved) {
- StringDexCachePair::Assign(GetStrings(), string_idx.index_, resolved.Ptr(), NumStrings());
+inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+ return GetStrings()[StringSlotIndex(string_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
+}
+
+inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
+ DCHECK(resolved != nullptr);
+ GetStrings()[StringSlotIndex(string_idx)].store(
+ StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
DCHECK(runtime->IsAotCompiler());
@@ -58,50 +66,70 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
}
inline void DexCache::ClearString(dex::StringIndex string_idx) {
- const uint32_t slot_idx = string_idx.index_ % NumStrings();
DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = StringSlotIndex(string_idx);
StringDexCacheType* slot = &GetStrings()[slot_idx];
// This is racy but should only be called from the transactional interpreter.
if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
- StringDexCachePair cleared(
- nullptr,
- StringDexCachePair::InvalidIndexForSlot(slot_idx));
+ StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
slot->store(cleared, std::memory_order_relaxed);
}
}
+inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
+ DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds());
+ const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedTypes());
+ return slot_idx;
+}
+
inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
// It is theorized that a load acquire is not required since obtaining the resolved class will
// always have an address dependency or a lock.
- DCHECK_LT(type_idx.index_, NumResolvedTypes());
- return GetResolvedTypes()[type_idx.index_].Read();
+ return GetResolvedTypes()[TypeSlotIndex(type_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}
inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
- DCHECK_LT(type_idx.index_, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
+ DCHECK(resolved != nullptr);
// TODO default transaction support.
// Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
// class but not necessarily seeing the loaded members like the static fields array.
// See b/32075261.
- reinterpret_cast<Atomic<GcRoot<mirror::Class>>&>(GetResolvedTypes()[type_idx.index_]).
- StoreRelease(GcRoot<Class>(resolved));
+ GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
+ TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
-inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
- DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
- return MethodTypeDexCachePair::Lookup(
- GetResolvedMethodTypes(), proto_idx, NumResolvedMethodTypes()).Read();
+inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = TypeSlotIndex(type_idx);
+ TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx];
+ // This is racy but should only be called from the single-threaded ImageWriter and tests.
+ if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
+ TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
+ slot->store(cleared, std::memory_order_relaxed);
+ }
}
-inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
+ const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedMethodTypes());
+ return slot_idx;
+}
- MethodTypeDexCachePair::Assign(GetResolvedMethodTypes(), proto_idx, resolved,
- NumResolvedMethodTypes());
+inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+ return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(proto_idx);
+}
+
+inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+ DCHECK(resolved != nullptr);
+ GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
+ MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
@@ -198,49 +226,49 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- VisitDexCachePairs<mirror::String, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
GetStrings(), NumStrings(), visitor);
- GcRoot<mirror::Class>* resolved_types = GetResolvedTypes();
- for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) {
- visitor.VisitRootIfNonNull(resolved_types[i].AddressWithoutBarrier());
- }
+ VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
+ GetResolvedTypes(), NumResolvedTypes(), visitor);
- VisitDexCachePairs<mirror::MethodType, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) {
- mirror::StringDexCacheType* src = GetStrings();
+inline void DexCache::FixupStrings(StringDexCacheType* dest, const Visitor& visitor) {
+ StringDexCacheType* src = GetStrings();
for (size_t i = 0, count = NumStrings(); i < count; ++i) {
StringDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::String* ptr = source.object.Read<kReadBarrierOption>();
- mirror::String* new_source = visitor(ptr);
+ String* ptr = source.object.Read<kReadBarrierOption>();
+ String* new_source = visitor(ptr);
source.object = GcRoot<String>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor) {
- GcRoot<mirror::Class>* src = GetResolvedTypes();
+inline void DexCache::FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) {
+ TypeDexCacheType* src = GetResolvedTypes();
for (size_t i = 0, count = NumResolvedTypes(); i < count; ++i) {
- mirror::Class* source = src[i].Read<kReadBarrierOption>();
- mirror::Class* new_source = visitor(source);
- dest[i] = GcRoot<mirror::Class>(new_source);
+ TypeDexCachePair source = src[i].load(std::memory_order_relaxed);
+ Class* ptr = source.object.Read<kReadBarrierOption>();
+ Class* new_source = visitor(ptr);
+ source.object = GcRoot<Class>(new_source);
+ dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedMethodTypes(mirror::MethodTypeDexCacheType* dest,
+inline void DexCache::FixupResolvedMethodTypes(MethodTypeDexCacheType* dest,
const Visitor& visitor) {
- mirror::MethodTypeDexCacheType* src = GetResolvedMethodTypes();
+ MethodTypeDexCacheType* src = GetResolvedMethodTypes();
for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) {
MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::MethodType* ptr = source.object.Read<kReadBarrierOption>();
- mirror::MethodType* new_source = visitor(ptr);
+ MethodType* ptr = source.object.Read<kReadBarrierOption>();
+ MethodType* new_source = visitor(ptr);
source.object = GcRoot<MethodType>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
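// The data structure these dex-cache hunks converge on, standalone: a fixed
// power-of-two, direct-mapped array of (object, index) pairs. A store records
// the full index; a load only hits when the stored index matches, so
// collisions degrade to misses instead of wrong answers. The real pairs use
// PACKED(8) GcRoots with compressed references, must be initialized before
// use (DexCachePair::Initialize seeds slot 0 with index 1 so index 0 cannot
// hit spuriously); this model keeps only the core mechanism.
#include <atomic>
#include <cstdint>

template <typename T, uint32_t kSize>  // kSize must be a power of two
class DirectMappedCache {
 public:
  T* Get(uint32_t idx) const {
    Pair p = slots_[idx % kSize].load(std::memory_order_relaxed);
    return (p.index == idx) ? p.object : nullptr;  // GetObjectForIndex()
  }
  void Set(uint32_t idx, T* object) {
    // Strings use relaxed stores; resolved types use memory_order_release so
    // a published class is never seen before its members (b/32075261).
    slots_[idx % kSize].store(Pair{object, idx}, std::memory_order_release);
  }

 private:
  struct Pair {
    T* object;
    uint32_t index;
  };
  std::atomic<Pair> slots_[kSize];
};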
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 741cf3bb47..3103a92c83 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -58,8 +58,8 @@ void DexCache::InitializeDexCache(Thread* self,
mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- GcRoot<mirror::Class>* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
+ mirror::TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
@@ -69,6 +69,10 @@ void DexCache::InitializeDexCache(Thread* self,
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
// Note that we allocate the method type dex caches regardless of this flag,
// and we make sure here that they're not used by the runtime. This is in the
@@ -104,8 +108,9 @@ void DexCache::InitializeDexCache(Thread* self,
CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
}
- for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
- CHECK(types[i].IsNull());
+ for (size_t i = 0; i < num_types; ++i) {
+ CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
}
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
@@ -121,6 +126,9 @@ void DexCache::InitializeDexCache(Thread* self,
if (strings != nullptr) {
mirror::StringDexCachePair::Initialize(strings);
}
+ if (types != nullptr) {
+ mirror::TypeDexCachePair::Initialize(types);
+ }
if (method_types != nullptr) {
mirror::MethodTypeDexCachePair::Initialize(method_types);
}
@@ -129,7 +137,7 @@ void DexCache::InitializeDexCache(Thread* self,
strings,
num_strings,
types,
- dex_file->NumTypeIds(),
+ num_types,
methods,
dex_file->NumMethodIds(),
fields,
@@ -143,7 +151,7 @@ void DexCache::Init(const DexFile* dex_file,
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6f88cc5df4..e68b0c7219 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -18,14 +18,14 @@
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
#include "array.h"
-#include "art_field.h"
-#include "class.h"
+#include "base/bit_utils.h"
#include "dex_file_types.h"
#include "object.h"
#include "object_array.h"
namespace art {
+class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
@@ -36,6 +36,7 @@ class Thread;
namespace mirror {
+class Class;
class MethodType;
class String;
@@ -60,7 +61,7 @@ template <typename T> struct PACKED(8) DexCachePair {
// it's always non-null if the id branch succeeds (except for the 0th id).
// Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
// the lookup id == stored id branch.
- DexCachePair(T* object, uint32_t index)
+ DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object),
index(index) {}
DexCachePair() = default;
@@ -74,39 +75,28 @@ template <typename T> struct PACKED(8) DexCachePair {
dex_cache[0].store(first_elem, std::memory_order_relaxed);
}
- static GcRoot<T> Lookup(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- uint32_t cache_size) {
- DCHECK_NE(cache_size, 0u);
- DexCachePair<T> element = dex_cache[idx % cache_size].load(std::memory_order_relaxed);
- if (idx != element.index) {
- return GcRoot<T>(nullptr);
- }
-
- DCHECK(!element.object.IsNull());
- return element.object;
- }
-
- static void Assign(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- T* object,
- uint32_t cache_size) {
- DCHECK_LT(idx % cache_size, cache_size);
- dex_cache[idx % cache_size].store(
- DexCachePair<T>(object, idx), std::memory_order_relaxed);
- }
-
static uint32_t InvalidIndexForSlot(uint32_t slot) {
// Since the cache size is a power of two, 0 will always map to slot 0.
// Use 1 for slot 0 and 0 for all other slots.
return (slot == 0) ? 1u : 0u;
}
+
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(!object.IsNull());
+ return object.Read();
+ }
};
-using StringDexCachePair = DexCachePair<mirror::String>;
+using TypeDexCachePair = DexCachePair<Class>;
+using TypeDexCacheType = std::atomic<TypeDexCachePair>;
+
+using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
-using MethodTypeDexCachePair = DexCachePair<mirror::MethodType>;
+using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
@@ -115,6 +105,11 @@ class MANAGED DexCache FINAL : public Object {
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
+ // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheTypeCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
+ "Type dex cache size is not a power of 2.");
+
// Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
static constexpr size_t kDexCacheStringCacheSize = 1024;
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
@@ -126,6 +121,10 @@ class MANAGED DexCache FINAL : public Object {
static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
"MethodType dex cache size is not a power of 2.");
+ static constexpr size_t StaticTypeSize() {
+ return kDexCacheTypeCacheSize;
+ }
+
static constexpr size_t StaticStringSize() {
return kDexCacheStringCacheSize;
}
@@ -156,7 +155,7 @@ class MANAGED DexCache FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
+ void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -211,7 +210,7 @@ class MANAGED DexCache FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
}
- mirror::String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
+ String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
@@ -226,6 +225,8 @@ class MANAGED DexCache FINAL : public Object {
void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -254,11 +255,11 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(StringsOffset(), strings);
}
- GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
+ TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
}
- void SetResolvedTypes(GcRoot<Class>* resolved_types)
+ void SetResolvedTypes(TypeDexCacheType* resolved_types)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
@@ -323,7 +324,7 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
- void SetLocation(ObjPtr<mirror::String> location) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);
// NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
// provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -340,7 +341,7 @@ class MANAGED DexCache FINAL : public Object {
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
@@ -351,12 +352,16 @@ class MANAGED DexCache FINAL : public Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
@@ -366,7 +371,7 @@ class MANAGED DexCache FINAL : public Object {
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
- uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
+ uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements.
uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
// elements.
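
Taken together, these dex_cache.h changes convert the resolved-types storage from one GcRoot<Class> slot per type id into the same fixed-size, hash-based cache already used for strings. The following is a minimal standalone sketch of that scheme (illustrative names, not the ART sources; it uses a raw pointer where ART stores a GcRoot and packs the pair to 8 bytes so the atomics stay lock-free): a power-of-two table indexed by id modulo size, where each slot stores the id alongside the object so a stale slot fails the id comparison and the caller falls back to slow-path resolution.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    template <typename T>
    struct CachePair {  // Mirrors DexCachePair<T>: the value plus its id.
      T* object;
      uint32_t index;
    };

    template <typename T, size_t kSize>
    class HashDexCache {
      static_assert((kSize & (kSize - 1)) == 0, "size must be a power of 2");

     public:
      HashDexCache() {
        for (size_t i = 0; i != kSize; ++i) {
          // Slot 0 would spuriously match id 0, so prime it with id 1
          // (the InvalidIndexForSlot() trick from the diff above).
          slots_[i].store(CachePair<T>{nullptr, i == 0 ? 1u : 0u},
                          std::memory_order_relaxed);
        }
      }

      T* Lookup(uint32_t idx) {
        CachePair<T> pair = slots_[idx % kSize].load(std::memory_order_relaxed);
        // An id mismatch means the slot holds a different id (or nothing yet);
        // the caller must resolve via the slow path and Store() the result.
        return pair.index == idx ? pair.object : nullptr;
      }

      void Store(uint32_t idx, T* object) {
        slots_[idx % kSize].store(CachePair<T>{object, idx},
                                  std::memory_order_relaxed);
      }

     private:
      std::atomic<CachePair<T>> slots_[kSize];
    };

Because the table no longer has one slot per type id, the bound becomes min(kDexCacheTypeCacheSize, NumTypeIds()), which is exactly the num_types value the dex_cache.cc hunk above computes.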
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8f978e122c..5693f67646 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -51,7 +51,8 @@ TEST_F(DexCacheTest, Open) {
EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings()
|| java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings());
- EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
+ EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
+ || java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index dca30626e0..53d267b52c 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -19,7 +19,7 @@
#include "class.h"
#include "gc_root.h"
-#include "object.h"
+#include "object-inl.h"
#include "method_handles.h"
#include "method_type.h"
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index 03ab93069c..637bafd75e 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -19,12 +19,13 @@
#include <string>
#include <vector>
+#include "class-inl.h"
#include "class_linker.h"
+#include "class_loader.h"
#include "common_runtime_test.h"
#include "handle_scope-inl.h"
-#include "runtime/mirror/class.h"
-#include "runtime/mirror/class_loader.h"
-#include "scoped_thread_state_change.h"
+#include "object_array-inl.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 354410e6bf..8e591e4434 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -142,8 +142,10 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
}
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
-#ifdef USE_BAKER_READ_BARRIER
- CHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
#if defined(__arm__)
uintptr_t obj = reinterpret_cast<uintptr_t>(this);
uintptr_t result;
@@ -190,37 +192,29 @@ inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency)
UNREACHABLE();
UNUSED(fake_address_dependency);
#endif
-#else // !USE_BAKER_READ_BARRIER
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(fake_address_dependency);
-#endif
}
inline uint32_t Object::GetReadBarrierState() {
-#ifdef USE_BAKER_READ_BARRIER
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(kUseBakerReadBarrier);
LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetReadBarrierStateAcquire() {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetMarkBit() {
@@ -233,23 +227,22 @@ inline uint32_t Object::GetMarkBit() {
}
inline void Object::SetReadBarrierState(uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(rb_state);
SetLockWord(lw, false);
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(rb_state);
-#endif
}
template<bool kCasRelease>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord expected_lw;
@@ -272,11 +265,6 @@ inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32
CasLockWordWeakRelease(expected_lw, new_lw) :
CasLockWordWeakRelaxed(expected_lw, new_lw)));
return true;
-#else
- UNUSED(expected_rb_state, rb_state);
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
@@ -691,19 +679,6 @@ inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new
field_offset, new_value);
}
-template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
-inline int32_t Object::GetField32(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- return GetField<int32_t, kIsVolatile>(field_offset);
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline int32_t Object::GetField32Volatile(MemberOffset field_offset) {
- return GetField32<kVerifyFlags, true>(field_offset);
-}
-
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
@@ -854,28 +829,6 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va
new_value);
}
-template<typename kSize, bool kIsVolatile>
-inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- kSize* addr = reinterpret_cast<kSize*>(raw_addr);
- if (kIsVolatile) {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
- } else {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
- }
-}
-
-template<typename kSize, bool kIsVolatile>
-inline kSize Object::GetField(MemberOffset field_offset) {
- const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
- const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
- if (kIsVolatile) {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
- } else {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
- }
-}
-
template<typename kSize>
inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
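
The object-inl.h hunks above all make the same transformation: the #ifdef USE_BAKER_READ_BARRIER / #else blocks become ordinary branches on the compile-time constant kUseBakerReadBarrier. A minimal sketch of the pattern (generic names, not ART's); both arms are now always parsed and type-checked, and the optimizer still deletes the dead one:

    #include <cstdio>
    #include <cstdlib>

    constexpr bool kFeatureEnabled = true;  // Fixed per build configuration.

    [[noreturn]] inline void Fatal(const char* msg) {
      std::fputs(msg, stderr);
      std::abort();
    }

    inline int FeatureOnlyOperation(int x) {
      if (!kFeatureEnabled) {
        // With #ifdef, this arm would vanish before the compiler ever saw it;
        // as a constant branch it is checked in every configuration and then
        // folded away, which catches bit-rot in rarely built configurations.
        Fatal("Unreachable");
      }
      return x * 2;
    }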
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index db58a60994..4541ce2a42 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_
+#include "atomic.h"
#include "base/casts.h"
#include "base/enums.h"
#include "globals.h"
@@ -432,11 +433,18 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ return GetField<int32_t, kIsVolatile>(field_offset);
+ }
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags, true>(field_offset);
+ }
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -611,10 +619,28 @@ class MANAGED LOCKABLE Object {
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+ if (kIsVolatile) {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+ } else {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+ }
+ }
+
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+ const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+ if (kIsVolatile) {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+ } else {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+ }
+ }
+
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 95b6c3e76b..409c6c2896 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -241,8 +241,9 @@ class MANAGED String FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- // First bit (uppermost/leftmost) is taken out for Compressed/Uncompressed flag
- // [0] Uncompressed: string uses 16-bit memory | [1] Compressed: 8-bit memory
+
+ // If string compression is enabled, count_ holds the StringCompressionFlag in the
+ // least significant bit and the length in the remaining bits; that is, length = count_ >> 1.
int32_t count_;
uint32_t hash_code_;
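
A small sketch of the count_ encoding the rewritten comment describes (the flag polarity, i.e. which bit value means compressed, is an assumption here; only the bit layout is stated in the comment):

    #include <cstdint>

    // Length in the upper 31 bits, compression flag in bit 0.
    inline int32_t MakeCount(int32_t length, bool compressed) {
      return (length << 1) | (compressed ? 0 : 1);  // Polarity assumed.
    }
    inline int32_t GetLength(int32_t count) { return count >> 1; }
    inline bool IsCompressed(int32_t count) { return (count & 1) == 0; }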
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0ceb23a7a2..f3cb0df80e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -356,40 +356,44 @@ void Monitor::Lock(Thread* self) {
// Do this before releasing the lock so that we don't get deflated.
size_t num_waiters = num_waiters_;
++num_waiters_;
+
+ // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
+ // lock and then re-acquiring the mutator lock can deadlock.
+ bool started_trace = false;
+ if (ATRACE_ENABLED()) {
+ if (owner_ != nullptr) { // Did the owner_ give the lock up?
+ std::ostringstream oss;
+ std::string name;
+ owner_->GetThreadName(name);
+ oss << PrettyContentionInfo(name,
+ owner_->GetTid(),
+ owners_method,
+ owners_dex_pc,
+ num_waiters);
+ // Add info for contending thread.
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
+ const char* filename;
+ int32_t line_number;
+ TranslateLocation(m, pc, &filename, &line_number);
+ oss << " blocking from "
+ << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+ << ":" << line_number << ")";
+ ATRACE_BEGIN(oss.str().c_str());
+ started_trace = true;
+ }
+ }
+
monitor_lock_.Unlock(self); // Let go of locks in order.
self->SetMonitorEnterObject(GetObject());
{
- uint32_t original_owner_thread_id = 0u;
ScopedThreadSuspension tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
+ uint32_t original_owner_thread_id = 0u;
{
// Reacquire monitor_lock_ without mutator_lock_ for Wait.
MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
original_owner_thread_id = owner_->GetThreadId();
- if (ATRACE_ENABLED()) {
- std::ostringstream oss;
- {
- // Reacquire mutator_lock_ for getting the location info.
- ScopedObjectAccess soa(self);
- std::string name;
- owner_->GetThreadName(name);
- oss << PrettyContentionInfo(name,
- owner_->GetTid(),
- owners_method,
- owners_dex_pc,
- num_waiters);
- // Add info for contending thread.
- uint32_t pc;
- ArtMethod* m = self->GetCurrentMethod(&pc);
- const char* filename;
- int32_t line_number;
- TranslateLocation(m, pc, &filename, &line_number);
- oss << " blocking from "
- << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
- << ":" << line_number << ")";
- }
- ATRACE_BEGIN(oss.str().c_str());
- }
monitor_contenders_.Wait(self); // Still contended so wait.
}
}
@@ -448,9 +452,11 @@ void Monitor::Lock(Thread* self) {
}
}
}
- ATRACE_END();
}
}
+ if (started_trace) {
+ ATRACE_END();
+ }
self->SetMonitorEnterObject(nullptr);
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
@@ -1374,7 +1380,7 @@ void MonitorList::Add(Monitor* m) {
while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
monitor_add_condition_.WaitHoldingLocks(self);
}
list_.push_front(m);
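
The Monitor::Lock() restructuring does two things: it builds the contention description while the mutator lock is still held (instead of re-acquiring it under monitor_lock_, which risks deadlock), and it pairs ATRACE_BEGIN/ATRACE_END through a local flag so the end marker is emitted exactly when a begin was. A generic sketch of the pairing idiom (stand-in tracer functions, not the ATRACE macros):

    #include <cstdio>

    inline bool TracingEnabled() { return true; }  // Stand-in for ATRACE_ENABLED().
    inline void TraceBegin(const char* label) { std::printf("B|%s\n", label); }
    inline void TraceEnd() { std::printf("E\n"); }

    void BlockingOperation(bool contended) {
      bool started_trace = false;
      if (TracingEnabled() && contended) {
        // Build the label up front, while the state it describes is still
        // safe to inspect; fetching it later would require re-taking locks.
        TraceBegin("contended lock");
        started_trace = true;
      }
      // ... release locks, wait, re-acquire ...
      if (started_trace) {
        TraceEnd();  // Runs on every path, but only if a begin happened.
      }
    }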
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index cd0e55f261..1234933db9 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -188,7 +188,7 @@ static jobject DexFile_openDexFileNative(JNIEnv* env,
if (array == nullptr) {
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
- if (linker->FindDexCache(soa.Self(), *dex_file, true) != nullptr) {
+ if (linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
dex_file.release();
}
}
@@ -230,7 +230,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
if (dex_file != nullptr) {
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
- if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
+ if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
// Clear the element in the array so that we can call close again.
long_dex_files->Set(i, 0);
delete dex_file;
@@ -281,7 +281,13 @@ static jclass DexFile_defineClassNative(JNIEnv* env,
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader>(javaLoader)));
- class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ ObjPtr<mirror::DexCache> dex_cache =
+ class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ if (dex_cache == nullptr) {
+ // OOME or InternalError (dexFile already registered with a different class loader).
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
ObjPtr<mirror::Class> result = class_linker->DefineClass(soa.Self(),
descriptor.c_str(),
hash,
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
index 981be68199..07959607fc 100644
--- a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
@@ -128,7 +128,7 @@ static void InMemoryDexClassLoader_DexData_uninitialize(JNIEnv* env, jclass, jlo
if (kIsDebugBuild) {
ScopedObjectAccess soa(env);
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- CHECK(class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr);
+ CHECK(!class_linker->IsDexFileRegistered(soa.Self(), *dex_file));
}
delete dex_file;
}
@@ -153,7 +153,13 @@ static jclass InMemoryDexClassLoader_DexData_findClass(
StackHandleScope<1> handle_scope(soa.Self());
Handle<mirror::ClassLoader> class_loader(
handle_scope.NewHandle(soa.Decode<mirror::ClassLoader>(loader)));
- class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ ObjPtr<mirror::DexCache> dex_cache =
+ class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ if (dex_cache == nullptr) {
+ // OOME or InternalError (dexFile already registered with a different class loader).
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
ObjPtr<mirror::Class> result = class_linker->DefineClass(
soa.Self(),
class_descriptor,
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 1af861929e..24308d9e81 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -448,11 +448,8 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
- ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file, true);
- // If dex cache was deallocated, just continue.
- if (dex_cache == nullptr) {
- continue;
- }
+ ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file);
+ CHECK(dex_cache != nullptr); // Boot class path dex caches are never unloaded.
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
if (string != nullptr) {
@@ -515,7 +512,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file, nullptr)));
-
+ CHECK(dex_cache.Get() != nullptr); // Boot class path dex caches are never unloaded.
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
PreloadDexCachesResolveString(dex_cache, dex::StringIndex(j), strings);
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index f1c350f23c..ee6dda56a5 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -53,7 +53,7 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
+ CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index)));
}
@@ -65,12 +65,22 @@ static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jin
dex_cache->GetResolvedString(dex::StringIndex(string_index)));
}
-static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index,
+static void DexCache_setResolvedType(JNIEnv* env,
+ jobject javaDexCache,
+ jint type_index,
jobject type) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
- dex_cache->SetResolvedType(dex::TypeIndex(type_index), soa.Decode<mirror::Class>(type));
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ CHECK_LT(static_cast<size_t>(type_index), dex_file.NumTypeIds());
+ ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type);
+ if (t != nullptr && t->DescriptorEquals(dex_file.StringByTypeIdx(dex::TypeIndex(type_index)))) {
+ ClassTable* table =
+ Runtime::Current()->GetClassLinker()->FindClassTable(soa.Self(), dex_cache);
+ if (table != nullptr && table->TryInsert(t) == t) {
+ dex_cache->SetResolvedType(dex::TypeIndex(type_index), t);
+ }
+ }
}
static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
@@ -78,7 +88,10 @@ static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint s
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string));
+ ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string);
+ if (s != nullptr) {
+ dex_cache->SetResolvedString(dex::StringIndex(string_index), s);
+ }
}
static JNINativeMethod gMethods[] = {
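
The new DexCache_setResolvedType follows a validate-then-publish rule: the class must match the descriptor for that type id, and it must be (or become) the canonical entry in the class loader's ClassTable before it may be published to the fast-path cache. A generic sketch of the rule, with a toy table standing in for ClassTable::TryInsert:

    #include <map>
    #include <string>

    // TryInsert() returns the canonical value for a key: the existing entry
    // if one is present, otherwise the candidate it just inserted.
    class CanonicalTable {
     public:
      int TryInsert(const std::string& key, int candidate) {
        return map_.emplace(key, candidate).first->second;
      }

     private:
      std::map<std::string, int> map_;
    };

    void PublishIfCanonical(CanonicalTable& table,
                            std::map<std::string, int>& fast_cache,
                            const std::string& descriptor,
                            int candidate) {
      // Publish only if our candidate is the canonical one; if another value
      // already won the race, the cache must not diverge from the table.
      if (table.TryInsert(descriptor, candidate) == candidate) {
        fast_cache[descriptor] = candidate;
      }
    }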
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index ea266d131d..f1d6ff5f70 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index fcb017545a..195091f8ab 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -25,7 +25,7 @@
#include "ScopedUtfChars.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index ece0338c93..70cd6aaae2 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -22,7 +22,7 @@
#include "mirror/object_array.h"
#include "mirror/string.h"
#include "scoped_fast_native_object_access-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/oat.h b/runtime/oat.h
index 532c9681c3..e454c64d52 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '0', '9', '\0' }; // Register mask change.
+ static constexpr uint8_t kOatVersion[] = { '1', '1', '1', '\0' }; // hash-based DexCache types.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 33bd0f311d..a46b47075c 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,6 +22,7 @@
#include "android-base/stringprintf.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -32,11 +33,13 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "well_known_classes.h"
namespace art {
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index fd84426bb8..b4e4285dc7 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -44,7 +44,7 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding.stack_map_encoding);
+ return stack_map.GetDexPc(encoding.stack_map.encoding);
}
} else {
DCHECK(method->IsNative());
@@ -80,7 +80,7 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
: code_info.GetStackMapForDexPc(dex_pc, encoding);
if (stack_map.IsValid()) {
return reinterpret_cast<uintptr_t>(entry_point) +
- stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA);
+ stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
}
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 2f51e27b2d..bdaad20d7e 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -46,7 +46,7 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
#include "mirror/class_loader.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "base/logging.h"
#include "base/macros.h"
#include "../../libcore/ojluni/src/main/native/jvm.h" // TODO(narayan): fix it
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index c14fd84264..fc4b6fe71c 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -42,12 +42,16 @@
#include "class_linker.h"
#include "common_throws.h"
#include "events-inl.h"
+#include "gc/heap.h"
+#include "gc_root.h"
#include "handle.h"
#include "jni_env_ext-inl.h"
#include "jni_internal.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
+#include "mirror/object_reference.h"
+#include "mirror/object-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -261,15 +265,22 @@ struct ClassCallback : public art::ClassLoadCallback {
thread_jni.get(),
jklass.get());
}
- AddTempClass(thread, jklass.get());
+ if (klass->IsTemp()) {
+ AddTempClass(thread, jklass.get());
+ }
}
}
- void ClassPrepare(art::Handle<art::mirror::Class> temp_klass ATTRIBUTE_UNUSED,
+ void ClassPrepare(art::Handle<art::mirror::Class> temp_klass,
art::Handle<art::mirror::Class> klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassPrepare)) {
art::Thread* thread = art::Thread::Current();
+ if (temp_klass.Get() != klass.Get()) {
+ DCHECK(temp_klass->IsTemp());
+ DCHECK(temp_klass->IsRetired());
+ HandleTempClass(thread, temp_klass, klass);
+ }
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
ScopedLocalRef<jthread> thread_jni(
@@ -283,32 +294,200 @@ struct ClassCallback : public art::ClassLoadCallback {
}
}
+ // To support parallel class-loading, we need to perform some locking dances here. Namely,
+ // the fixup stage must not be holding the temp_classes lock when it fixes up the system
+ // (as that requires suspending all mutators).
+
void AddTempClass(art::Thread* self, jclass klass) {
std::unique_lock<std::mutex> mu(temp_classes_lock);
- temp_classes.push_back(reinterpret_cast<jclass>(self->GetJniEnv()->NewGlobalRef(klass)));
+ jclass global_klass = reinterpret_cast<jclass>(self->GetJniEnv()->NewGlobalRef(klass));
+ temp_classes.push_back(global_klass);
}
- void HandleTempClass(art::Handle<art::mirror::Class> temp_klass,
+ void HandleTempClass(art::Thread* self,
+ art::Handle<art::mirror::Class> temp_klass,
art::Handle<art::mirror::Class> klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- std::unique_lock<std::mutex> mu(temp_classes_lock);
- if (temp_classes.empty()) {
- return;
- }
+ bool requires_fixup = false;
+ {
+ std::unique_lock<std::mutex> mu(temp_classes_lock);
+ if (temp_classes.empty()) {
+ return;
+ }
- art::Thread* self = art::Thread::Current();
- for (auto it = temp_classes.begin(); it != temp_classes.end(); ++it) {
- if (temp_klass.Get() == art::ObjPtr<art::mirror::Class>::DownCast(self->DecodeJObject(*it))) {
- temp_classes.erase(it);
- FixupTempClass(temp_klass, klass);
+ for (auto it = temp_classes.begin(); it != temp_classes.end(); ++it) {
+ if (temp_klass.Get() == art::ObjPtr<art::mirror::Class>::DownCast(self->DecodeJObject(*it))) {
+ self->GetJniEnv()->DeleteGlobalRef(*it);
+ temp_classes.erase(it);
+ requires_fixup = true;
+ break;
+ }
}
}
+ if (requires_fixup) {
+ FixupTempClass(self, temp_klass, klass);
+ }
}
- void FixupTempClass(art::Handle<art::mirror::Class> temp_klass ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Class> klass ATTRIBUTE_UNUSED)
+ void FixupTempClass(art::Thread* self,
+ art::Handle<art::mirror::Class> temp_klass,
+ art::Handle<art::mirror::Class> klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- // TODO: Implement.
+ // Suspend everything.
+ art::gc::Heap* heap = art::Runtime::Current()->GetHeap();
+ if (heap->IsGcConcurrentAndMoving()) {
+ // We need to visit every heap object while the moving GC isn't running. See the
+ // comment in Heap::VisitObjects().
+ heap->IncrementDisableMovingGC(self);
+ }
+ {
+ art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects);
+ art::ScopedSuspendAll ssa("FixupTempClass");
+
+ art::mirror::Class* input = temp_klass.Get();
+ art::mirror::Class* output = klass.Get();
+
+ FixupGlobalReferenceTables(input, output);
+ FixupLocalReferenceTables(self, input, output);
+ FixupHeap(input, output);
+ }
+ if (heap->IsGcConcurrentAndMoving()) {
+ heap->DecrementDisableMovingGC(self);
+ }
+ }
+
+ class RootUpdater : public art::RootVisitor {
+ public:
+ RootUpdater(const art::mirror::Class* input, art::mirror::Class* output)
+ : input_(input), output_(output) {}
+
+ void VisitRoots(art::mirror::Object*** roots,
+ size_t count,
+ const art::RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ for (size_t i = 0; i != count; ++i) {
+ if (*roots[i] == input_) {
+ *roots[i] = output_;
+ }
+ }
+ }
+
+ void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
+ size_t count,
+ const art::RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ for (size_t i = 0; i != count; ++i) {
+ if (roots[i]->AsMirrorPtr() == input_) {
+ roots[i]->Assign(output_);
+ }
+ }
+ }
+
+ private:
+ const art::mirror::Class* input_;
+ art::mirror::Class* output_;
+ };
+
+ void FixupGlobalReferenceTables(art::mirror::Class* input, art::mirror::Class* output)
+ REQUIRES(art::Locks::mutator_lock_) {
+ art::JavaVMExt* java_vm = art::Runtime::Current()->GetJavaVM();
+
+ // Fix up the global table with a root visitor.
+ RootUpdater global_update(input, output);
+ java_vm->VisitRoots(&global_update);
+
+ class WeakGlobalUpdate : public art::IsMarkedVisitor {
+ public:
+ WeakGlobalUpdate(art::mirror::Class* root_input, art::mirror::Class* root_output)
+ : input_(root_input), output_(root_output) {}
+
+ art::mirror::Object* IsMarked(art::mirror::Object* obj) OVERRIDE {
+ if (obj == input_) {
+ return output_;
+ }
+ return obj;
+ }
+
+ private:
+ const art::mirror::Class* input_;
+ art::mirror::Class* output_;
+ };
+ WeakGlobalUpdate weak_global_update(input, output);
+ java_vm->SweepJniWeakGlobals(&weak_global_update);
+ }
+
+ void FixupLocalReferenceTables(art::Thread* self,
+ art::mirror::Class* input,
+ art::mirror::Class* output)
+ REQUIRES(art::Locks::mutator_lock_) {
+ class LocalUpdate {
+ public:
+ LocalUpdate(const art::mirror::Class* root_input, art::mirror::Class* root_output)
+ : input_(root_input), output_(root_output) {}
+
+ static void Callback(art::Thread* t, void* arg) REQUIRES(art::Locks::mutator_lock_) {
+ LocalUpdate* local = reinterpret_cast<LocalUpdate*>(arg);
+
+ // Fix up the local table with a root visitor.
+ RootUpdater local_update(local->input_, local->output_);
+ t->GetJniEnv()->locals.VisitRoots(
+ &local_update, art::RootInfo(art::kRootJNILocal, t->GetThreadId()));
+ }
+
+ private:
+ const art::mirror::Class* input_;
+ art::mirror::Class* output_;
+ };
+ LocalUpdate local_upd(input, output);
+ art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Runtime::Current()->GetThreadList()->ForEach(LocalUpdate::Callback, &local_upd);
+ }
+
+ void FixupHeap(art::mirror::Class* input, art::mirror::Class* output)
+ REQUIRES(art::Locks::mutator_lock_) {
+ class HeapFixupVisitor {
+ public:
+ HeapFixupVisitor(const art::mirror::Class* root_input, art::mirror::Class* root_output)
+ : input_(root_input), output_(root_output) {}
+
+ void operator()(art::mirror::Object* src,
+ art::MemberOffset field_offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::mirror::HeapReference<art::mirror::Object>* trg =
+ src->GetFieldObjectReferenceAddr(field_offset);
+ if (trg->AsMirrorPtr() == input_) {
+ DCHECK_NE(field_offset.Uint32Value(), 0u); // This shouldn't be the class field of
+ // an object.
+ trg->Assign(output_);
+ }
+ }
+
+ void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {
+ LOG(FATAL) << "Unreachable";
+ }
+
+ void VisitRootIfNonNull(
+ art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
+ LOG(FATAL) << "Unreachable";
+ }
+
+ static void AllObjectsCallback(art::mirror::Object* obj, void* arg)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ HeapFixupVisitor* hfv = reinterpret_cast<HeapFixupVisitor*>(arg);
+
+ // Visit references, not native roots.
+ obj->VisitReferences<false>(*hfv, art::VoidFunctor());
+ }
+
+ private:
+ const art::mirror::Class* input_;
+ art::mirror::Class* output_;
+ };
+ HeapFixupVisitor hfv(input, output);
+ art::Runtime::Current()->GetHeap()->VisitObjectsPaused(HeapFixupVisitor::AllObjectsCallback,
+ &hfv);
}
// A set of all the temp classes we have handed out. We have to fix up references to these.
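
The HandleTempClass rewrite is the "locking dance" the new comment promises: membership is decided and the global ref released while temp_classes_lock is held, but the stop-the-world fixup runs only after the lock is dropped, since suspending all mutators while holding the mutex could deadlock against a loader blocked in AddTempClass. A self-contained sketch of that shape (toy types, not the JVMTI code):

    #include <algorithm>
    #include <mutex>
    #include <vector>

    class TempRegistry {
     public:
      void Add(int id) {
        std::lock_guard<std::mutex> lock(mutex_);
        ids_.push_back(id);
      }

      void HandleRetired(int id) {
        bool requires_fixup = false;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          auto it = std::find(ids_.begin(), ids_.end(), id);
          if (it != ids_.end()) {
            ids_.erase(it);
            requires_fixup = true;
          }
        }  // Lock released: the fixup below may suspend every other thread,
           // and one of them might be blocked inside Add() on this mutex.
        if (requires_fixup) {
          StopTheWorldFixup(id);
        }
      }

     private:
      void StopTheWorldFixup(int /*id*/) { /* heavyweight work */ }

      std::mutex mutex_;
      std::vector<int> ids_;
    };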
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 7b2521d63a..fe3e52b0c1 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -303,11 +303,11 @@ class FollowReferencesHelper FINAL {
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->jni_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
@@ -333,11 +333,11 @@ class FollowReferencesHelper FINAL {
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->stack_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
diff --git a/runtime/openjdkjvmti/ti_monitor.cc b/runtime/openjdkjvmti/ti_monitor.cc
index b82768397b..645faea41b 100644
--- a/runtime/openjdkjvmti/ti_monitor.cc
+++ b/runtime/openjdkjvmti/ti_monitor.cc
@@ -54,7 +54,7 @@ class JvmtiMonitor {
JvmtiMonitor() : owner_(nullptr), count_(0) {
}
- static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) {
+ static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) NO_THREAD_SAFETY_ANALYSIS {
// Check whether this thread holds the monitor, or nobody does.
art::Thread* owner_thread = monitor->owner_.load(std::memory_order_relaxed);
if (owner_thread != nullptr && self != owner_thread) {
@@ -71,7 +71,7 @@ class JvmtiMonitor {
return true;
}
- void MonitorEnter(art::Thread* self) {
+ void MonitorEnter(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
// Check for recursive enter.
if (IsOwner(self)) {
count_++;
@@ -86,7 +86,7 @@ class JvmtiMonitor {
count_ = 1;
}
- bool MonitorExit(art::Thread* self) {
+ bool MonitorExit(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
if (!IsOwner(self)) {
return false;
}
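
Annotating Destroy/MonitorEnter/MonitorExit with NO_THREAD_SAFETY_ANALYSIS opts these functions out of clang's -Wthread-safety checker, which cannot model a monitor whose ownership is hand-rolled on an atomic rather than expressed through annotated lock types. A small sketch of the idiom (the attribute spelling is clang's; the monitor itself is a toy):

    #include <atomic>

    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    class SpinOwner {
     public:
      // self must be a nonzero thread id.
      void Enter(int self) NO_THREAD_SAFETY_ANALYSIS {
        int expected = 0;
        // Spin until we install ourselves as the owner; the analysis cannot
        // see this as "acquiring a lock", hence the annotation.
        while (!owner_.compare_exchange_weak(expected, self,
                                             std::memory_order_acquire)) {
          expected = 0;
        }
      }

      bool Exit(int self) NO_THREAD_SAFETY_ANALYSIS {
        if (owner_.load(std::memory_order_relaxed) != self) {
          return false;  // Caller is not the owner.
        }
        owner_.store(0, std::memory_order_release);
        return true;
      }

     private:
      std::atomic<int> owner_{0};
    };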
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index eb4c2f9f21..843fd8c8e4 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -38,17 +38,22 @@
#include "art_jvmti.h"
#include "base/array_slice.h"
#include "base/logging.h"
+#include "debugger.h"
#include "dex_file.h"
#include "dex_file_types.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "instrumentation.h"
+#include "jdwp/jdwp.h"
+#include "jdwp/jdwp_constants.h"
+#include "jdwp/jdwp_event.h"
+#include "jdwp/object_registry.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
#include "object_lock.h"
@@ -56,6 +61,8 @@
#include "ScopedLocalRef.h"
#include "ti_class_loader.h"
#include "transform.h"
+#include "verifier/method_verifier.h"
+#include "verifier/verifier_log_mode.h"
namespace openjdkjvmti {
@@ -121,6 +128,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
new_obsolete_method->CopyFrom(old_method, ptr_size);
DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
new_obsolete_method->SetIsObsolete();
+ new_obsolete_method->SetDontCompile();
obsolete_maps_->insert({old_method, new_obsolete_method});
// Update JIT Data structures to point to the new method.
art::jit::Jit* jit = art::Runtime::Current()->GetJit();
@@ -378,7 +386,7 @@ art::mirror::ClassLoader* Redefiner::ClassRedefinition::GetClassLoader() {
art::mirror::DexCache* Redefiner::ClassRedefinition::CreateNewDexCache(
art::Handle<art::mirror::ClassLoader> loader) {
- return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
+ return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get()).Ptr();
}
void Redefiner::RecordFailure(jvmtiError result,
@@ -490,6 +498,143 @@ void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
}
}
+// Try to find the declared method: first look for a virtual method, then fall back to a direct
+// method if no virtual match is found.
+static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
+ const char* name,
+ art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = klass->FindDeclaredVirtualMethod(name, sig, art::kRuntimePointerSize);
+ if (m == nullptr) {
+ m = klass->FindDeclaredDirectMethod(name, sig, art::kRuntimePointerSize);
+ }
+ return m;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameMethods() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+
+ // Make sure we have the same number of methods.
+ uint32_t num_new_method = new_iter.NumVirtualMethods() + new_iter.NumDirectMethods();
+ uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
+ if (num_new_method != num_old_method) {
+ bool bigger = num_new_method > num_old_method;
+ RecordFailure(bigger ? ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+ : ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+ StringPrintf("Total number of declared methods changed from %d to %d",
+ num_old_method, num_new_method));
+ return false;
+ }
+
+ // Skip all of the fields. We should have already checked this.
+ while (new_iter.HasNextStaticField() || new_iter.HasNextInstanceField()) {
+ new_iter.Next();
+ }
+ // Check each of the methods. NB we don't need to specifically check for removals since the two
+ // dex files have the same number of methods, which means there must be an equal number of
+ // additions and removals.
+ for (; new_iter.HasNextVirtualMethod() || new_iter.HasNextDirectMethod(); new_iter.Next()) {
+ // Get the data on the method we are searching for.
+ const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(new_iter.GetMemberIndex());
+ const char* new_method_name = dex_file_->GetMethodName(new_method_id);
+ art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
+ art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
+ // Since the method counts matched above, a missing old method implies at least one method was
+ // added and one removed. We return the ADDED failure message since it is easier to produce a
+ // useful error report for it.
+ if (old_method == nullptr) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+ StringPrintf("Unknown method '%s' (sig: %s) was added!",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ // Since direct methods have different flags than virtual ones (specifically, direct methods must
+ // have the kAccPrivate, kAccStatic, or kAccConstructor flag), we can tell if a method changes
+ // from virtual to direct.
+ uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
+ StringPrintf("method '%s' (sig: %s) had different access flags",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameFields() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+ const art::DexFile& old_dex_file = h_klass->GetDexFile();
+ art::ClassDataItemIterator old_iter(old_dex_file,
+ old_dex_file.GetClassData(*h_klass->GetClassDef()));
+ // Instance and static fields can be differentiated by their flags so no need to check them
+ // separately.
+ while (new_iter.HasNextInstanceField() || new_iter.HasNextStaticField()) {
+ // Get the data on the field we are searching for.
+ const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_iter.GetMemberIndex());
+ const char* new_field_name = dex_file_->GetFieldName(new_field_id);
+ const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
+
+ if (!(old_iter.HasNextInstanceField() || old_iter.HasNextStaticField())) {
+ // We are missing the old version of this field!
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Unknown field '%s' (type: %s) added!",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter.GetMemberIndex());
+ const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
+ const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
+
+ // Check name and type.
+ if (strcmp(old_field_name, new_field_name) != 0 ||
+ strcmp(old_field_type, new_field_type) != 0) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field changed from '%s' (sig: %s) to '%s' (sig: %s)!",
+ old_field_name,
+ old_field_type,
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ // Since static fields have different flags than instance ones (specifically, static fields must
+ // have the kAccStatic flag), we can tell if a field changes from static to instance.
+ if (new_iter.GetFieldAccessFlags() != old_iter.GetFieldAccessFlags()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field '%s' (sig: %s) had different access flags",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ new_iter.Next();
+ old_iter.Next();
+ }
+ if (old_iter.HasNextInstanceField() || old_iter.HasNextStaticField()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("field '%s' (sig: %s) is missing!",
+ old_dex_file.GetFieldName(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex())),
+ old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex()))));
+ return false;
+ }
+ return true;
+}
+
bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
art::StackHandleScope<1> hs(driver_->self_);
@@ -565,7 +710,6 @@ bool Redefiner::ClassRedefinition::CheckClass() {
}
}
LOG(WARNING) << "No verification is done on annotations of redefined classes.";
- LOG(WARNING) << "Bytecodes of redefinitions are not verified.";
return true;
}
@@ -628,26 +772,28 @@ class RedefinitionDataHolder {
}
// TODO Maybe make an iterable view type to simplify using this.
- art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index)
+ art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
}
- art::mirror::Object* GetJavaDexFile(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::mirror::Object* GetJavaDexFile(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return GetSlot(klass_index, kSlotJavaDexFile);
}
- art::mirror::LongArray* GetNewDexFileCookie(jint klass_index)
+ art::mirror::LongArray* GetNewDexFileCookie(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::LongArray*>(GetSlot(klass_index, kSlotNewDexFileCookie));
}
- art::mirror::DexCache* GetNewDexCache(jint klass_index)
+ art::mirror::DexCache* GetNewDexCache(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::DexCache*>(GetSlot(klass_index, kSlotNewDexCache));
}
- art::mirror::Class* GetMirrorClass(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::mirror::Class* GetMirrorClass(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
}
- art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index)
+ art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::ByteArray*>(GetSlot(klass_index, kSlotOrigDexFile));
}
@@ -677,15 +823,15 @@ class RedefinitionDataHolder {
SetSlot(klass_index, kSlotOrigDexFile, bytes);
}
- int32_t Length() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_->GetLength() / kNumSlots;
}
private:
- art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+ mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
art::mirror::Object* GetSlot(jint klass_index,
- DataSlot slot) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_LT(klass_index, Length());
return arr_->Get((kNumSlots * klass_index) + slot);
}
@@ -701,6 +847,31 @@ class RedefinitionDataHolder {
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
+// TODO Stash and update soft failure state
+bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
+ const RedefinitionDataHolder& holder) {
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+ art::StackHandleScope<2> hs(driver_->self_);
+ std::string error;
+ // TODO Make verification log level lower
+ art::verifier::MethodVerifier::FailureKind failure =
+ art::verifier::MethodVerifier::VerifyClass(driver_->self_,
+ dex_file_.get(),
+ hs.NewHandle(holder.GetNewDexCache(klass_index)),
+ hs.NewHandle(GetClassLoader()),
+ dex_file_->GetClassDef(0), /*class_def*/
+ nullptr, /*compiler_callbacks*/
+ false, /*allow_soft_failures*/
+ /*log_level*/
+ art::verifier::HardFailLogMode::kLogWarning,
+ &error);
+ bool passes = failure == art::verifier::MethodVerifier::kNoFailure;
+ if (!passes) {
+ RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+ }
+ return passes;
+}
+
+// Looks through the previously allocated cookies to see if we need to update them with another new
+// dex file. This ensures that even if multiple classes with the same class loader are redefined at
+// once, all of the new dex files are added to that class loader.
@@ -783,7 +954,7 @@ bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
}
holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
if (holder->GetNewDexCache(klass_index) == nullptr) {
- driver_->self_->AssertPendingOOMException();
+ driver_->self_->AssertPendingException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
return false;
@@ -800,6 +971,23 @@ bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
return true;
}
+void Redefiner::ClassRedefinition::UnregisterBreakpoints() {
+ DCHECK(art::Dbg::IsDebuggerActive());
+ art::JDWP::JdwpState* state = art::Dbg::GetJdwpState();
+ if (state != nullptr) {
+ state->UnregisterLocationEventsOnClass(GetMirrorClass());
+ }
+}
+
+void Redefiner::UnregisterAllBreakpoints() {
+ if (LIKELY(!art::Dbg::IsDebuggerActive())) {
+ return;
+ }
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ redef.UnregisterBreakpoints();
+ }
+}
+
bool Redefiner::CheckAllRedefinitionAreValid() {
for (Redefiner::ClassRedefinition& redef : redefinitions_) {
if (!redef.CheckRedefinitionIsValid()) {
@@ -840,6 +1028,17 @@ void Redefiner::ReleaseAllDexFiles() {
}
}
+bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
+ int32_t cnt = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (!redef.CheckVerification(cnt, holder)) {
+ return false;
+ }
+ cnt++;
+ }
+ return true;
+}
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
@@ -859,13 +1058,15 @@ jvmtiError Redefiner::Run() {
// try loop.
if (!CheckAllRedefinitionAreValid() ||
!EnsureAllClassAllocationsFinished() ||
- !FinishAllRemainingAllocations(holder)) {
+ !FinishAllRemainingAllocations(holder) ||
+ !CheckAllClassesAreVerified(holder)) {
// TODO Null out the ClassExt fields we allocated (if possible; we might be racing with another
// RedefineClasses call which made it even bigger). The leak shouldn't be huge (2x array of size
// declared_methods_.length) but would be good to get rid of. All other allocations should be
// cleaned up by the GC eventually.
return result_;
}
+ // At this point we can no longer fail without corrupting the runtime state.
int32_t counter = 0;
for (Redefiner::ClassRedefinition& redef : redefinitions_) {
if (holder.GetSourceClassLoader(counter) == nullptr) {
@@ -873,6 +1074,7 @@ jvmtiError Redefiner::Run() {
}
counter++;
}
+ UnregisterAllBreakpoints();
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -905,9 +1107,7 @@ jvmtiError Redefiner::Run() {
holder.GetOriginalDexFileBytes(counter));
counter++;
}
- // TODO Verify the new Class.
// TODO Shrink the obsolete method maps if possible?
- // TODO find appropriate class loader.
// TODO Put this into a scoped thing.
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
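
The ti_redefine changes above are exercised through the standard JVMTI RedefineClasses entry point. As a minimal sketch of the calling side, assuming `jvmti` is a jvmtiEnv* with can_redefine_classes enabled and `new_bytes`/`new_len` (illustrative names) hold the replacement class bytes:

    #include <jvmti.h>

    // Sketch only: redefine a single class from a JVMTI agent. On ART this
    // lands in Redefiner::Run above, which performs every fallible step
    // (validity checks, allocations, verification) before committing.
    jvmtiError RedefineOneClass(jvmtiEnv* jvmti, jclass klass,
                                const unsigned char* new_bytes, jint new_len) {
      jvmtiClassDefinition def;
      def.klass = klass;
      def.class_byte_count = new_len;
      def.class_bytes = new_bytes;
      return jvmti->RedefineClasses(1, &def);
    }
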
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5aa7dde55c..c441377b10 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -165,6 +165,11 @@ class Redefiner {
// data has not been modified in an incompatible manner.
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ // Checks that the contained class can be successfully verified.
+ bool CheckVerification(int32_t klass_index,
+ const RedefinitionDataHolder& holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
// Preallocates all needed allocations in klass so that we can pause execution safely.
// TODO We should be able to free the arrays if they end up not being used. Investigate doing
// this in the future. For now we will just take the memory hit.
@@ -177,17 +182,11 @@ class Redefiner {
// Checks that the class can even be redefined.
bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not add/remove methods.
- bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "methods are not checked for modification currently";
- return true;
- }
+ // Checks that the dex file does not add/remove methods, or change their modifiers or types.
+ bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not modify fields
- bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "Fields are not checked for modification currently";
- return true;
- }
+ // Checks that the dex file does not modify field types or modifiers.
+ bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
art::ObjPtr<art::mirror::LongArray> new_cookie)
@@ -208,6 +207,8 @@ class Redefiner {
void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void UnregisterBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
Redefiner* driver_;
jclass klass_;
@@ -245,10 +246,13 @@ class Redefiner {
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void UnregisterAllBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 4cf55a6a98..b5a6c6e1ee 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -377,7 +377,8 @@ jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
jvmtiStackInfo& new_stack_info = stack_info[i];
- jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
+ threads[i]->GetPeerFromOtherThread());
new_stack_info.thread = thread_peer;
if (old_stack_info.frame_count > 0) {
@@ -453,7 +454,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
}
// Get the peer, and check whether we know it.
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
for (size_t index = 0; index != handles.size(); ++index) {
if (peer == handles[index].Get()) {
// Found the thread.
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index b18a5cd746..f8f8fa6b25 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -78,7 +78,9 @@ struct ThreadCallback : public art::ThreadLifecycleCallback, public art::Runtime
if (art::kIsDebugBuild) {
std::string name;
self->GetThreadName(name);
- if (name != "Signal Catcher" && !android::base::StartsWith(name, "Jit thread pool")) {
+ if (name != "JDWP" &&
+ name != "Signal Catcher" &&
+ !android::base::StartsWith(name, "Jit thread pool")) {
LOG(FATAL) << "Unexpected thread before start: " << name;
}
}
@@ -200,7 +202,7 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
info_ptr->is_daemon = self->IsDaemon();
- art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeerFromOtherThread();
// ThreadGroup.
if (peer != nullptr) {
@@ -458,7 +460,7 @@ jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
continue;
}
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
if (peer != nullptr) {
peers.push_back(peer);
}
diff --git a/runtime/openjdkjvmti/ti_threadgroup.cc b/runtime/openjdkjvmti/ti_threadgroup.cc
index 35b1bfd920..e63ce6576a 100644
--- a/runtime/openjdkjvmti/ti_threadgroup.cc
+++ b/runtime/openjdkjvmti/ti_threadgroup.cc
@@ -174,7 +174,7 @@ static void GetThreads(art::Handle<art::mirror::Object> thread_group,
if (t->IsStillStarting()) {
continue;
}
- art::ObjPtr<art::mirror::Object> peer = t->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = t->GetPeerFromOtherThread();
if (peer == nullptr) {
continue;
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index bf995095de..72e0500457 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -441,7 +441,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
- if (bit < encoding.stack_mask_size_in_bits && stack_mask.LoadBit(bit)) {
+ if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
is_reference = true;
}
break;
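
The fix above checks the frame offset against the stack mask's true bit size. Each mask bit covers one 32-bit stack slot, so a frame byte offset maps to bit offset >> 2. A standalone sketch of the same lookup (the mask contents are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Load bit `bit_index` from a little-endian bit buffer, the way a stack
    // mask is consulted: one bit per 32-bit stack slot.
    bool LoadBit(const uint8_t* mask, size_t bit_index) {
      return (mask[bit_index / 8] >> (bit_index % 8)) & 1;
    }

    int main() {
      const uint8_t mask[] = {0x0a};  // Bits 1 and 3 set: slots 1 and 3 hold refs.
      const size_t mask_size_in_bits = 8;
      for (uint32_t offset = 0; offset < 16; offset += 4) {
        uint32_t bit = offset >> 2;  // Frame byte offset -> slot bit index.
        bool is_reference = bit < mask_size_in_bits && LoadBit(mask, bit);
        std::printf("offset %u -> bit %u: %s\n", offset, bit,
                    is_reference ? "reference" : "not a reference");
      }
      return 0;
    }
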
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 693b8f4e2f..9609bee022 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -672,24 +672,6 @@ bool Runtime::Start() {
started_ = true;
- // Create the JIT if we have to use JIT compilation or to save profiling info.
- // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
- // recording profiles. Maybe we should consider changing the name to make it clearer that
- // it's not only about compiling. b/28295073.
- if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
- std::string error_msg;
- if (!IsZygote()) {
- // If we are the zygote then we need to wait until after forking to create the code cache
- // due to SELinux restrictions on r/w/x memory regions.
- CreateJit();
- } else if (jit_options_->UseJitCompilation()) {
- if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
- // Try to load compiler pre zygote to reduce PSS. b/27744947
- LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
- }
- }
- }
-
if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
@@ -714,6 +696,27 @@ bool Runtime::Start() {
Thread::FinishStartup();
+ // Create the JIT if we have to use JIT compilation or to save profiling info. This is
+ // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
+ // ThreadGroup to exist.
+ //
+ // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
+ // recording profiles. Maybe we should consider changing the name to make it clearer that
+ // it's not only about compiling. b/28295073.
+ if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+ std::string error_msg;
+ if (!IsZygote()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache
+ // due to SELinux restrictions on r/w/x memory regions.
+ CreateJit();
+ } else if (jit_options_->UseJitCompilation()) {
+ if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+ // Try to load compiler pre zygote to reduce PSS. b/27744947
+ LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ }
+ }
+ }
+
// Send the start phase event. We have to wait till here as this is when the main thread peer
// has just been generated, important root clinits have been run and JNI is completely functional.
{
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index d4469f4357..000da59bd2 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -110,6 +110,10 @@ inline ScopedObjectAccessUnchecked::ScopedObjectAccessUnchecked(Thread* self)
Locks::mutator_lock_->AssertSharedHeld(Self());
}
+inline ScopedObjectAccess::ScopedObjectAccess(JNIEnv* env) : ScopedObjectAccessUnchecked(env) {}
+inline ScopedObjectAccess::ScopedObjectAccess(Thread* self) : ScopedObjectAccessUnchecked(self) {}
+inline ScopedObjectAccess::~ScopedObjectAccess() {}
+
inline ScopedThreadSuspension::ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
: self_(self), suspended_state_(suspended_state) {
DCHECK(self_ != nullptr);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b4992586ce..24199f76b6 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -159,16 +159,14 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
public:
ALWAYS_INLINE explicit ScopedObjectAccess(JNIEnv* env)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(env) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
ALWAYS_INLINE explicit ScopedObjectAccess(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(self) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
// Base class will release share of lock. Invoked after this destructor.
- ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {}
+ ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE;
private:
// TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
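
The annotations (SHARED_LOCK_FUNCTION, UNLOCK_FUNCTION) stay on the declarations in this header while the bodies move to scoped_thread_state_change-inl.h. A minimal sketch of that header/-inl.h split, with illustrative names:

    // widget.h -- declarations only; attribute macros stay on the declaration.
    class Widget {
     public:
      explicit Widget(int id);  // Defined out of line in widget-inl.h.
      ~Widget();
     private:
      int id_;
    };

    // widget-inl.h -- inline definitions, included only where bodies are
    // needed, which keeps widget.h light for files that just need the type.
    inline Widget::Widget(int id) : id_(id) {}
    inline Widget::~Widget() {}
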
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5ad00a4e55..d7ba1d75d8 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -37,7 +37,7 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
@@ -96,13 +96,17 @@ bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_fr
return false;
}
-StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- : StackVisitor(thread, context, walk_kind, 0) {}
+StackVisitor::StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended)
+ : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
StackVisitor::StackVisitor(Thread* thread,
Context* context,
StackWalkKind walk_kind,
- size_t num_frames)
+ size_t num_frames,
+ bool check_suspended)
: thread_(thread),
walk_kind_(walk_kind),
cur_shadow_frame_(nullptr),
@@ -112,8 +116,11 @@ StackVisitor::StackVisitor(Thread* thread,
num_frames_(num_frames),
cur_depth_(0),
current_inlining_depth_(0),
- context_(context) {
- DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ context_(context),
+ check_suspended_(check_suspended) {
+ if (check_suspended_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ }
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
@@ -138,7 +145,7 @@ ArtMethod* StackVisitor::GetMethod() const {
DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
return GetResolvedMethod(*GetCurrentQuickFrame(),
inline_info,
- encoding.inline_info_encoding,
+ encoding.inline_info.encoding,
depth_in_stack_map);
} else {
return *cur_quick_frame_;
@@ -155,7 +162,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
- return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info_encoding,
+ return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info.encoding,
depth_in_stack_map);
} else if (cur_oat_quick_method_header_ == nullptr) {
return DexFile::kDexNoIndex;
@@ -788,7 +795,9 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
- DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ if (check_suspended_) {
+ DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ }
CHECK_EQ(cur_depth_, 0U);
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
@@ -817,10 +826,10 @@ void StackVisitor::WalkStack(bool include_transitions) {
uint32_t native_pc_offset =
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+ if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
DCHECK_EQ(current_inlining_depth_, 0u);
- for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info_encoding);
+ for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
current_inlining_depth_ != 0;
--current_inlining_depth_) {
bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index 9dceb2931d..90a0aee353 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -590,7 +590,10 @@ class StackVisitor {
};
protected:
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind);
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -797,7 +800,11 @@ class StackVisitor {
private:
// Private constructor known in the case that num_frames_ has already been computed.
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ size_t num_frames,
+ bool check_suspended = true)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
@@ -851,6 +858,7 @@ class StackVisitor {
protected:
Context* const context_;
+ const bool check_suspended_;
};
} // namespace art
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 4e7c3f4f9f..d657311ae9 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -126,9 +126,9 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
<< ", number_of_stack_maps=" << number_of_stack_maps
<< ")\n";
ScopedIndentation indent1(vios);
- encoding.stack_map_encoding.Dump(vios);
+ encoding.stack_map.encoding.Dump(vios);
if (HasInlineInfo(encoding)) {
- encoding.inline_info_encoding.Dump(vios);
+ encoding.inline_info.encoding.Dump(vios);
}
// Display the Dex register location catalog.
GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
@@ -193,22 +193,22 @@ void StackMap::Dump(VariableIndentationOutputStream* vios,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
const std::string& header_suffix) const {
- StackMapEncoding stack_map_encoding = encoding.stack_map_encoding;
+ StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
vios->Stream()
<< "StackMap" << header_suffix
<< std::hex
<< " [native_pc=0x" << code_offset + pc_offset << "]"
- << " [entry_size=0x" << encoding.stack_map_encoding.BitSize() << " bits]"
+ << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
<< " (dex_pc=0x" << GetDexPc(stack_map_encoding)
<< ", native_pc_offset=0x" << pc_offset
<< ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
- << ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding)
+ << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
<< ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
<< std::dec
<< ", stack_mask=0b";
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
- for (size_t i = 0, e = encoding.stack_mask_size_in_bits; i < e; ++i) {
+ for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
vios->Stream() << ")\n";
@@ -229,7 +229,7 @@ void StackMap::Dump(VariableIndentationOutputStream* vios,
void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers[]) const {
- InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info_encoding;
+ InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
vios->Stream() << "InlineInfo with depth "
<< static_cast<uint32_t>(GetDepth(inline_info_encoding))
<< "\n";
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 062404dbf2..61d6a5847c 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -693,7 +693,7 @@ class StackMapEncoding {
size_t SetFromSizes(size_t native_pc_max,
size_t dex_pc_max,
size_t dex_register_map_size,
- size_t inline_info_size,
+ size_t number_of_inline_info,
size_t number_of_register_masks,
size_t number_of_stack_masks) {
total_bit_size_ = 0;
@@ -712,9 +712,7 @@ class StackMapEncoding {
// greater than the offset we might try to encode, we already implicitly have it.
// If number_of_inline_info is zero, we can encode only kNoInlineInfo (in zero bits).
inline_info_bit_offset_ = total_bit_size_;
- if (inline_info_size != 0) {
- total_bit_size_ += MinimumBitsToStore(dex_register_map_size + inline_info_size);
- }
+ total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
register_mask_index_bit_offset_ = total_bit_size_;
total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
@@ -749,6 +747,18 @@ class StackMapEncoding {
return total_bit_size_;
}
+ template<typename Vector>
+ void Encode(Vector* dest) const {
+ static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
+ dest->insert(dest->end(), ptr, ptr + sizeof(*this));
+ }
+
+ void Decode(const uint8_t** ptr) {
+ *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
+ *ptr += sizeof(*this);
+ }
+
void Dump(VariableIndentationOutputStream* vios) const;
private:
@@ -771,7 +781,7 @@ class StackMapEncoding {
*
* The information is of the form:
*
- * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask_index,
+ * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
* stack_mask_index].
*/
class StackMap {
@@ -809,12 +819,12 @@ class StackMap {
encoding.GetDexRegisterMapEncoding().Store(region_, offset);
}
- ALWAYS_INLINE uint32_t GetInlineDescriptorOffset(const StackMapEncoding& encoding) const {
+ ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
return encoding.GetInlineInfoEncoding().Load(region_);
}
- ALWAYS_INLINE void SetInlineDescriptorOffset(const StackMapEncoding& encoding, uint32_t offset) {
- encoding.GetInlineInfoEncoding().Store(region_, offset);
+ ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
+ encoding.GetInlineInfoEncoding().Store(region_, index);
}
ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
@@ -838,7 +848,7 @@ class StackMap {
}
ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
- return GetInlineDescriptorOffset(encoding) != kNoInlineInfo;
+ return GetInlineInfoIndex(encoding) != kNoInlineInfo;
}
ALWAYS_INLINE bool Equals(const StackMap& other) const {
@@ -908,12 +918,24 @@ class InlineInfoEncoding {
ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
}
- ALWAYS_INLINE size_t GetEntrySize() const {
- return RoundUp(total_bit_size_, kBitsPerByte) / kBitsPerByte;
+ ALWAYS_INLINE size_t BitSize() const {
+ return total_bit_size_;
}
void Dump(VariableIndentationOutputStream* vios) const;
+ template<typename Vector>
+ void Encode(Vector* dest) const {
+ static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
+ dest->insert(dest->end(), ptr, ptr + sizeof(*this));
+ }
+
+ void Decode(const uint8_t** ptr) {
+ *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
+ *ptr += sizeof(*this);
+ }
+
private:
static constexpr uint8_t kIsLastBitOffset = 0;
static constexpr uint8_t kMethodIndexBitOffset = 1;
@@ -934,8 +956,7 @@ class InlineInfoEncoding {
*/
class InlineInfo {
public:
- explicit InlineInfo(MemoryRegion region) : region_(region) {
- }
+ explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
size_t depth = 0;
@@ -1018,83 +1039,189 @@ class InlineInfo {
uint16_t* number_of_dex_registers) const;
private:
- ALWAYS_INLINE MemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- size_t entry_size = encoding.GetEntrySize();
+ ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth) const {
+ size_t entry_size = encoding.BitSize();
DCHECK_GT(entry_size, 0u);
return region_.Subregion(depth * entry_size, entry_size);
}
- MemoryRegion region_;
+ BitMemoryRegion region_;
+};
+
+// Bit-sized region encoding; may be more than 255 bits.
+class BitRegionEncoding {
+ public:
+ uint32_t num_bits = 0;
+
+ ALWAYS_INLINE size_t BitSize() const {
+ return num_bits;
+ }
+
+ template<typename Vector>
+ void Encode(Vector* dest) const {
+ EncodeUnsignedLeb128(dest, num_bits); // Use leb in case num_bits is greater than 255.
+ }
+
+ void Decode(const uint8_t** ptr) {
+ num_bits = DecodeUnsignedLeb128(ptr);
+ }
+};
+
+// A table of bit-sized encodings.
+template <typename Encoding>
+struct BitEncodingTable {
+ static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
+ // How the encoding is laid out (serialized).
+ Encoding encoding;
+
+ // Number of entries in the table (serialized).
+ size_t num_entries;
+
+ // Bit offset for the base of the table (computed).
+ size_t bit_offset = kInvalidOffset;
+
+ template<typename Vector>
+ void Encode(Vector* dest) const {
+ EncodeUnsignedLeb128(dest, num_entries);
+ encoding.Encode(dest);
+ }
+
+ ALWAYS_INLINE void Decode(const uint8_t** ptr) {
+ num_entries = DecodeUnsignedLeb128(ptr);
+ encoding.Decode(ptr);
+ }
+
+ // Set the bit offset of the table and add the space used by the table to *offset.
+ void UpdateBitOffset(size_t* offset) {
+ DCHECK(offset != nullptr);
+ bit_offset = *offset;
+ *offset += encoding.BitSize() * num_entries;
+ }
+
+ // Return the bit region for the entry at the given index.
+ ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
+ DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
+ DCHECK_LT(index, num_entries);
+ const size_t map_size = encoding.BitSize();
+ return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
+ }
+};
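
BitRegion above addresses fixed-width entries packed back to back in bit space: entry `index` starts at `bit_offset + index * BitSize()` with no byte alignment between entries. A self-contained sketch of writing and reading such a table (the 5-bit width and the values are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Read `num_bits` bits starting at absolute bit offset `bit_offset`,
    // little-endian bit order, like a BitMemoryRegion load.
    uint32_t LoadBits(const std::vector<uint8_t>& buf, size_t bit_offset,
                      size_t num_bits) {
      uint32_t value = 0;
      for (size_t i = 0; i < num_bits; ++i) {
        size_t bit = bit_offset + i;
        value |= static_cast<uint32_t>((buf[bit / 8] >> (bit % 8)) & 1) << i;
      }
      return value;
    }

    int main() {
      // Three 5-bit entries starting at bit offset 4 (so not byte aligned).
      std::vector<uint8_t> buf(4, 0);
      const size_t table_bit_offset = 4, entry_bits = 5;
      const uint32_t entries[] = {9, 17, 30};
      for (size_t i = 0; i < 3; ++i) {
        for (size_t b = 0; b < entry_bits; ++b) {
          size_t bit = table_bit_offset + i * entry_bits + b;
          buf[bit / 8] |= ((entries[i] >> b) & 1u) << (bit % 8);
        }
      }
      for (size_t i = 0; i < 3; ++i) {  // Entry i at base + i * entry_bits.
        std::printf("entry %zu = %u\n", i,
                    LoadBits(buf, table_bit_offset + i * entry_bits, entry_bits));
      }
      return 0;
    }
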
+
+// A byte-sized table of possibly variable-sized encodings.
+struct ByteSizedTable {
+ static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
+
+ // Number of entries in the table (serialized).
+ size_t num_entries = 0;
+
+ // Number of bytes of the table (serialized).
+ size_t num_bytes;
+
+ // Byte offset for the base of the table (computed).
+ size_t byte_offset = kInvalidOffset;
+
+ template<typename Vector>
+ void Encode(Vector* dest) const {
+ EncodeUnsignedLeb128(dest, num_entries);
+ EncodeUnsignedLeb128(dest, num_bytes);
+ }
+
+ ALWAYS_INLINE void Decode(const uint8_t** ptr) {
+ num_entries = DecodeUnsignedLeb128(ptr);
+ num_bytes = DecodeUnsignedLeb128(ptr);
+ }
+
+ // Set the byte offset of the table and advance *offset by the total bit size of the table.
+ void UpdateBitOffset(size_t* offset) {
+ DCHECK(offset != nullptr);
+ DCHECK_ALIGNED(*offset, kBitsPerByte);
+ byte_offset = *offset / kBitsPerByte;
+ *offset += num_bytes * kBitsPerByte;
+ }
};
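
The Encode/Decode methods above serialize their counts as unsigned LEB128. For reference, a standalone version of that varint scheme with the same semantics as ART's EncodeUnsignedLeb128/DecodeUnsignedLeb128 (written here without the ART headers):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // ULEB128: 7 payload bits per byte; the high bit is set on every byte
    // except the last, so small values cost a single byte.
    void EncodeUnsignedLeb128(std::vector<uint8_t>* dest, uint32_t value) {
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value != 0) byte |= 0x80;  // More bytes follow.
        dest->push_back(byte);
      } while (value != 0);
    }

    uint32_t DecodeUnsignedLeb128(const uint8_t** ptr) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*ptr)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    int main() {
      std::vector<uint8_t> buf;
      EncodeUnsignedLeb128(&buf, 127);     // Fits in one byte.
      EncodeUnsignedLeb128(&buf, 624485);  // Takes three bytes.
      const uint8_t* p = buf.data();
      uint32_t first = DecodeUnsignedLeb128(&p);
      uint32_t second = DecodeUnsignedLeb128(&p);
      std::printf("%u %u (total %zu bytes)\n", first, second, buf.size());
      return 0;
    }
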
// Most of the fields are encoded as ULEB128 to save space.
struct CodeInfoEncoding {
- uint32_t non_header_size;
- uint32_t number_of_stack_maps;
- uint32_t number_of_stack_masks;
- uint32_t number_of_register_masks;
- uint32_t stack_mask_size_in_bits;
- uint32_t register_mask_size_in_bits;
- uint32_t number_of_location_catalog_entries;
- StackMapEncoding stack_map_encoding;
- InlineInfoEncoding inline_info_encoding;
- uint8_t header_size;
-
- CodeInfoEncoding() { }
+ static constexpr uint32_t kInvalidSize = static_cast<uint32_t>(-1);
+ // Byte sized tables go first to avoid unnecessary alignment bits.
+ ByteSizedTable dex_register_map;
+ ByteSizedTable location_catalog;
+ BitEncodingTable<StackMapEncoding> stack_map;
+ BitEncodingTable<BitRegionEncoding> register_mask;
+ BitEncodingTable<BitRegionEncoding> stack_mask;
+ BitEncodingTable<InlineInfoEncoding> inline_info;
+
+ CodeInfoEncoding() {}
explicit CodeInfoEncoding(const void* data) {
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
- non_header_size = DecodeUnsignedLeb128(&ptr);
- number_of_stack_maps = DecodeUnsignedLeb128(&ptr);
- number_of_stack_masks = DecodeUnsignedLeb128(&ptr);
- number_of_register_masks = DecodeUnsignedLeb128(&ptr);
- stack_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
- register_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
- number_of_location_catalog_entries = DecodeUnsignedLeb128(&ptr);
- static_assert(alignof(StackMapEncoding) == 1,
- "StackMapEncoding should not require alignment");
- stack_map_encoding = *reinterpret_cast<const StackMapEncoding*>(ptr);
- ptr += sizeof(StackMapEncoding);
- if (stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0) {
- static_assert(alignof(InlineInfoEncoding) == 1,
- "InlineInfoEncoding should not require alignment");
- inline_info_encoding = *reinterpret_cast<const InlineInfoEncoding*>(ptr);
- ptr += sizeof(InlineInfoEncoding);
+ dex_register_map.Decode(&ptr);
+ location_catalog.Decode(&ptr);
+ stack_map.Decode(&ptr);
+ register_mask.Decode(&ptr);
+ stack_mask.Decode(&ptr);
+ if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+ inline_info.Decode(&ptr);
} else {
- inline_info_encoding = InlineInfoEncoding{}; // NOLINT.
+ inline_info = BitEncodingTable<InlineInfoEncoding>();
}
- header_size = dchecked_integral_cast<uint8_t>(ptr - reinterpret_cast<const uint8_t*>(data));
+ cache_header_size =
+ dchecked_integral_cast<uint32_t>(ptr - reinterpret_cast<const uint8_t*>(data));
+ ComputeTableOffsets();
}
template<typename Vector>
- void Compress(Vector* dest) const {
- EncodeUnsignedLeb128(dest, non_header_size);
- EncodeUnsignedLeb128(dest, number_of_stack_maps);
- EncodeUnsignedLeb128(dest, number_of_stack_masks);
- EncodeUnsignedLeb128(dest, number_of_register_masks);
- EncodeUnsignedLeb128(dest, stack_mask_size_in_bits);
- EncodeUnsignedLeb128(dest, register_mask_size_in_bits);
- EncodeUnsignedLeb128(dest, number_of_location_catalog_entries);
- const uint8_t* stack_map_ptr = reinterpret_cast<const uint8_t*>(&stack_map_encoding);
- dest->insert(dest->end(), stack_map_ptr, stack_map_ptr + sizeof(StackMapEncoding));
- if (stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0) {
- const uint8_t* inline_info_ptr = reinterpret_cast<const uint8_t*>(&inline_info_encoding);
- dest->insert(dest->end(), inline_info_ptr, inline_info_ptr + sizeof(InlineInfoEncoding));
+ void Compress(Vector* dest) {
+ dex_register_map.Encode(dest);
+ location_catalog.Encode(dest);
+ stack_map.Encode(dest);
+ register_mask.Encode(dest);
+ stack_mask.Encode(dest);
+ if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+ inline_info.Encode(dest);
}
+ cache_header_size = dest->size();
+ }
+
+ ALWAYS_INLINE void ComputeTableOffsets() {
+ // Skip the header.
+ size_t bit_offset = HeaderSize() * kBitsPerByte;
+ // The byte-sized tables must be byte aligned, so they go first.
+ dex_register_map.UpdateBitOffset(&bit_offset);
+ location_catalog.UpdateBitOffset(&bit_offset);
+ // Other tables don't require alignment.
+ stack_map.UpdateBitOffset(&bit_offset);
+ register_mask.UpdateBitOffset(&bit_offset);
+ stack_mask.UpdateBitOffset(&bit_offset);
+ inline_info.UpdateBitOffset(&bit_offset);
+ cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
+ }
+
+ ALWAYS_INLINE size_t HeaderSize() const {
+ DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
+ return cache_header_size;
+ }
+
+ ALWAYS_INLINE size_t NonHeaderSize() const {
+ DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
+ return cache_non_header_size;
}
+
+ private:
+ // Computed fields (not serialized).
+ // Header size in bytes.
+ uint32_t cache_header_size = kInvalidSize;
+ // Non header size in bytes.
+ uint32_t cache_non_header_size = kInvalidSize;
};
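
ComputeTableOffsets above lays the tables out back to back in bit space after the header, byte-aligned tables first, and NonHeaderSize rounds the final bit offset up to whole bytes. A small worked sketch of that accumulation, with made-up table sizes:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kBitsPerByte = 8;

    struct Table {
      const char* name;
      size_t entry_bits;   // Fixed bit width of one entry.
      size_t num_entries;
      size_t bit_offset;   // Computed below.
    };

    int main() {
      // Illustrative sizes only; the real widths come from the method's maps.
      Table tables[] = {
          {"stack_map", 22, 10, 0},
          {"register_mask", 7, 3, 0},
          {"stack_mask", 5, 4, 0},
          {"inline_info", 40, 2, 0},
      };
      const size_t header_bytes = 12;  // Pretend serialized header size.
      size_t bit_offset = header_bytes * kBitsPerByte;  // Skip the header.
      for (Table& t : tables) {  // Mirrors UpdateBitOffset on each table.
        t.bit_offset = bit_offset;
        bit_offset += t.entry_bits * t.num_entries;
        std::printf("%-13s starts at bit %zu\n", t.name, t.bit_offset);
      }
      // Like NonHeaderSize: round the end up to a whole number of bytes.
      const size_t non_header_bytes =
          (bit_offset + kBitsPerByte - 1) / kBitsPerByte - header_bytes;
      std::printf("non-header size: %zu bytes\n", non_header_bytes);
      return 0;
    }
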
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
*
- * [CodeInfoEncoding, StackMap+, DexRegisterLocationCatalog+, DexRegisterMap+, InlineInfo*]
- *
- * where CodeInfoEncoding is of the form:
- *
- * [non_header_size, number_of_stack_maps, stack_map_size_in_bits,
- * number_of_location_catalog_entries, StackMapEncoding]
+ * [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
+ * InlineInfo*]
*/
class CodeInfo {
public:
@@ -1104,7 +1231,7 @@ class CodeInfo {
explicit CodeInfo(const void* data) {
CodeInfoEncoding encoding = CodeInfoEncoding(data);
region_ = MemoryRegion(const_cast<void*>(data),
- encoding.header_size + encoding.non_header_size);
+ encoding.HeaderSize() + encoding.NonHeaderSize());
}
CodeInfoEncoding ExtractEncoding() const {
@@ -1114,99 +1241,67 @@ class CodeInfo {
}
bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0;
+ return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
}
DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
- return DexRegisterLocationCatalog(region_.Subregion(
- GetDexRegisterLocationCatalogOffset(encoding),
- GetDexRegisterLocationCatalogSize(encoding)));
+ return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
+ encoding.location_catalog.num_bytes));
}
ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_mask_size_in_bits;
+ return encoding.stack_mask.encoding.BitSize();
}
- ALWAYS_INLINE StackMap GetStackMapAt(size_t i, const CodeInfoEncoding& encoding) const {
- const size_t map_size = encoding.stack_map_encoding.BitSize();
- return StackMap(BitMemoryRegion(GetStackMaps(encoding), i * map_size, map_size));
+ ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
+ return StackMap(encoding.stack_map.BitRegion(region_, index));
}
- BitMemoryRegion GetStackMask(const CodeInfoEncoding& encoding, size_t stack_mask_index) const {
- // All stack mask data is stored before register map data (which is at the very end).
- const size_t entry_size = GetNumberOfStackMaskBits(encoding);
- const size_t register_mask_bits =
- encoding.register_mask_size_in_bits * encoding.number_of_register_masks;
- return BitMemoryRegion(region_,
- region_.size_in_bits() - register_mask_bits -
- entry_size * (stack_mask_index + 1),
- entry_size);
+ BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
+ return encoding.stack_mask.BitRegion(region_, index);
}
BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
const StackMap& stack_map) const {
- return GetStackMask(encoding, stack_map.GetStackMaskIndex(encoding.stack_map_encoding));
+ return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
}
- BitMemoryRegion GetRegisterMask(const CodeInfoEncoding& encoding, size_t index) const {
- const size_t entry_size = encoding.register_mask_size_in_bits;
- return BitMemoryRegion(region_,
- region_.size_in_bits() - entry_size * (index + 1),
- entry_size);
+ BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
+ return encoding.register_mask.BitRegion(region_, index);
}
uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
- size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map_encoding);
- return GetRegisterMask(encoding, index).LoadBits(0u, encoding.register_mask_size_in_bits);
+ size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
+ return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
}
uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
- return encoding.number_of_location_catalog_entries;
+ return encoding.location_catalog.num_entries;
}
uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
- return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding),
- GetNumberOfLocationCatalogEntries(encoding));
+ return encoding.location_catalog.num_bytes;
}
uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
- return encoding.number_of_stack_maps;
+ return encoding.stack_map.num_entries;
}
// Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map_encoding.BitSize() * GetNumberOfStackMaps(encoding);
- }
-
- // Get the size of all the stack maps of this CodeInfo object, in bytes.
- size_t GetStackMapsSize(const CodeInfoEncoding& encoding) const {
- return RoundUp(GetStackMapsSizeInBits(encoding), kBitsPerByte) / kBitsPerByte;
- }
-
- uint32_t GetDexRegisterLocationCatalogOffset(const CodeInfoEncoding& encoding) const {
- return GetStackMapsOffset(encoding) + GetStackMapsSize(encoding);
- }
-
- size_t GetDexRegisterMapsOffset(const CodeInfoEncoding& encoding) const {
- return GetDexRegisterLocationCatalogOffset(encoding)
- + GetDexRegisterLocationCatalogSize(encoding);
- }
-
- uint32_t GetStackMapsOffset(const CodeInfoEncoding& encoding) const {
- return encoding.header_size;
+ return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
const CodeInfoEncoding& encoding,
- uint32_t number_of_dex_registers) const {
- if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+ size_t number_of_dex_registers) const {
+ if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
return DexRegisterMap();
- } else {
- uint32_t offset = GetDexRegisterMapsOffset(encoding)
- + stack_map.GetDexRegisterMapOffset(encoding.stack_map_encoding);
- size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
}
+ const uint32_t offset = encoding.dex_register_map.byte_offset +
+ stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
+ size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
}
size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
@@ -1225,27 +1320,34 @@ class CodeInfo {
InlineInfo inline_info,
const CodeInfoEncoding& encoding,
uint32_t number_of_dex_registers) const {
- if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, depth)) {
+ if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
return DexRegisterMap();
} else {
- uint32_t offset = GetDexRegisterMapsOffset(encoding) +
- inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info_encoding, depth);
+ uint32_t offset = encoding.dex_register_map.byte_offset +
+ inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
}
+ InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
+ // Since we do not know the depth, we just return the whole remaining map.
+ // TODO: Clean this up.
+ const size_t bit_offset = encoding.inline_info.bit_offset +
+ index * encoding.inline_info.encoding.BitSize();
+ return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+ }
+
InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
- DCHECK(stack_map.HasInlineInfo(encoding.stack_map_encoding));
- uint32_t offset = stack_map.GetInlineDescriptorOffset(encoding.stack_map_encoding)
- + GetDexRegisterMapsOffset(encoding);
- return InlineInfo(region_.Subregion(offset, region_.size() - offset));
+ DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+ uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
+ return GetInlineInfo(index, encoding);
}
StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
+ if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
return stack_map;
}
}
@@ -1257,7 +1359,7 @@ class CodeInfo {
StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
StackMap stack_map = GetStackMapAt(i - 1, encoding);
- if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
+ if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
return stack_map;
}
}
@@ -1272,7 +1374,7 @@ class CodeInfo {
}
// Walk over all stack maps. If two consecutive stack maps are identical, then we
// have found a stack map suitable for OSR.
- const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
+ const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
for (size_t i = 0; i < e - 1; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
@@ -1303,7 +1405,7 @@ class CodeInfo {
// we could do binary search.
for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) ==
+ if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
native_pc_offset) {
return stack_map;
}
@@ -1324,23 +1426,17 @@ class CodeInfo {
// Check that the code info has valid stack map and abort if it does not.
void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
- if (region_.size() != 0 && region_.size() < GetStackMapsSize(encoding)) {
+ if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
LOG(FATAL) << region_.size() << "\n"
- << encoding.header_size << "\n"
- << encoding.non_header_size << "\n"
- << encoding.number_of_location_catalog_entries << "\n"
- << encoding.number_of_stack_maps << "\n"
- << encoding.stack_map_encoding.BitSize();
+ << encoding.HeaderSize() << "\n"
+ << encoding.NonHeaderSize() << "\n"
+ << encoding.location_catalog.num_entries << "\n"
+ << encoding.stack_map.num_entries << "\n"
+ << encoding.stack_map.encoding.BitSize();
}
}
private:
- ALWAYS_INLINE MemoryRegion GetStackMaps(const CodeInfoEncoding& encoding) const {
- return region_.size() == 0
- ? MemoryRegion()
- : region_.Subregion(GetStackMapsOffset(encoding), GetStackMapsSize(encoding));
- }
-
// Compute the size of the Dex register map associated to the stack map at
// `dex_register_map_offset_in_code_info`.
size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index c92305f373..8d946262e8 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -80,7 +80,34 @@ inline void Thread::CheckSuspend() {
}
}
-inline void Thread::CheckEmptyCheckpoint() {
+inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
+ Thread* self = Thread::Current();
+ DCHECK_EQ(self, this);
+ for (;;) {
+ if (ReadFlag(kEmptyCheckpointRequest)) {
+ RunEmptyCheckpoint();
+ // Check we hold only an expected mutex when accessing weak ref.
+ if (kIsDebugBuild) {
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
+ if (held_mutex != nullptr &&
+ held_mutex != Locks::mutator_lock_ &&
+ held_mutex != cond_var_mutex) {
+ std::vector<BaseMutex*>& expected_mutexes = Locks::expected_mutexes_on_weak_ref_access_;
+ CHECK(std::find(expected_mutexes.begin(), expected_mutexes.end(), held_mutex) !=
+ expected_mutexes.end())
+ << "Holding unexpected mutex " << held_mutex->GetName()
+ << " when accessing weak ref";
+ }
+ }
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+inline void Thread::CheckEmptyCheckpointFromMutex() {
DCHECK_EQ(Thread::Current(), this);
for (;;) {
if (ReadFlag(kEmptyCheckpointRequest)) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d843de5e7f..7b6540436a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -65,6 +65,7 @@
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -77,7 +78,7 @@
#include "thread-inl.h"
#include "utils.h"
#include "verifier/method_verifier.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "well_known_classes.h"
#include "interpreter/interpreter.h"
@@ -1583,15 +1584,24 @@ void Thread::DumpState(std::ostream& os) const {
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
+ StackDumpVisitor(std::ostream& os_in,
+ Thread* thread_in,
+ Context* context,
+ bool can_allocate_in,
+ bool check_suspended = true,
+ bool dump_locks_in = true)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread_in,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
os(os_in),
can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
repetition_count(0),
- frame_count(0) {}
+ frame_count(0),
+ dump_locks(dump_locks_in) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1636,8 +1646,10 @@ struct StackDumpVisitor : public StackVisitor {
if (frame_count == 0) {
Monitor::DescribeWait(os, GetThread());
}
- if (can_allocate) {
+ if (can_allocate && dump_locks) {
// Visit locks, but do not abort on errors. This would trigger a nested abort.
+ // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in
+ // RegTypeCache::RegTypeCache due to thread_list_lock.
Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
@@ -1681,6 +1693,7 @@ struct StackDumpVisitor : public StackVisitor {
int last_line_number;
int repetition_count;
int frame_count;
+ const bool dump_locks;
};
static bool ShouldShowNativeStack(const Thread* thread)
@@ -1712,7 +1725,7 @@ static bool ShouldShowNativeStack(const Thread* thread)
return current_method != nullptr && current_method->IsNative();
}
-void Thread::DumpJavaStack(std::ostream& os) const {
+void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
// If flip_function is not null, it means we have run a checkpoint
// before the thread wakes up to execute the flip function and the
// thread roots haven't been forwarded. So the following access to
@@ -1741,7 +1754,7 @@ void Thread::DumpJavaStack(std::ostream& os) const {
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
- !tls32_.throwing_OutOfMemoryError);
+ !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
dumper.WalkStack();
if (have_exception) {
@@ -1767,10 +1780,15 @@ void Thread::DumpStack(std::ostream& os,
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack));
+ ArtMethod* method =
+ GetCurrentMethod(nullptr,
+ /*check_suspended*/ !force_dump_stack,
+ /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
- DumpJavaStack(os);
+ DumpJavaStack(os,
+ /*check_suspended*/ !force_dump_stack,
+ /*dump_locks*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
@@ -1845,6 +1863,7 @@ Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
interrupted_(false),
+ custom_tls_(nullptr),
can_call_into_java_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
@@ -2918,9 +2937,12 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
@@ -2944,8 +2966,13 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
const bool abort_on_error_;
};
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended,
+ bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this),
+ nullptr,
+ check_suspended,
+ abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -3457,4 +3484,15 @@ bool Thread::IsAotCompiler() {
return Runtime::Current()->IsAotCompiler();
}
+mirror::Object* Thread::GetPeerFromOtherThread() const {
+ mirror::Object* peer = GetPeer();
+ if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ // We may call Thread::Dump() in the middle of the CC thread flip, in which case this
+ // thread's stack may not have been flipped yet and the peer may be a from-space (stale)
+ // ref. So explicitly mark/forward it here.
+ peer = art::ReadBarrier::Mark(peer);
+ }
+ return peer;
+}
+
} // namespace art
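
GetPeerFromOtherThread above exists because, in the middle of a concurrent-copying thread flip, another thread's stack can still hold a from-space reference that must be forwarded before use. Reduced to a toy model (this is not ART's ReadBarrier, which marks through the collector): moved objects carry a forwarding pointer and the mark step chases it.

    #include <cstdio>

    // Toy object: `forward` is non-null once the GC has copied it to-space.
    struct Obj {
      int value;
      Obj* forward = nullptr;
    };

    // Toy analogue of the mark/forward step: always hand back the to-space copy.
    Obj* Mark(Obj* ref) {
      return (ref != nullptr && ref->forward != nullptr) ? ref->forward : ref;
    }

    int main() {
      Obj to_space{42};
      Obj from_space{42};
      from_space.forward = &to_space;  // The GC moved the object mid-flip.
      Obj* stale = &from_space;        // What an unflipped stack might hold.
      std::printf("stale=%p forwarded=%p\n", static_cast<void*>(stale),
                  static_cast<void*>(Mark(stale)));
      return 0;
    }
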
diff --git a/runtime/thread.h b/runtime/thread.h
index b59eac68e9..a46e799d72 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -176,7 +176,8 @@ class Thread {
void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
// Process a pending empty checkpoint if pending.
- void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
+ void CheckEmptyCheckpointFromMutex();
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
ObjPtr<mirror::Object> thread_peer)
@@ -201,7 +202,9 @@ class Thread {
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpJavaStack(std::ostream& os) const
+ void DumpJavaStack(std::ostream& os,
+ bool check_suspended = true,
+ bool dump_locks = true) const
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -359,6 +362,10 @@ class Thread {
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
+ // GetPeer is not safe when called from another thread in the middle of the CC thread flip,
+ // since the thread's stack may not have been flipped yet and the peer may be a from-space
+ // (stale) ref. This function will explicitly mark/forward it.
+ mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
bool HasPeer() const {
return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
@@ -411,7 +418,9 @@ class Thread {
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
- ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
+ ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended = true,
+ bool abort_on_error = true) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index df8acc37a2..caed36936a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -379,13 +379,15 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback
return count;
}
-size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids) {
+void ThreadList::RunEmptyCheckpoint() {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
-
+ std::vector<uint32_t> runnable_thread_ids;
size_t count = 0;
+ Barrier* barrier = empty_checkpoint_barrier_.get();
+ barrier->Init(self, 0);
{
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
@@ -415,8 +417,72 @@ size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids
// checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
-
- return count;
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ uint64_t total_wait_time = 0;
+ bool first_iter = true;
+ while (true) {
+ // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
+ // on a weak ref access, holds (indirectly blocking weak ref access through another thread
+ // and a mutex). This needs to be done periodically because a thread may be preempted
+ // between the CheckEmptyCheckpointFromMutex call and the subsequent futex wait in
+ // Mutex::ExclusiveLock, etc., when the wakeup via WakeupToRespondToEmptyCheckpoint
+ // arrives. Without the periodic retry this could cause a *very rare* deadlock. Most cases
+ // are handled in the first iteration.
+ for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
+ mutex->WakeupToRespondToEmptyCheckpoint();
+ }
+ static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100; // 100ms
+ static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000; // 10 minutes.
+ size_t barrier_count = first_iter ? count : 0;
+ first_iter = false; // Don't add to the barrier count from the second iteration on.
+ bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
+ if (!timed_out) {
+ break; // Success
+ }
+ // This is a very rare case.
+ total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
+ if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
+ std::ostringstream ss;
+ ss << "Empty checkpoint timeout\n";
+ ss << "Barrier count " << barrier->GetCount(self) << "\n";
+ ss << "Runnable thread IDs";
+ for (uint32_t tid : runnable_thread_ids) {
+ ss << " " << tid;
+ }
+ ss << "\n";
+ Locks::mutator_lock_->Dump(ss);
+ ss << "\n";
+ LOG(FATAL_WITHOUT_ABORT) << ss.str();
+ // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
+ // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
+ {
+ ScopedObjectAccess soa(self);
+ MutexLock mu1(self, *Locks::thread_list_lock_);
+ for (Thread* thread : GetList()) {
+ uint32_t tid = thread->GetThreadId();
+ bool is_in_runnable_thread_ids =
+ std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
+ runnable_thread_ids.end();
+ if (is_in_runnable_thread_ids &&
+ thread->ReadFlag(kEmptyCheckpointRequest)) {
+ // Found a runnable thread that hasn't responded to the empty checkpoint request.
+ // Assume it's stuck and safe to dump its stack.
+ thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
+ /*dump_native_stack*/ true,
+ /*backtrace_map*/ nullptr,
+ /*force_dump_stack*/ true);
+ }
+ }
+ }
+ LOG(FATAL_WITHOUT_ABORT)
+ << "Dumped runnable threads that haven't responded to empty checkpoint.";
+ // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
+ Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ LOG(FATAL) << "Dumped all threads.";
+ }
+ }
+ }
}
// Request that a checkpoint function be run on all active (non-suspended)
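
The replacement loop above is a barrier wait with a short periodic timeout: every round re-runs the wakeups so a thread preempted between seeing the request and blocking still gets prodded, and a debug deadline dumps the stuck threads. A reduced sketch of the pattern with standard primitives (timeouts and counts are illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Wait for `remaining` workers to check in, re-running `wakeup` every
    // `period` (cf. the 100ms retry) up to a `total` deadline.
    template <typename Wakeup>
    bool WaitWithPeriodicWakeup(std::mutex& m, std::condition_variable& cv,
                                int& remaining, Wakeup wakeup,
                                std::chrono::milliseconds period,
                                std::chrono::milliseconds total) {
      std::unique_lock<std::mutex> lock(m);
      std::chrono::milliseconds waited{0};
      while (remaining > 0) {
        wakeup();  // Nudge anyone stuck between "saw request" and "blocked".
        if (cv.wait_for(lock, period, [&] { return remaining == 0; })) break;
        waited += period;
        if (waited >= total) return false;  // Timed out; caller dumps state.
      }
      return true;
    }

    int main() {
      std::mutex m;
      std::condition_variable cv;
      int remaining = 2;
      auto worker = [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        std::lock_guard<std::mutex> lock(m);
        if (--remaining == 0) cv.notify_all();
      };
      std::thread t1(worker), t2(worker);
      bool ok = WaitWithPeriodicWakeup(m, cv, remaining, [] {},
                                       std::chrono::milliseconds(10),
                                       std::chrono::milliseconds(1000));
      t1.join();
      t2.join();
      std::printf("%s\n", ok ? "all threads checked in" : "timed out");
      return 0;
    }
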
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b60fca1fdc..70917eb0f7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -109,9 +109,7 @@ class ThreadList {
// in-flight mutator heap access (e.g. a read barrier). Runnable threads will respond by
// decrementing the empty checkpoint barrier count. This works even when the weak ref access is
// disabled. Only one concurrent use is currently supported.
- // In debug build, runnable_thread_ids will be populated with the thread IDS of the runnable
- // thread to wait for.
- size_t RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids)
+ void RunEmptyCheckpoint()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 25369686fd..56ff0a13ac 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -41,12 +41,12 @@ Transaction::~Transaction() {
MutexLock mu(Thread::Current(), log_lock_);
size_t objects_count = object_logs_.size();
size_t field_values_count = 0;
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
field_values_count += it.second.Size();
}
size_t array_count = array_logs_.size();
size_t array_values_count = 0;
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
array_values_count += it.second.Size();
}
size_t intern_string_count = intern_string_logs_.size();
@@ -100,24 +100,30 @@ const std::string& Transaction::GetAbortMessage() {
return abort_message_;
}
-void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
- uint8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogBooleanValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
- int8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogByteValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
- uint16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -125,8 +131,10 @@ void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_o
}
-void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
- int16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -134,7 +142,9 @@ void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_
}
-void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+void Transaction::RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -142,7 +152,9 @@ void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_off
object_log.Log32BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+void Transaction::RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -150,8 +162,10 @@ void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_off
object_log.Log64BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile) {
+void Transaction::RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -163,8 +177,12 @@ void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t
DCHECK(array->IsArrayInstance());
DCHECK(!array->IsObjectArray());
MutexLock mu(Thread::Current(), log_lock_);
- ArrayLog& array_log = array_logs_[array];
- array_log.LogValue(index, value);
+ auto it = array_logs_.find(array);
+ if (it == array_logs_.end()) {
+ ArrayLog log;
+ it = array_logs_.emplace(array, std::move(log)).first;
+ }
+ it->second.LogValue(index, value);
}
void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
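The find-or-emplace sequence above replaces array_logs_[array], making the in-place construction explicit now that ArrayLog is move-only (see the transaction.h changes below). A generic sketch of the idiom, assuming a default-constructible, move-only mapped type V:

#include <map>

template <typename K, typename V>
V& FindOrEmplace(std::map<K, V>& map, const K& key) {
  auto it = map.find(key);
  if (it == map.end()) {
    it = map.emplace(key, V()).first;  // Move-constructs the value in place; no copy of V.
  }
  return it->second;
}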
@@ -172,33 +190,33 @@ void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
DCHECK(dex_cache != nullptr);
DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
MutexLock mu(Thread::Current(), log_lock_);
- resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
+ resolve_string_logs_.emplace_back(dex_cache, string_idx);
}
void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
-void Transaction::LogInternedString(const InternStringLog& log) {
+void Transaction::LogInternedString(InternStringLog&& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
- intern_string_logs_.push_front(log);
+ intern_string_logs_.push_front(std::move(log));
}
void Transaction::Rollback() {
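Each Record*String helper above now hands its local log to LogInternedString by rvalue reference, so the GcRoot payload is moved into the list rather than copied. The shape of the sink, with Entry as a hypothetical stand-in for InternStringLog:

#include <list>
#include <utility>

void LogEntry(std::list<Entry>& entries, Entry&& entry) {
  entries.push_front(std::move(entry));  // Transfers ownership; no copy constructor required.
}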
@@ -216,7 +234,7 @@ void Transaction::Rollback() {
void Transaction::UndoObjectModifications() {
// TODO we may not need to restore objects allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
it.second.Undo(it.first);
}
object_logs_.clear();
@@ -225,7 +243,7 @@ void Transaction::UndoObjectModifications() {
void Transaction::UndoArrayModifications() {
// TODO we may not need to restore arrays allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
it.second.Undo(it.first);
}
array_logs_.clear();
@@ -235,7 +253,7 @@ void Transaction::UndoInternStringTableModifications() {
InternTable* const intern_table = Runtime::Current()->GetInternTable();
// We want to undo each operation from the most recent to the oldest. The list was filled so that
// the most recent operation is at its beginning, so we just have to iterate over it.
- for (InternStringLog& string_log : intern_string_logs_) {
+ for (const InternStringLog& string_log : intern_string_logs_) {
string_log.Undo(intern_table);
}
intern_string_logs_.clear();
@@ -262,7 +280,7 @@ void Transaction::VisitObjectLogs(RootVisitor* visitor) {
std::list<ObjectPair> moving_roots;
// Visit roots.
- for (auto it : object_logs_) {
+ for (auto& it : object_logs_) {
it.second.VisitRoots(visitor);
mirror::Object* old_root = it.first;
mirror::Object* new_root = old_root;
@@ -279,7 +297,7 @@ void Transaction::VisitObjectLogs(RootVisitor* visitor) {
auto old_root_it = object_logs_.find(old_root);
CHECK(old_root_it != object_logs_.end());
CHECK(object_logs_.find(new_root) == object_logs_.end());
- object_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ object_logs_.emplace(new_root, std::move(old_root_it->second));
object_logs_.erase(old_root_it);
}
}
@@ -289,7 +307,7 @@ void Transaction::VisitArrayLogs(RootVisitor* visitor) {
typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
std::list<ArrayPair> moving_roots;
- for (auto it : array_logs_) {
+ for (auto& it : array_logs_) {
mirror::Array* old_root = it.first;
CHECK(!old_root->IsObjectArray());
mirror::Array* new_root = old_root;
@@ -306,7 +324,7 @@ void Transaction::VisitArrayLogs(RootVisitor* visitor) {
auto old_root_it = array_logs_.find(old_root);
CHECK(old_root_it != array_logs_.end());
CHECK(array_logs_.find(new_root) == array_logs_.end());
- array_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ array_logs_.emplace(new_root, std::move(old_root_it->second));
array_logs_.erase(old_root_it);
}
}
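Both visitors buffer the moved roots in a side list and re-key the map only after the walk, since erasing the current entry mid-iteration would invalidate the live iterator (and a re-inserted key could be visited again). The pattern in isolation; Key, logs, and MaybeRelocate are illustrative stand-ins for the pointer keys, the log map, and the visitor callback:

std::list<std::pair<Key, Key>> moving_roots;
for (auto& entry : logs) {
  Key new_key = MaybeRelocate(entry.first);
  if (new_key != entry.first) {
    moving_roots.emplace_back(entry.first, new_key);  // Defer the map mutation.
  }
}
for (const auto& pair : moving_roots) {
  auto it = logs.find(pair.first);
  logs.emplace(pair.second, std::move(it->second));  // Steal the log; values are move-only.
  logs.erase(it);
}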
@@ -347,23 +365,27 @@ void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value,
LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}
-void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) {
+void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
+ mirror::Object* obj,
+ bool is_volatile) {
LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
- MemberOffset offset, uint64_t value, bool is_volatile) {
+ MemberOffset offset,
+ uint64_t value,
+ bool is_volatile) {
auto it = field_values_.find(offset.Uint32Value());
if (it == field_values_.end()) {
ObjectLog::FieldValue field_value;
field_value.value = value;
field_value.is_volatile = is_volatile;
field_value.kind = kind;
- field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
+ field_values_.emplace(offset.Uint32Value(), std::move(field_value));
}
}
-void Transaction::ObjectLog::Undo(mirror::Object* obj) {
+void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
for (auto& it : field_values_) {
// The garbage collector needs to access the object's class and the array's length, so we don't
// roll back these values.
@@ -377,60 +399,71 @@ void Transaction::ObjectLog::Undo(mirror::Object* obj) {
// Skip Array::length field.
continue;
}
- FieldValue& field_value = it.second;
+ const FieldValue& field_value = it.second;
UndoFieldWrite(obj, field_offset, field_value);
}
}
-void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) {
+void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
constexpr bool kCheckTransaction = true;
switch (field_value.kind) {
case kBoolean:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
} else {
- obj->SetFieldBoolean<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBoolean<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
}
break;
case kByte:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByteVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
} else {
- obj->SetFieldByte<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByte<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
}
break;
case kChar:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldCharVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
} else {
- obj->SetFieldChar<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldChar<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
}
break;
case kShort:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShortVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
} else {
- obj->SetFieldShort<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShort<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
}
break;
case k32Bits:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32Volatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
} else {
- obj->SetField32<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
}
break;
case k64Bits:
@@ -442,11 +475,13 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
break;
case kReference:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldObjectVolatile<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObjectVolatile<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
} else {
- obj->SetFieldObject<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObject<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
}
break;
default:
@@ -456,7 +491,7 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
}
void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
- for (auto it : field_values_) {
+ for (auto& it : field_values_) {
FieldValue& field_value = it.second;
if (field_value.kind == ObjectLog::kReference) {
visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
@@ -465,7 +500,7 @@ void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
}
}
-void Transaction::InternStringLog::Undo(InternTable* intern_table) {
+void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
DCHECK(intern_table != nullptr);
switch (string_op_) {
case InternStringLog::kInsert: {
@@ -506,7 +541,7 @@ void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}
-void Transaction::ResolveStringLog::Undo() {
+void Transaction::ResolveStringLog::Undo() const {
dex_cache_.Read()->ClearString(string_idx_);
}
@@ -538,7 +573,7 @@ void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
}
}
-void Transaction::ArrayLog::Undo(mirror::Array* array) {
+void Transaction::ArrayLog::Undo(mirror::Array* array) const {
DCHECK(array != nullptr);
DCHECK(array->IsArrayInstance());
Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
@@ -547,8 +582,10 @@ void Transaction::ArrayLog::Undo(mirror::Array* array) {
}
}
-void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array, Primitive::Type array_type,
- size_t index, uint64_t value) {
+void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
switch (array_type) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 1774657d40..7aa98cd33d 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -56,26 +56,40 @@ class Transaction FINAL {
bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
- void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+ void RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
- bool is_volatile)
+ void RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+ void RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+ void RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+ void RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+ void RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile)
+ void RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile)
REQUIRES(!log_lock_);
// Record array change.
@@ -122,13 +136,16 @@ class Transaction FINAL {
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
}
+ ObjectLog() = default;
+ ObjectLog(ObjectLog&& log) = default;
+
private:
enum FieldValueKind {
kBoolean,
@@ -144,33 +161,49 @@ class Transaction FINAL {
uint64_t value;
FieldValueKind kind;
bool is_volatile;
+
+ FieldValue() = default;
+ FieldValue(FieldValue&& log) = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldValue);
};
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
- void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectLog);
};
class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
}
+ ArrayLog() = default;
+ ArrayLog(ArrayLog&& log) = default;
+
private:
- void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps index to value.
// TODO: use JValue instead?
std::map<size_t, uint64_t> array_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayLog);
};
class InternStringLog : public ValueObject {
@@ -185,31 +218,38 @@ class Transaction FINAL {
};
InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
- void Undo(InternTable* intern_table)
+ void Undo(InternTable* intern_table) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ InternStringLog() = default;
+ InternStringLog(InternStringLog&& log) = default;
+
private:
- GcRoot<mirror::String> str_;
+ mutable GcRoot<mirror::String> str_;
const StringKind string_kind_;
const StringOp string_op_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternStringLog);
};
class ResolveStringLog : public ValueObject {
public:
ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
- void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
GcRoot<mirror::DexCache> dex_cache_;
const dex::StringIndex string_idx_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
};
- void LogInternedString(const InternStringLog& log)
+ void LogInternedString(InternStringLog&& log)
REQUIRES(Locks::intern_table_lock_)
REQUIRES(!log_lock_);
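The log classes above now share a single move-only idiom: a defaulted move constructor so containers can relocate entries, plus ART's DISALLOW_COPY_AND_ASSIGN macro, which deletes the copy operations. The skeleton:

class SomeLog : public ValueObject {
 public:
  SomeLog() = default;
  SomeLog(SomeLog&& other) = default;  // Moves are allowed (containers need them).

 private:
  DISALLOW_COPY_AND_ASSIGN(SomeLog);  // No accidental, potentially costly copies.
};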
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index bd1b044dae..2812c21004 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -48,9 +48,11 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
-inline constexpr size_t DexCacheArraysLayout::Alignment() {
- // GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
- static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
+constexpr size_t DexCacheArraysLayout::Alignment() {
+ // mirror::Type/String/MethodTypeDexCacheType alignment is 8,
+ // i.e. higher than or equal to the pointer alignment.
+ static_assert(alignof(mirror::TypeDexCacheType) == 8,
+ "Expecting alignof(TypeDexCacheType) == 8");
static_assert(alignof(mirror::StringDexCacheType) == 8,
"Expecting alignof(StringDexCacheType) == 8");
static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
@@ -60,17 +62,22 @@ inline constexpr size_t DexCacheArraysLayout::Alignment() {
}
template <typename T>
-static constexpr PointerSize GcRootAsPointerSize() {
+constexpr PointerSize GcRootAsPointerSize() {
static_assert(sizeof(GcRoot<T>) == 4U, "Unexpected GcRoot size");
return PointerSize::k32;
}
inline size_t DexCacheArraysLayout::TypeOffset(dex::TypeIndex type_idx) const {
- return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx.index_);
+ return types_offset_ + ElementOffset(PointerSize::k64,
+ type_idx.index_ % mirror::DexCache::kDexCacheTypeCacheSize);
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
+ size_t cache_size = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+ return ArraySize(PointerSize::k64, cache_size);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
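TypeOffset() and TypesSize() above now address a fixed-capacity, hash-bucketed cache of 8-byte <type index, class> pairs instead of a plain GcRoot<> array, which is why the element size is PointerSize::k64 and the raw index is reduced modulo the capacity. The slot arithmetic in isolation, with the capacity passed as a parameter (in ART it is mirror::DexCache::kDexCacheTypeCacheSize):

size_t TypeSlot(uint32_t type_idx, size_t cache_capacity) {
  return type_idx % cache_capacity;  // Distinct type indices may share a slot.
}
// Byte offset of a slot: types_offset_ + TypeSlot(idx, capacity) * sizeof(uint64_t).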
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b915457557..9598870dcf 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -415,12 +415,12 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
result.kind = kSoftFailure;
if (method != nullptr &&
!CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
- method->AddAccessFlags(kAccCompileDontBother);
+ method->SetDontCompile();
}
}
if (method != nullptr) {
if (verifier.HasInstructionThatWillThrow()) {
- method->AddAccessFlags(kAccCompileDontBother);
+ method->SetDontCompile();
if (Runtime::Current()->IsAotCompiler() &&
(callbacks != nullptr) && !callbacks->IsBootImage()) {
// When compiling apps, make HasInstructionThatWillThrow a soft error to trigger
@@ -2399,7 +2399,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
- mirror::Class* klass = dex_cache_->GetResolvedType(type_idx);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache_.Get(), class_loader_.Get());
if (klass != nullptr && klass->IsPrimitive()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
<< dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
@@ -3684,9 +3685,16 @@ inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
}
const RegType& MethodVerifier::ResolveClassAndCheckAccess(dex::TypeIndex class_idx) {
- mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+ mirror::Class* klass = can_load_classes_
+ ? Runtime::Current()->GetClassLinker()->ResolveType(
+ *dex_file_, class_idx, dex_cache_, class_loader_)
+ : ClassLinker::LookupResolvedType(class_idx, dex_cache_.Get(), class_loader_.Get()).Ptr();
+ if (can_load_classes_ && klass == nullptr) {
+ DCHECK(self_->IsExceptionPending());
+ self_->ClearException();
+ }
const RegType* result = nullptr;
- if (klass != nullptr) {
+ if (klass != nullptr && !klass->IsErroneous()) {
bool precise = klass->CannotBeAssignedFromOtherTypes();
if (precise && !IsInstantiableOrPrimitive(klass)) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
@@ -3709,10 +3717,6 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(dex::TypeIndex class_i
<< "' in " << GetDeclaringClass();
return *result;
}
- if (klass == nullptr && !result->IsUnresolvedTypes()) {
- klass = result->GetClass();
- dex_cache_->SetResolvedType(class_idx, klass);
- }
// Record result of class resolution attempt.
VerifierDeps::MaybeRecordClassResolution(*dex_file_, class_idx, klass);
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 43151dd425..363fde220d 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -19,33 +19,11 @@
#include "verify_object.h"
-#include "gc/heap.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
namespace art {
-inline void VerifyObject(ObjPtr<mirror::Object> obj) {
- if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
- if (kVerifyObjectSupport > kVerifyObjectModeFast) {
- // Slow object verification, try the heap right away.
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- } else {
- // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
- // print the diagnostic message.
- bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
- if (!failed) {
- mirror::Class* c = obj->GetClass<kVerifyNone>();
- failed = failed || !IsAligned<kObjectAlignment>(c);
- failed = failed || !VerifyClassClass(c);
- }
- if (UNLIKELY(failed)) {
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- }
- }
- }
-}
-
inline bool VerifyClassClass(ObjPtr<mirror::Class> c) {
if (UNLIKELY(c == nullptr)) {
return false;
diff --git a/runtime/verify_object.cc b/runtime/verify_object.cc
new file mode 100644
index 0000000000..a031a07a94
--- /dev/null
+++ b/runtime/verify_object.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_object-inl.h"
+
+#include "base/bit_utils.h"
+#include "gc/heap.h"
+#include "globals.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) {
+ if (kVerifyObjectSupport > kVerifyObjectModeFast) {
+ // Slow object verification, try the heap right away.
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ } else {
+ // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
+ // print the diagnostic message.
+ bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
+ if (!failed) {
+ mirror::Class* c = obj->GetClass<kVerifyNone>();
+ failed = failed || !IsAligned<kObjectAlignment>(c);
+ failed = failed || !VerifyClassClass(c);
+ }
+ if (UNLIKELY(failed)) {
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ }
+ }
+}
+
+} // namespace art
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 384e56f7f4..519f7f5f5a 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -53,7 +53,16 @@ static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone;
static constexpr VerifyObjectMode kVerifyObjectSupport =
kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
-ALWAYS_INLINE void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+// Implements the actual object checks.
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+
+// A forwarding wrapper that lets the compiler optimize out the call when verification is disabled.
+ALWAYS_INLINE
+static inline void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS {
+ if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
+ VerifyObjectImpl(obj);
+ }
+}
// Check that c.getClass() == c.getClass().getClass().
ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
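Splitting VerifyObject() into an always-inline guard and an out-of-line VerifyObjectImpl() lets the compiler fold the whole call away when kVerifyObjectSupport is kVerifyObjectModeDisabled, while keeping the heavy checks out of every call site. The same shape for any flag-gated check, with illustrative names:

inline void MaybeCheck(Object* obj) {
  if (kChecksEnabled && obj != nullptr) {  // Compile-time constant; dead-code eliminated when off.
    CheckImpl(obj);  // Out-of-line slow path, defined in a .cc file.
  }
}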
diff --git a/test/004-NativeAllocations/src/Main.java b/test/004-NativeAllocations/src/Main.java
index 92f4e21f40..8712755125 100644
--- a/test/004-NativeAllocations/src/Main.java
+++ b/test/004-NativeAllocations/src/Main.java
@@ -16,6 +16,7 @@
import java.lang.reflect.*;
import java.lang.Runtime;
+import dalvik.system.VMRuntime;
public class Main {
static Object nativeLock = new Object();
@@ -33,10 +34,19 @@ public class Main {
NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
this.bytes = bytes;
register_native_allocation.invoke(runtime, bytes);
+
+ // registerNativeAllocation can only guarantee a bound on the maximum
+ // outstanding allocations if finalizers don't time out. In case
+ // finalizers have timed out, wait longer for them to complete now so
+ // we can test the guarantees.
+ if (!testingDeadlock) {
+ VMRuntime.runFinalization(0);
+ }
+
synchronized (nativeLock) {
if (!testingDeadlock) {
nativeBytes += bytes;
- if (nativeBytes > maxMem) {
+ if (nativeBytes > 2 * maxMem) {
throw new OutOfMemoryError();
}
}
diff --git a/test/155-java-set-resolved-type/expected.txt b/test/155-java-set-resolved-type/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/155-java-set-resolved-type/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/155-java-set-resolved-type/info.txt b/test/155-java-set-resolved-type/info.txt
new file mode 100644
index 0000000000..ba5bc0ad61
--- /dev/null
+++ b/test/155-java-set-resolved-type/info.txt
@@ -0,0 +1,2 @@
+Regression test for Java call to DexCache.setResolvedType() storing the
+type in the dex cache while it was not in the class loader's class table.
diff --git a/test/155-java-set-resolved-type/src-ex/TestInterface.java b/test/155-java-set-resolved-type/src-ex/TestInterface.java
new file mode 100644
index 0000000000..037c760765
--- /dev/null
+++ b/test/155-java-set-resolved-type/src-ex/TestInterface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface TestInterface {
+ public void foo();
+}
diff --git a/test/155-java-set-resolved-type/src/Main.java b/test/155-java-set-resolved-type/src/Main.java
new file mode 100644
index 0000000000..56b8c3ece9
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/Main.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+ public static String TEST_NAME = "155-java-set-resolved-type";
+
+ public static void main(String[] args) {
+ try {
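+ // Check if we're running dalvik or RI.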
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ System.loadLibrary(args[0]);
+ } catch (ClassNotFoundException e) {
+ usingRI = true;
+ // Add expected JNI_OnLoad log line to match expected.txt.
+ System.out.println("JNI_OnLoad called");
+ }
+ try {
+ String dex_location = System.getenv("DEX_LOCATION");
+ ClassLoader systemLoader = ClassLoader.getSystemClassLoader().getParent();
+ ClassLoader exLoader = getClassLoaderFor(dex_location, systemLoader, /* ex */ true);
+ ClassLoader mainLoader = getClassLoaderFor(dex_location, exLoader, /* ex */ false);
+
+ // Resolve TestParameter class. It shall be defined by mainLoader.
+ // This does not resolve method parameter types.
+ Class<?> tpc = Class.forName("TestParameter", false, mainLoader);
+ // Get declared methods of TestParameter.
+ // This still does not resolve method parameter types.
+ Method[] ms = tpc.getDeclaredMethods();
+ if (ms == null || ms.length != 1) { throw new Error("Unexpected methods"); }
+ // Call getParameterTypes() to resolve parameter types. The parameter type
+ // TestInterface shall be defined by the exLoader. This used to store the
+ // TestInterface class in the dex cache resolved types for the mainLoader
+ // but not in the mainLoader's class table. This discrepancy used to cause
+ // a crash further down.
+ ms[0].getParameterTypes();
+
+ // Resolve but do not initialize TestImplementation. During the resolution,
+ // we see the TestInterface in the dex cache, so we do not try to look it up
+ // or resolve it using the mainLoader.
+ Class<?> timpl = Class.forName("TestImplementation", false, mainLoader);
+ // Clear the dex cache resolved types to force a proper lookup the next time
+ // we need to find TestInterface.
+ clearResolvedTypes(timpl);
+
+ // Force initialization of TestImplementation. This expects the interface type to be
+ // resolved and found through simple lookup.
+ timpl.newInstance();
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
+
+ public static ClassLoader getClassLoaderFor(String location, ClassLoader parent, boolean ex)
+ throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor =
+ class_loader_class.getConstructor(String.class, ClassLoader.class);
+ /* on Dalvik, this is a DexFile; otherwise, it's null */
+ String path = location + "/" + TEST_NAME + (ex ? "-ex.jar" : ".jar");
+ return (ClassLoader)ctor.newInstance(path, parent);
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ String url = "file://" + location + (ex ? "/classes-ex/" : "/classes/");
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL(url) }, parent);
+ }
+ }
+
+ public static void clearResolvedTypes(Class<?> c) {
+ if (!usingRI) {
+ nativeClearResolvedTypes(c);
+ }
+ }
+
+ private static boolean usingRI = false;
+
+ public static native void nativeClearResolvedTypes(Class<?> c);
+}
diff --git a/test/155-java-set-resolved-type/src/TestImplementation.java b/test/155-java-set-resolved-type/src/TestImplementation.java
new file mode 100644
index 0000000000..4a3e74d157
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestImplementation.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestImplementation implements TestInterface {
+ public void foo() { }
+}
diff --git a/test/155-java-set-resolved-type/src/TestInterface.java b/test/155-java-set-resolved-type/src/TestInterface.java
new file mode 100644
index 0000000000..037c760765
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestInterface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface TestInterface {
+ public void foo();
+}
diff --git a/test/155-java-set-resolved-type/src/TestParameter.java b/test/155-java-set-resolved-type/src/TestParameter.java
new file mode 100644
index 0000000000..c881f3f8cf
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestParameter.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestParameter {
+ public void bar(TestInterface ti) { }
+}
diff --git a/test/156-register-dex-file-multi-loader/expected.txt b/test/156-register-dex-file-multi-loader/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/expected.txt
diff --git a/test/156-register-dex-file-multi-loader/info.txt b/test/156-register-dex-file-multi-loader/info.txt
new file mode 100644
index 0000000000..49d153ca98
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/info.txt
@@ -0,0 +1,2 @@
+Regression test to check that we do not allow registering the same dex file
+with multiple class loaders.
diff --git a/test/156-register-dex-file-multi-loader/src/Main.java b/test/156-register-dex-file-multi-loader/src/Main.java
new file mode 100644
index 0000000000..ff5a2bd570
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/src/Main.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+
+class MyClassLoader extends ClassLoader {
+ MyClassLoader() throws Exception {
+ super(MyClassLoader.class.getClassLoader());
+
+ // Some magic to get access to the pathList field of BaseDexClassLoader.
+ ClassLoader loader = getClass().getClassLoader();
+ Class<?> baseDexClassLoader = loader.getClass().getSuperclass();
+ Field f = baseDexClassLoader.getDeclaredField("pathList");
+ f.setAccessible(true);
+ Object pathList = f.get(loader);
+
+ // Some magic to get access to the dexElements field of pathList.
+ f = pathList.getClass().getDeclaredField("dexElements");
+ f.setAccessible(true);
+ dexElements = (Object[]) f.get(pathList);
+ dexFileField = dexElements[0].getClass().getDeclaredField("dexFile");
+ dexFileField.setAccessible(true);
+ }
+
+ Object[] dexElements;
+ Field dexFileField;
+
+ protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException {
+ // Mimic what DexPathList.findClass is doing.
+ try {
+ for (Object element : dexElements) {
+ Object dex = dexFileField.get(element);
+ if (dex != null) {
+ Method method = dex.getClass().getDeclaredMethod(
+ "loadClassBinaryName", String.class, ClassLoader.class, List.class);
+ Class<?> clazz = (Class<?>)method.invoke(dex, className, this, null);
+ if (clazz != null) {
+ return clazz;
+ }
+ }
+ }
+ } catch (InvocationTargetException ite) {
+ throw new ClassNotFoundException(className, ite.getCause());
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ return getParent().loadClass(className);
+ }
+}
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ MyClassLoader o = new MyClassLoader();
+ try {
+ Class<?> foo = o.loadClass("Main");
+ throw new Error("Unreachable");
+ } catch (ClassNotFoundException cnfe) {
+ boolean unexpected = false;
+ if (!(cnfe.getCause() instanceof InternalError)) {
+ unexpected = true;
+ } else {
+ String message = cnfe.getCause().getMessage();
+ unexpected = !message.startsWith("Attempt to register dex file ") ||
+ !message.endsWith(" with multiple class loaders");
+ }
+ if (unexpected) {
+ cnfe.getCause().printStackTrace();
+ }
+ }
+ }
+}
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 65dfd411fd..86977d1b8e 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -164,6 +164,12 @@ public class Main {
}
}
+
+ static boolean $opt$noinline$ensureSideEffects() {
+ if (doThrow) throw new Error("");
+ return true;
+ }
+
/// CHECK-START: void Main.loop9() liveness (after)
/// CHECK: <<Arg:z\d+>> StaticFieldGet liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>]
/// CHECK: If [<<Arg>>] liveness:<<IfLiv:\d+>>
@@ -178,7 +184,7 @@ public class Main {
// Add some code at entry to avoid having the entry block be a pre header.
// This avoids having to create a synthesized block.
System.out.println("Enter");
- while (Runtime.getRuntime() != null) {
+ while ($opt$noinline$ensureSideEffects()) {
// 'incoming' must only have a use in the inner loop.
boolean incoming = field;
while (incoming) {}
@@ -189,4 +195,5 @@ public class Main {
}
static boolean field;
+ static boolean doThrow = false;
}
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index bf0cbe66c1..dd77423870 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -283,9 +283,6 @@ public class Main {
return "non-boot-image-string";
}
- /// CHECK-START: java.lang.Class Main.$noinline$getStringClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:java.lang.String
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
@@ -323,9 +320,6 @@ public class Main {
return String.class;
}
- /// CHECK-START: java.lang.Class Main.$noinline$getOtherClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:Other
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index b035896166..ff5ae6bd0d 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,9 @@
*/
#include "jni.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
#include "object_lock.h"
#include "scoped_thread_state_change-inl.h"
@@ -24,7 +27,8 @@ extern "C" JNIEXPORT void JNICALL Java_Main_nativeClearResolvedTypes(JNIEnv*, jc
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache();
for (size_t i = 0, num_types = dex_cache->NumResolvedTypes(); i != num_types; ++i) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), ObjPtr<mirror::Class>(nullptr));
+ mirror::TypeDexCachePair cleared(nullptr, mirror::TypeDexCachePair::InvalidIndexForSlot(i));
+ dex_cache->GetResolvedTypes()[i].store(cleared, std::memory_order_relaxed);
}
}
diff --git a/test/626-const-class-linking/src/Main.java b/test/626-const-class-linking/src/Main.java
index 0029428d90..1bc94a7c7d 100644
--- a/test/626-const-class-linking/src/Main.java
+++ b/test/626-const-class-linking/src/Main.java
@@ -23,8 +23,10 @@ import java.util.ArrayList;
public class Main {
public static void main(String[] args) throws Exception {
try {
+ // Check if we're running dalvik or RI.
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
System.loadLibrary(args[0]);
- } catch (UnsatisfiedLinkError ule) {
+ } catch (ClassNotFoundException e) {
usingRI = true;
// Add expected JNI_OnLoad log line to match expected.txt.
System.out.println("JNI_OnLoad called");
diff --git a/test/636-arm64-veneer-pool/build b/test/636-arm64-veneer-pool/build
new file mode 100755
index 0000000000..27cc4d6d98
--- /dev/null
+++ b/test/636-arm64-veneer-pool/build
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure.
+set -e
+
+# Use javac+dx instead of jack.
+export USE_JACK=false
+./default-build "$@"
diff --git a/test/636-arm64-veneer-pool/expected.txt b/test/636-arm64-veneer-pool/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/636-arm64-veneer-pool/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/636-arm64-veneer-pool/info.txt b/test/636-arm64-veneer-pool/info.txt
new file mode 100644
index 0000000000..2393be0538
--- /dev/null
+++ b/test/636-arm64-veneer-pool/info.txt
@@ -0,0 +1 @@
+Regression test for an issue with VIXL ARM64 veneer pools (b/34850123).
diff --git a/test/636-arm64-veneer-pool/src/Main.java b/test/636-arm64-veneer-pool/src/Main.java
new file mode 100644
index 0000000000..8229fee378
--- /dev/null
+++ b/test/636-arm64-veneer-pool/src/Main.java
@@ -0,0 +1,4223 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class C0 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C2 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C3 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C4 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C5 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mImpl(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C6 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C7 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C8 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C9 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C10 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C11 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C12 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C13 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C14 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C15 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C16 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C17 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C18 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C19 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C20 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C21 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C22 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C23 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C24 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C25 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C26 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C27 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C28 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C29 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C30 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C31 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C32 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C33 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C34 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C35 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C36 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C37 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C38 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C39 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C40 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C41 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C42 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C43 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C44 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C45 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C46 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C47 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C48 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C49 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C50 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C51 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C52 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C53 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C54 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C55 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C56 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C57 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C58 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C59 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C60 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C61 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C62 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C63 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C64 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C65 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C66 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C67 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C68 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C69 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C70 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C71 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C72 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C73 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C74 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C75 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C76 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C77 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C78 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C79 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C80 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C81 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C82 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C83 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C84 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C85 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C86 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C87 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C88 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C89 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C90 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mReport_Factory(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mApi(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C91 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C92 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C93 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C94 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mImpl(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C95 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C96 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C97 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C98 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C99 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C100 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C101 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C102 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C103 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C104 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C105 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void m_InMemoryScanner(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void m_Scanner(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C106 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C107 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C108 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C109 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C110 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C111 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C112 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C113 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C114 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C115 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C116 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C117 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C118 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C119 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C120 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C121 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C122 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C123 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C124 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C125 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C126 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C127 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C128 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C129 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C130 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C131 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C132 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C133 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C134 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C135 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C136 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C137 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C138 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C139 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C140 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C141 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C142 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C143 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C144 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C145 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C146 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C147 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C148 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C149 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C150 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C151 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C152 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C153 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C154 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C155 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C156 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C157 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C158 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C159 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C160 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C161 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C162 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C163 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C164 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C165 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C166 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C167 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C168 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C169 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C170 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C171 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C172 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C173 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C174 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C175 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C176 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C177 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C178 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C179 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C180 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C181 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C182 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C183 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C184 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C185 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C186 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C187 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C188 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C189 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C190 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C191 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C192 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C193 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C194 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C195 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C196 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C197 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C198 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C199 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C200 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C201 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C202 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C203 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C204 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C205 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C206 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C207 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C208 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C209 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C210 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C211 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C212 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C213 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C214 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C215 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C216 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C217 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C218 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C219 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C220 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C221 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C222 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C223 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C224 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C225 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C226 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C227 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C228 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C229 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C230 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C231 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C232 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C233 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C234 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C235 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C236 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C237 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C238 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C239 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C240 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C241 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C242 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C243 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C244 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C245 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C246 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C247 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C248 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C249 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C250 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C251 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C252 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C253 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C254 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C255 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mFactory(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C256 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C257 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C258 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C259 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C260 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C261 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C262 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C263 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C264 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C265 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C266 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C267 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C268 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C269 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C270 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C271 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C272 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C273 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C274 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C275 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C276 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C277 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C278 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C279 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C280 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C281 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C282 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C283 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C284 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C285 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C286 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C287 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C288 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C289 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C290 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C291 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C292 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C293 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C294 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C295 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C296 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C297 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C298 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C299 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C300 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C301 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C302 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C303 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C304 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C305 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C306 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C307 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C308 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C309 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C310 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C311 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C312 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C313 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C314 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C315 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C316 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C317 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C318 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C319 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C320 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C321 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C322 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C323 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C324 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C325 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C326 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C327 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C328 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C329 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C330 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C331 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C332 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C333 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C334 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C335 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C336 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C337 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C338 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C339 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C340 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C341 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C342 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C343 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C344 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C345 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C346 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C347 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C348 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C349 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C350 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C351 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C352 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C353 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C354 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C355 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C356 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C357 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C358 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C359 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C360 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C361 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C362 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C363 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C364 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C365 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C366 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C367 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C368 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C369 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C370 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C371 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C372 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C373 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C374 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C375 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C376 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C377 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C378 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C379 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C380 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C381 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C382 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C383 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C384 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C385 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C386 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C387 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C388 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C389 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C390 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C391 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C392 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C393 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C394 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C395 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C396 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C397 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C398 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C399 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C400 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C401 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C402 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C403 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C404 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C405 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C406 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C407 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C408 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C409 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C410 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C411 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C412 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C413 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C414 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C415 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C416 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C417 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C418 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C419 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C420 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C421 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C422 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C423 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C424 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C425 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C426 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C427 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C428 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C429 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C430 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C431 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C432 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C433 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C434 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C435 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C436 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C437 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C438 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C439 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C440 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C441 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C442 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C443 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C444 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C445 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C446 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C447 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C448 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C449 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C450 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C451 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C452 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C453 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C454 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C455 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C456 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C457 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mMap(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C458 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C459 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C460 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C461 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C462 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C463 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C464 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C465 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C466 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C467 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C468 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C469 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C470 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C471 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C472 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C473 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C474 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C475 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C476 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C477 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C478 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C479 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C480 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C481 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C482 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C483 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C484 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C485 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C486 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C487 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C488 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C489 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C490 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C491 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C492 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C493 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C494 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C495 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C496 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C497 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C498 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C499 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C500 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C501 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C502 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C503 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C504 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C505 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C506 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C507 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C508 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C509 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C510 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C511 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C512 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C513 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C514 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C515 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C516 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C517 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C518 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C519 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C520 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C521 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C522 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C523 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C524 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C525 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C526 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C527 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C528 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C529 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C530 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C531 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C532 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C533 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C534 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C535 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C536 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C537 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C538 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C539 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C540 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C541 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C542 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C543 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C544 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C545 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C546 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C547 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C548 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C549 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C550 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C551 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C552 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C553 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C554 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C555 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C556 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C557 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C558 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C559 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C560 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C561 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C562 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C563 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C564 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C565 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C566 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C567 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C568 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C569 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C570 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C571 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C572 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C573 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C574 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C575 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C576 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C577 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mDebug(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C578 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C579 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C580 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C581 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C582 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C583 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C584 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C585 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C586 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C587 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C588 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C589 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C590 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C591 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C592 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C593 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C594 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C595 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C596 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C597 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C598 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C599 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C600 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C601 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C602 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C603 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C604 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C605 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C606 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C607 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C608 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C609 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C610 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C611 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C612 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C613 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C614 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C615 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C616 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C617 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C618 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C619 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C620 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C621 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C622 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C623 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C624 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C625 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C626 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C627 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C628 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C629 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C630 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C631 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C632 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C633 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C634 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C635 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C636 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C637 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C638 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C639 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C640 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C641 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C642 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C643 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C644 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C645 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C646 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C647 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C648 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C649 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C650 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C651 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C652 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C653 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C654 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C655 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C656 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C657 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C658 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C659 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C660 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C661 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C662 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C663 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C664 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C665 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C666 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C667 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C668 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C669 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C670 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C671 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C672 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C673 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C674 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C675 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C676 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C677 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C678 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C679 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C680 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C681 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C682 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C683 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C684 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C685 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C686 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C687 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C688 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C689 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C690 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C691 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C692 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C693 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C694 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C695 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C696 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C697 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C698 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C699 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C700 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C701 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C702 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C703 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C704 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C705 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C706 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C707 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C708 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C709 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C710 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C711 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C712 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C713 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C714 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C715 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C716 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C717 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C718 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C719 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C720 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C721 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C722 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C723 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C724 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C725 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C726 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C727 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C728 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C729 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C730 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C731 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C732 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C733 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C734 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C735 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C736 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C737 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C738 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C739 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C740 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C741 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C742 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C743 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C744 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C745 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C746 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C747 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C748 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C749 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C750 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C751 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C752 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C753 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C754 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C755 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C756 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C757 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C758 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C759 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C760 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C761 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C762 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C763 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C764 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C765 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C766 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C767 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C768 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C769 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C770 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C771 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C772 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C773 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C774 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C775 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C776 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C777 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C778 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C779 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C780 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C781 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C782 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C783 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C784 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C785 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C786 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C787 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C788 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C789 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C790 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C791 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C792 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C793 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C794 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C795 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C796 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C797 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C798 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C799 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C800 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C801 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C802 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C803 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C804 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C805 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C806 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C807 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C808 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C809 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C810 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C811 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C812 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C813 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C814 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C815 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C816 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C817 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C818 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C819 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C820 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C821 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C822 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C823 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C824 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C825 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C826 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C827 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C828 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C829 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C830 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C831 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C832 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C833 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C834 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C835 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C836 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C837 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C838 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C839 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C840 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C841 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C842 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C843 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C844 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C845 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C846 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C847 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C848 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mMap(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C849 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C850 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C851 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C852 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C853 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C854 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C855 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C856 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C857 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C858 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C859 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C860 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C861 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C862 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C863 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C864 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C865 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C866 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C867 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C868 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C869 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C870 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mImpl(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C871 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C872 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C873 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C874 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C875 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C876 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C877 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C878 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C879 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C880 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C881 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C882 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C883 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C884 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C885 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C886 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C887 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C888 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C889 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C890 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C891 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C892 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C893 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C894 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C895 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C896 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C897 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C898 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C899 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C900 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C901 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C902 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C903 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C904 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C905 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C906 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C907 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C908 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C909 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C910 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C911 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C912 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C913 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C914 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C915 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C916 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C917 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C918 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C919 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C920 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C921 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C922 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C923 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C924 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C925 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C926 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C927 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C928 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C929 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C930 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C931 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C932 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C933 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C934 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C935 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C936 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C937 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C938 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C939 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C940 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C941 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C942 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C943 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C944 {
+ public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+ public static void mManager(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } }
+}
+class C945 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C946 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C947 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C948 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C949 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C950 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C951 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C952 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C953 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C954 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C955 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C956 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C957 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C958 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C959 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C960 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C961 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C962 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C963 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C964 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C965 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C966 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C967 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C968 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C969 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C970 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C971 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C972 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C973 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C974 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C975 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C976 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C977 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C978 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C979 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C980 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C981 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C982 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C983 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C984 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C985 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C986 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C987 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C988 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C989 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C990 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C991 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C992 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C993 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C994 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C995 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C996 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C997 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C998 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C999 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1000 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1001 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1002 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1003 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1004 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1005 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1006 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1007 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1008 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1009 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1010 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1011 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1012 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1013 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1014 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1015 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1016 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1017 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1018 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1019 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1020 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1021 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1022 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1023 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1024 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1025 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1026 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1027 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1028 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1029 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1030 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1031 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1032 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1033 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1034 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1035 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1036 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1037 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1038 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1039 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1040 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1041 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1042 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1043 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1044 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1045 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1046 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1047 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1048 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1049 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1050 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1051 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1052 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1053 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1054 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1055 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1056 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1057 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1058 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1059 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1060 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1061 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1062 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1063 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1064 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1065 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1066 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1067 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1068 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1069 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1070 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1071 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1072 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1073 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1074 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1075 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1076 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1077 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1078 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1079 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1080 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1081 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1082 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1083 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1084 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1085 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1086 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1087 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1088 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1089 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1090 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1091 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1092 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1093 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1094 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1095 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1096 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1097 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1098 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1099 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1100 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1101 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1102 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1103 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1104 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1105 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1106 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1107 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1108 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1109 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1110 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1111 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1112 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1113 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1114 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1115 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1116 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1117 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1118 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1119 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1120 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1121 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1122 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1123 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1124 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1125 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1126 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1127 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1128 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1129 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1130 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1131 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1132 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1133 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1134 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1135 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1136 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1137 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1138 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1139 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1140 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1141 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1142 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1143 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1144 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1145 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1146 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1147 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1148 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1149 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1150 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1151 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1152 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1153 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1154 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1155 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1156 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1157 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1158 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1159 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1160 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1161 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1162 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1163 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1164 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1165 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1166 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1167 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1168 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1169 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1170 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1171 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1172 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1173 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1174 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1175 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1176 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1177 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1178 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1179 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1180 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1181 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1181a { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1181b { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1182 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1183 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1184 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1185 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1186 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1187 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1188 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1189 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1190 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1191 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1192 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1193 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1194 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1195 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1196 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1197 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1198 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1199 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1200 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1201 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1202 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1203 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1204 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1205 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1206 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1207 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1208 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1209 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1210 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1211 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+class C1212 { public static void m(Context c, Binder b) { /* Defeat inlining. */ if (Main.doThrow) { throw new Error(); } } }
+
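+// Minimal local stand-ins for the framework Context and Binder types
+// referenced by the generated classes above; empty bodies suffice here.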
+class Context {}
+class Binder {}
+
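+// Note: configure() below compiles into a single very large method whose
+// switch fans out to static calls on all of the generated classes above.
+// The sheer number of out-of-range call targets appears intended to force
+// long-branch handling (e.g. veneer pools) in the compiled code.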
+public class Main {
+
+ java.util.HashMap<String, Integer> typeMap;
+ private void buildTypeMap() {}
+
+ // TODO: Add Checker assertions?
+ public void configure(Context context, Class<?> cls, Binder binder) {
+ if (this.typeMap == null) {
+ buildTypeMap();
+ }
+ Integer num = (Integer) this.typeMap.get(cls.getName());
+ if (num != null) {
+ switch (num.intValue()) {
+ case 0:
+ C0.m(context, binder);
+ return;
+ case 1:
+ C1.m(context, binder);
+ return;
+ case 2:
+ C2.m(context, binder);
+ return;
+ case 3:
+ C3.m(context, binder);
+ return;
+ case 4:
+ C4.m(context, binder);
+ return;
+ case 5:
+ C5.m(context, binder);
+ return;
+ case 6:
+ C6.m(context, binder);
+ C7.m(context, binder);
+ C8.m(context, binder);
+ return;
+ case 7:
+ C9.m(context, binder);
+ return;
+ case 8:
+ C10.m(context, binder);
+ return;
+ case 9:
+ C11.m(context, binder);
+ return;
+ case 10:
+ C12.m(context, binder);
+ return;
+ case 11:
+ C13.m(context, binder);
+ return;
+ case 12:
+ C14.m(context, binder);
+ return;
+ case 13:
+ C15.m(context, binder);
+ return;
+ case 14:
+ C16.m(context, binder);
+ return;
+ case 15:
+ C17.m(context, binder);
+ return;
+ case 16:
+ C18.m(context, binder);
+ C19.m(context, binder);
+ return;
+ case 17:
+ C20.m(context, binder);
+ return;
+ case 18:
+ C21.m(context, binder);
+ return;
+ case 19:
+ C22.m(context, binder);
+ return;
+ case 20:
+ C23.m(context, binder);
+ C24.m(context, binder);
+ C25.m(context, binder);
+ C26.m(context, binder);
+ C27.m(context, binder);
+ C28.m(context, binder);
+ C29.m(context, binder);
+ C30.m(context, binder);
+ C31.m(context, binder);
+ C32.m(context, binder);
+ C33.m(context, binder);
+ C34.m(context, binder);
+ C35.m(context, binder);
+ C36.m(context, binder);
+ C37.m(context, binder);
+ C38.m(context, binder);
+ C39.m(context, binder);
+ C40.m(context, binder);
+ C41.m(context, binder);
+ C42.m(context, binder);
+ C43.m(context, binder);
+ C44.m(context, binder);
+ C45.m(context, binder);
+ C46.m(context, binder);
+ C47.m(context, binder);
+ C48.m(context, binder);
+ C49.m(context, binder);
+ C50.m(context, binder);
+ C51.m(context, binder);
+ C52.m(context, binder);
+ C53.m(context, binder);
+ C54.m(context, binder);
+ C55.m(context, binder);
+ C56.m(context, binder);
+ C57.m(context, binder);
+ C58.m(context, binder);
+ C59.m(context, binder);
+ C60.m(context, binder);
+ C61.m(context, binder);
+ C62.m(context, binder);
+ C63.m(context, binder);
+ C64.m(context, binder);
+ C65.m(context, binder);
+ C66.m(context, binder);
+ C67.m(context, binder);
+ C68.m(context, binder);
+ C69.m(context, binder);
+ C70.m(context, binder);
+ C71.m(context, binder);
+ C72.m(context, binder);
+ C73.m(context, binder);
+ C74.m(context, binder);
+ C75.m(context, binder);
+ C76.m(context, binder);
+ C77.m(context, binder);
+ C78.m(context, binder);
+ C79.m(context, binder);
+ C80.m(context, binder);
+ C81.m(context, binder);
+ C82.m(context, binder);
+ C83.m(context, binder);
+ C84.m(context, binder);
+ C85.m(context, binder);
+ return;
+ case 21:
+ C86.m(context, binder);
+ return;
+ case 22:
+ C87.m(context, binder);
+ return;
+ case 23:
+ C88.m(context, binder);
+ C89.m(context, binder);
+ return;
+ case 24:
+ C90.m(context, binder);
+ return;
+ case 25:
+ C91.m(context, binder);
+ return;
+ case 26:
+ C92.m(context, binder);
+ return;
+ case 27:
+ C93.m(context, binder);
+ return;
+ case 28:
+ C94.m(context, binder);
+ return;
+ case 29:
+ C95.m(context, binder);
+ return;
+ case 30:
+ C96.m(context, binder);
+ return;
+ case 31:
+ C97.m(context, binder);
+ return;
+ case 32:
+ C98.m(context, binder);
+ C99.m(context, binder);
+ return;
+ case 33:
+ C100.m(context, binder);
+ return;
+ case 34:
+ C101.m(context, binder);
+ return;
+ case 35:
+ C102.m(context, binder);
+ return;
+ case 36:
+ C103.m(context, binder);
+ return;
+ case 37:
+ C104.m(context, binder);
+ return;
+ case 38:
+ C105.m(context, binder);
+ return;
+ case 39:
+ C106.m(context, binder);
+ return;
+ case 40:
+ C107.m(context, binder);
+ return;
+ case 41:
+ C108.m(context, binder);
+ return;
+ case 42:
+ C109.m(context, binder);
+ return;
+ case 43:
+ C110.m(context, binder);
+ return;
+ case 44:
+ C111.m(context, binder);
+ return;
+ case 45:
+ C112.m(context, binder);
+ return;
+ case 46:
+ C113.m(context, binder);
+ return;
+ case 47:
+ C114.m(context, binder);
+ return;
+ case 48:
+ C115.m(context, binder);
+ return;
+ case 49:
+ C116.m(context, binder);
+ return;
+ case 50:
+ C117.m(context, binder);
+ C118.m(context, binder);
+ return;
+ case 51:
+ C119.m(context, binder);
+ return;
+ case 52:
+ C120.m(context, binder);
+ return;
+ case 53:
+ C121.m(context, binder);
+ return;
+ case 54:
+ C122.m(context, binder);
+ return;
+ case 55:
+ C123.m(context, binder);
+ return;
+ case 56:
+ C124.m(context, binder);
+ return;
+ case 57:
+ C125.m(context, binder);
+ return;
+ case 58:
+ C126.m(context, binder);
+ return;
+ case 59:
+ C127.m(context, binder);
+ return;
+ case 60:
+ C128.m(context, binder);
+ return;
+ case 61:
+ C129.m(context, binder);
+ return;
+ case 62:
+ C130.m(context, binder);
+ C131.m(context, binder);
+ C132.m(context, binder);
+ C133.m(context, binder);
+ C134.m(context, binder);
+ C135.m(context, binder);
+ C136.m(context, binder);
+ C137.m(context, binder);
+ return;
+ case 63:
+ C138.m(context, binder);
+ return;
+ case 64:
+ C139.m(context, binder);
+ return;
+ case 65:
+ C140.m(context, binder);
+ return;
+ case 66:
+ C141.m(context, binder);
+ return;
+ case 67:
+ C142.m(context, binder);
+ return;
+ case 68:
+ C143.m(context, binder);
+ C144.m(context, binder);
+ C145.m(context, binder);
+ return;
+ case 69:
+ C146.m(context, binder);
+ return;
+ case 70:
+ C147.m(context, binder);
+ return;
+ case 71:
+ C148.m(context, binder);
+ return;
+ case 72:
+ C149.m(context, binder);
+ return;
+ case 73:
+ C150.m(context, binder);
+ return;
+ case 74:
+ C151.m(context, binder);
+ return;
+ case 75:
+ C152.m(context, binder);
+ return;
+ case 76:
+ C153.m(context, binder);
+ return;
+ case 77:
+ C154.m(context, binder);
+ return;
+ case 78:
+ C155.m(context, binder);
+ return;
+ case 79:
+ C156.m(context, binder);
+ return;
+ case 80:
+ C157.m(context, binder);
+ C158.m(context, binder);
+ C159.m(context, binder);
+ return;
+ case 81:
+ C160.m(context, binder);
+ return;
+ case 82:
+ C161.m(context, binder);
+ return;
+ case 83:
+ C162.m(context, binder);
+ return;
+ case 84:
+ C163.m(context, binder);
+ return;
+ case 85:
+ C164.m(context, binder);
+ C165.m(context, binder);
+ C166.m(context, binder);
+ C167.m(context, binder);
+ C168.m(context, binder);
+ C169.m(context, binder);
+ C170.m(context, binder);
+ C171.m(context, binder);
+ C172.m(context, binder);
+ C173.m(context, binder);
+ C174.m(context, binder);
+ C175.m(context, binder);
+ C176.m(context, binder);
+ C177.m(context, binder);
+ C178.m(context, binder);
+ C179.m(context, binder);
+ C180.m(context, binder);
+ C181.m(context, binder);
+ C182.m(context, binder);
+ C183.m(context, binder);
+ C184.m(context, binder);
+ return;
+ case 86:
+ C185.m(context, binder);
+ return;
+ case 87:
+ C186.m(context, binder);
+ return;
+ case 88:
+ C187.m(context, binder);
+ return;
+ case 89:
+ C188.m(context, binder);
+ return;
+ case 90:
+ C189.m(context, binder);
+ return;
+ case 91:
+ C190.m(context, binder);
+ return;
+ case 92:
+ C191.m(context, binder);
+ return;
+ case 93:
+ C192.m(context, binder);
+ return;
+ case 94:
+ C193.m(context, binder);
+ return;
+ case 95:
+ C194.m(context, binder);
+ return;
+ case 96:
+ C195.m(context, binder);
+ return;
+ case 97:
+ C196.m(context, binder);
+ return;
+ case 98:
+ C197.m(context, binder);
+ return;
+ case 99:
+ C198.m(context, binder);
+ return;
+ case 100:
+ C199.m(context, binder);
+ return;
+ case 101:
+ C200.m(context, binder);
+ return;
+ case 102:
+ C201.m(context, binder);
+ return;
+ case 103:
+ C202.m(context, binder);
+ C203.m(context, binder);
+ C204.m(context, binder);
+ C205.m(context, binder);
+ C206.m(context, binder);
+ return;
+ case 104:
+ C207.m(context, binder);
+ return;
+ case 105:
+ C208.m(context, binder);
+ return;
+ case 106:
+ C209.m(context, binder);
+ return;
+ case 107:
+ C210.m(context, binder);
+ return;
+ case 108:
+ C211.m(context, binder);
+ return;
+ case 109:
+ C212.m(context, binder);
+ return;
+ case 110:
+ C213.m(context, binder);
+ return;
+ case 111:
+ C214.m(context, binder);
+ return;
+ case 112:
+ C215.m(context, binder);
+ C216.m(context, binder);
+ C217.m(context, binder);
+ C218.m(context, binder);
+ C219.m(context, binder);
+ C220.m(context, binder);
+ C221.m(context, binder);
+ C222.m(context, binder);
+ C223.m(context, binder);
+ C224.m(context, binder);
+ C225.m(context, binder);
+ C226.m(context, binder);
+ return;
+ case 113:
+ C227.m(context, binder);
+ return;
+ case 114:
+ C228.m(context, binder);
+ return;
+ case 115:
+ C229.m(context, binder);
+ return;
+ case 116:
+ C230.m(context, binder);
+ return;
+ case 117:
+ C231.m(context, binder);
+ return;
+ case 118:
+ C232.m(context, binder);
+ return;
+ case 119:
+ C233.m(context, binder);
+ return;
+ case 120:
+ C234.m(context, binder);
+ return;
+ case 121:
+ C235.m(context, binder);
+ return;
+ case 122:
+ C236.m(context, binder);
+ return;
+ case 123:
+ C237.m(context, binder);
+ return;
+ case 124:
+ C238.m(context, binder);
+ return;
+ case 125:
+ C239.m(context, binder);
+ return;
+ case 126:
+ C240.m(context, binder);
+ return;
+ case 127:
+ C241.m(context, binder);
+ return;
+ case 128:
+ C242.m(context, binder);
+ return;
+ case 129:
+ C243.m(context, binder);
+ C244.m(context, binder);
+ C245.m(context, binder);
+ C246.m(context, binder);
+ C247.m(context, binder);
+ C248.m(context, binder);
+ C249.m(context, binder);
+ C250.m(context, binder);
+ C251.m(context, binder);
+ return;
+ case 130:
+ C252.m(context, binder);
+ return;
+ case 131:
+ C253.m(context, binder);
+ return;
+ case 132:
+ C254.m(context, binder);
+ return;
+ case 133:
+ C255.m(context, binder);
+ return;
+ case 134:
+ C256.m(context, binder);
+ return;
+ case 135:
+ C257.m(context, binder);
+ return;
+ case 136:
+ C258.m(context, binder);
+ return;
+ case 137:
+ C259.m(context, binder);
+ return;
+ case 138:
+ C260.m(context, binder);
+ return;
+ case 139:
+ C261.m(context, binder);
+ return;
+ case 140:
+ C262.m(context, binder);
+ return;
+ case 141:
+ C263.m(context, binder);
+ return;
+ case 142:
+ C264.m(context, binder);
+ return;
+ case 143:
+ C265.m(context, binder);
+ return;
+ case 144:
+ C266.m(context, binder);
+ C267.m(context, binder);
+ return;
+ case 145:
+ C268.m(context, binder);
+ return;
+ case 146:
+ C269.m(context, binder);
+ return;
+ case 147:
+ C270.m(context, binder);
+ return;
+ case 148:
+ C271.m(context, binder);
+ return;
+ case 149:
+ C272.m(context, binder);
+ return;
+ case 150:
+ C273.m(context, binder);
+ return;
+ case 151:
+ C274.m(context, binder);
+ return;
+ case 152:
+ C275.m(context, binder);
+ return;
+ case 153:
+ C276.m(context, binder);
+ return;
+ case 154:
+ C277.m(context, binder);
+ return;
+ case 155:
+ C278.m(context, binder);
+ return;
+ case 156:
+ C279.m(context, binder);
+ return;
+ case 157:
+ C280.m(context, binder);
+ return;
+ case 158:
+ C281.m(context, binder);
+ return;
+ case 159:
+ C282.m(context, binder);
+ return;
+ case 160:
+ C283.m(context, binder);
+ return;
+ case 161:
+ C284.m(context, binder);
+ return;
+ case 162:
+ C285.m(context, binder);
+ return;
+ case 163:
+ C286.m(context, binder);
+ return;
+ case 164:
+ C287.m(context, binder);
+ return;
+ case 165:
+ C288.m(context, binder);
+ return;
+ case 166:
+ C289.m(context, binder);
+ return;
+ case 167:
+ C290.m(context, binder);
+ return;
+ case 168:
+ C291.m(context, binder);
+ C292.m(context, binder);
+ C293.m(context, binder);
+ C294.m(context, binder);
+ C295.m(context, binder);
+ C296.m(context, binder);
+ C297.m(context, binder);
+ return;
+ case 169:
+ C298.m(context, binder);
+ return;
+ case 170:
+ C299.m(context, binder);
+ return;
+ case 171:
+ C300.m(context, binder);
+ return;
+ case 172:
+ C301.m(context, binder);
+ return;
+ case 173:
+ C302.m(context, binder);
+ return;
+ case 174:
+ C303.m(context, binder);
+ return;
+ case 175:
+ C304.m(context, binder);
+ return;
+ case 176:
+ C305.m(context, binder);
+ return;
+ case 177:
+ C306.m(context, binder);
+ return;
+ case 178:
+ C307.m(context, binder);
+ return;
+ case 179:
+ C308.m(context, binder);
+ return;
+ case 180:
+ C309.m(context, binder);
+ return;
+ case 181:
+ C310.m(context, binder);
+ return;
+ case 182:
+ C311.m(context, binder);
+ return;
+ case 183:
+ C312.m(context, binder);
+ return;
+ case 184:
+ C313.m(context, binder);
+ return;
+ case 185:
+ C314.m(context, binder);
+ return;
+ case 186:
+ C315.m(context, binder);
+ return;
+ case 187:
+ C316.m(context, binder);
+ return;
+ case 188:
+ C317.m(context, binder);
+ return;
+ case 189:
+ C318.m(context, binder);
+ return;
+ case 190:
+ C319.m(context, binder);
+ return;
+ case 191:
+ C320.m(context, binder);
+ return;
+ case 192:
+ C321.m(context, binder);
+ return;
+ case 193:
+ C322.m(context, binder);
+ return;
+ case 194:
+ C323.m(context, binder);
+ C324.m(context, binder);
+ C325.m(context, binder);
+ return;
+ case 195:
+ C326.m(context, binder);
+ return;
+ case 196:
+ C327.m(context, binder);
+ return;
+ case 197:
+ C328.m(context, binder);
+ return;
+ case 198:
+ C329.m(context, binder);
+ return;
+ case 199:
+ C330.m(context, binder);
+ return;
+ case 200:
+ C331.m(context, binder);
+ return;
+ case 201:
+ C332.m(context, binder);
+ return;
+ case 202:
+ C333.m(context, binder);
+ return;
+ case 203:
+ C334.m(context, binder);
+ C335.m(context, binder);
+ C336.m(context, binder);
+ C337.m(context, binder);
+ C338.m(context, binder);
+ C339.m(context, binder);
+ C340.m(context, binder);
+ C341.m(context, binder);
+ C342.m(context, binder);
+ C343.m(context, binder);
+ C344.m(context, binder);
+ C345.m(context, binder);
+ return;
+ case 204:
+ C346.m(context, binder);
+ return;
+ case 205:
+ C347.m(context, binder);
+ return;
+ case 206:
+ C348.m(context, binder);
+ return;
+ case 207:
+ C349.m(context, binder);
+ return;
+ case 208:
+ C350.m(context, binder);
+ return;
+ case 209:
+ C351.m(context, binder);
+ return;
+ case 210:
+ C352.m(context, binder);
+ C353.m(context, binder);
+ return;
+ case 211:
+ C354.m(context, binder);
+ return;
+ case 212:
+ C355.m(context, binder);
+ C356.m(context, binder);
+ C357.m(context, binder);
+ C358.m(context, binder);
+ C359.m(context, binder);
+ C360.m(context, binder);
+ C361.m(context, binder);
+ C362.m(context, binder);
+ C363.m(context, binder);
+ C364.m(context, binder);
+ C365.m(context, binder);
+ C366.m(context, binder);
+ C367.m(context, binder);
+ C368.m(context, binder);
+ C369.m(context, binder);
+ C370.m(context, binder);
+ C371.m(context, binder);
+ return;
+ case 213:
+ C372.m(context, binder);
+ return;
+ case 214:
+ C373.m(context, binder);
+ return;
+ case 215:
+ C374.m(context, binder);
+ return;
+ case 216:
+ C375.m(context, binder);
+ C376.m(context, binder);
+ C377.m(context, binder);
+ C378.m(context, binder);
+ C379.m(context, binder);
+ C380.m(context, binder);
+ C381.m(context, binder);
+ C382.m(context, binder);
+ return;
+ case 217:
+ C383.m(context, binder);
+ return;
+ case 218:
+ C384.m(context, binder);
+ return;
+ case 219:
+ C385.m(context, binder);
+ return;
+ case 220:
+ C386.m(context, binder);
+ return;
+ case 221:
+ C387.m(context, binder);
+ return;
+ case 222:
+ C388.m(context, binder);
+ return;
+ case 223:
+ C389.m(context, binder);
+ return;
+ case 224:
+ C390.m(context, binder);
+ return;
+ case 225:
+ C391.m(context, binder);
+ return;
+ case 226:
+ C392.m(context, binder);
+ return;
+ case 227:
+ C393.m(context, binder);
+ C394.m(context, binder);
+ return;
+ case 228:
+ C395.m(context, binder);
+ return;
+ case 229:
+ C396.m(context, binder);
+ return;
+ case 230:
+ C397.m(context, binder);
+ return;
+ case 231:
+ C398.m(context, binder);
+ return;
+ case 232:
+ C399.m(context, binder);
+ return;
+ case 233:
+ C400.m(context, binder);
+ return;
+ case 234:
+ C401.m(context, binder);
+ return;
+ case 235:
+ C402.m(context, binder);
+ return;
+ case 236:
+ C403.m(context, binder);
+ return;
+ case 237:
+ C404.m(context, binder);
+ return;
+ case 238:
+ C405.m(context, binder);
+ return;
+ case 239:
+ C406.m(context, binder);
+ return;
+ case 240:
+ C407.m(context, binder);
+ return;
+ case 241:
+ C408.m(context, binder);
+ return;
+ case 242:
+ C409.m(context, binder);
+ return;
+ case 243:
+ C410.m(context, binder);
+ return;
+ case 244:
+ C411.m(context, binder);
+ return;
+ case 245:
+ C412.m(context, binder);
+ return;
+ case 246:
+ C413.m(context, binder);
+ return;
+ case 247:
+ C414.m(context, binder);
+ return;
+ case 248:
+ C415.m(context, binder);
+ return;
+ case 249:
+ C416.m(context, binder);
+ return;
+ case 250:
+ C417.m(context, binder);
+ return;
+ case 251:
+ C418.m(context, binder);
+ return;
+ case 252:
+ C419.m(context, binder);
+ return;
+ case 253:
+ C420.m(context, binder);
+ return;
+ case 254:
+ C421.m(context, binder);
+ return;
+ case 255:
+ C422.m(context, binder);
+ return;
+ case 256:
+ C423.m(context, binder);
+ return;
+ case 257:
+ C424.m(context, binder);
+ return;
+ case 258:
+ C425.m(context, binder);
+ return;
+ case 259:
+ C426.m(context, binder);
+ return;
+ case 260:
+ C427.m(context, binder);
+ return;
+ case 261:
+ C428.m(context, binder);
+ return;
+ case 262:
+ C429.m(context, binder);
+ return;
+ case 263:
+ C430.m(context, binder);
+ return;
+ case 264:
+ C431.m(context, binder);
+ return;
+ case 265:
+ C432.m(context, binder);
+ return;
+ case 266:
+ C433.m(context, binder);
+ return;
+ case 267:
+ C434.m(context, binder);
+ C435.m(context, binder);
+ C436.m(context, binder);
+ C437.m(context, binder);
+ return;
+ case 268:
+ C438.m(context, binder);
+ return;
+ case 269:
+ C439.m(context, binder);
+ return;
+ case 270:
+ C440.m(context, binder);
+ return;
+ case 271:
+ C441.m(context, binder);
+ return;
+ case 272:
+ C442.m(context, binder);
+ return;
+ case 273:
+ C443.m(context, binder);
+ return;
+ case 274:
+ C444.m(context, binder);
+ return;
+ case 275:
+ C445.m(context, binder);
+ return;
+ case 276:
+ C446.m(context, binder);
+ return;
+ case 277:
+ C447.m(context, binder);
+ return;
+ case 278:
+ C448.m(context, binder);
+ return;
+ case 279:
+ C449.m(context, binder);
+ return;
+ case 280:
+ C450.m(context, binder);
+ return;
+ case 281:
+ C451.m(context, binder);
+ return;
+ case 282:
+ C452.m(context, binder);
+ return;
+ case 283:
+ C453.m(context, binder);
+ return;
+ case 284:
+ C454.m(context, binder);
+ return;
+ case 285:
+ C455.m(context, binder);
+ return;
+ case 286:
+ C456.m(context, binder);
+ return;
+ case 287:
+ C457.m(context, binder);
+ return;
+ case 288:
+ C458.m(context, binder);
+ return;
+ case 289:
+ C459.m(context, binder);
+ return;
+ case 290:
+ C460.m(context, binder);
+ return;
+ case 291:
+ C461.m(context, binder);
+ return;
+ case 292:
+ C462.m(context, binder);
+ return;
+ case 293:
+ C463.m(context, binder);
+ return;
+ case 294:
+ C464.m(context, binder);
+ return;
+ case 295:
+ C465.m(context, binder);
+ return;
+ case 296:
+ C466.m(context, binder);
+ return;
+ case 297:
+ C467.m(context, binder);
+ return;
+ case 298:
+ C468.m(context, binder);
+ return;
+ case 299:
+ C469.m(context, binder);
+ return;
+ case 300:
+ C470.m(context, binder);
+ return;
+ case 301:
+ C471.m(context, binder);
+ return;
+ case 302:
+ C472.m(context, binder);
+ return;
+ case 303:
+ C473.m(context, binder);
+ return;
+ case 304:
+ C474.m(context, binder);
+ return;
+ case 305:
+ C475.m(context, binder);
+ return;
+ case 306:
+ C476.m(context, binder);
+ return;
+ case 307:
+ C477.m(context, binder);
+ return;
+ case 308:
+ C478.m(context, binder);
+ return;
+ case 309:
+ C479.m(context, binder);
+ return;
+ case 310:
+ C480.m(context, binder);
+ return;
+ case 311:
+ C481.m(context, binder);
+ return;
+ case 312:
+ C482.m(context, binder);
+ return;
+ case 313:
+ C483.m(context, binder);
+ return;
+ case 314:
+ C484.m(context, binder);
+ return;
+ case 315:
+ C485.m(context, binder);
+ return;
+ case 316:
+ C486.m(context, binder);
+ return;
+ case 317:
+ C487.m(context, binder);
+ return;
+ case 318:
+ C488.m(context, binder);
+ return;
+ case 319:
+ C489.m(context, binder);
+ return;
+ case 320:
+ C490.m(context, binder);
+ return;
+ case 321:
+ C491.m(context, binder);
+ C492.m(context, binder);
+ C493.m(context, binder);
+ C494.m(context, binder);
+ C495.m(context, binder);
+ C496.m(context, binder);
+ C497.m(context, binder);
+ C498.m(context, binder);
+ return;
+ case 322:
+ C499.m(context, binder);
+ return;
+ case 323:
+ C500.m(context, binder);
+ return;
+ case 324:
+ C501.m(context, binder);
+ return;
+ case 325:
+ C502.m(context, binder);
+ return;
+ case 326:
+ C503.m(context, binder);
+ return;
+ case 327:
+ C504.m(context, binder);
+ return;
+ case 328:
+ C505.m(context, binder);
+ return;
+ case 329:
+ C506.m(context, binder);
+ return;
+ case 330:
+ C507.m(context, binder);
+ return;
+ case 331:
+ C508.m(context, binder);
+ return;
+ case 332:
+ C509.m(context, binder);
+ return;
+ case 333:
+ C510.m(context, binder);
+ return;
+ case 334:
+ C511.m(context, binder);
+ return;
+ case 335:
+ C512.m(context, binder);
+ return;
+ case 336:
+ C513.m(context, binder);
+ return;
+ case 337:
+ C514.m(context, binder);
+ return;
+ case 338:
+ C515.m(context, binder);
+ return;
+ case 339:
+ C516.m(context, binder);
+ return;
+ case 340:
+ C517.m(context, binder);
+ return;
+ case 341:
+ C518.m(context, binder);
+ return;
+ case 342:
+ C519.m(context, binder);
+ return;
+ case 343:
+ C520.m(context, binder);
+ return;
+ case 344:
+ C255.mFactory(context, binder);
+ return;
+ case 345:
+ C522.m(context, binder);
+ return;
+ case 346:
+ C523.m(context, binder);
+ return;
+ case 347:
+ C524.m(context, binder);
+ return;
+ case 348:
+ C525.m(context, binder);
+ return;
+ case 349:
+ C526.m(context, binder);
+ return;
+ case 350:
+ C527.m(context, binder);
+ return;
+ case 351:
+ C528.m(context, binder);
+ return;
+ case 352:
+ C529.m(context, binder);
+ return;
+ case 353:
+ C530.m(context, binder);
+ return;
+ case 354:
+ C531.m(context, binder);
+ return;
+ case 355:
+ C532.m(context, binder);
+ return;
+ case 356:
+ C533.m(context, binder);
+ return;
+ case 357:
+ C534.m(context, binder);
+ return;
+ case 358:
+ C535.m(context, binder);
+ return;
+ case 359:
+ C536.m(context, binder);
+ return;
+ case 360:
+ C537.m(context, binder);
+ return;
+ case 361:
+ C538.m(context, binder);
+ return;
+ case 362:
+ C539.m(context, binder);
+ return;
+ case 363:
+ C540.m(context, binder);
+ return;
+ case 364:
+ C541.m(context, binder);
+ return;
+ case 365:
+ C542.m(context, binder);
+ return;
+ case 366:
+ C543.m(context, binder);
+ return;
+ case 367:
+ C544.m(context, binder);
+ C545.m(context, binder);
+ return;
+ case 368:
+ C546.m(context, binder);
+ return;
+ case 369:
+ C547.m(context, binder);
+ return;
+ case 370:
+ C548.m(context, binder);
+ return;
+ case 371:
+ C549.m(context, binder);
+ return;
+ case 372:
+ C550.m(context, binder);
+ return;
+ case 373:
+ C551.m(context, binder);
+ return;
+ case 374:
+ C552.m(context, binder);
+ return;
+ case 375:
+ C553.m(context, binder);
+ return;
+ case 376:
+ C554.m(context, binder);
+ return;
+ case 377:
+ C555.m(context, binder);
+ return;
+ case 378:
+ C556.m(context, binder);
+ return;
+ case 379:
+ C557.m(context, binder);
+ return;
+ case 380:
+ C5.mImpl(context, binder);
+ return;
+ case 381:
+ C559.m(context, binder);
+ return;
+ case 382:
+ C560.m(context, binder);
+ return;
+ case 383:
+ C561.m(context, binder);
+ return;
+ case 384:
+ C562.m(context, binder);
+ return;
+ case 385:
+ C563.m(context, binder);
+ return;
+ case 386:
+ C564.m(context, binder);
+ return;
+ case 387:
+ C565.m(context, binder);
+ return;
+ case 388:
+ C566.m(context, binder);
+ return;
+ case 389:
+ C567.m(context, binder);
+ return;
+ case 390:
+ C568.m(context, binder);
+ return;
+ case 391:
+ C569.m(context, binder);
+ return;
+ case 392:
+ C570.m(context, binder);
+ return;
+ case 393:
+ C571.m(context, binder);
+ return;
+ case 394:
+ C572.m(context, binder);
+ return;
+ case 395:
+ C573.m(context, binder);
+ return;
+ case 396:
+ C574.m(context, binder);
+ return;
+ case 397:
+ C575.m(context, binder);
+ return;
+ case 398:
+ C576.m(context, binder);
+ return;
+ case 399:
+ C577.m(context, binder);
+ return;
+ case 400:
+ C578.m(context, binder);
+ return;
+ case 401:
+ C579.m(context, binder);
+ return;
+ case 402:
+ C580.m(context, binder);
+ return;
+ case 403:
+ C581.m(context, binder);
+ return;
+ case 404:
+ C582.m(context, binder);
+ return;
+ case 405:
+ C583.m(context, binder);
+ return;
+ case 406:
+ C584.m(context, binder);
+ C585.m(context, binder);
+ C586.m(context, binder);
+ C587.m(context, binder);
+ C588.m(context, binder);
+ C589.m(context, binder);
+ C590.m(context, binder);
+ C591.m(context, binder);
+ C592.m(context, binder);
+ C593.m(context, binder);
+ C594.m(context, binder);
+ return;
+ case 407:
+ C595.m(context, binder);
+ return;
+ case 408:
+ C596.m(context, binder);
+ return;
+ case 409:
+ C597.m(context, binder);
+ C598.m(context, binder);
+ return;
+ case 410:
+ C599.m(context, binder);
+ return;
+ case 411:
+ C600.m(context, binder);
+ return;
+ case 412:
+ C601.m(context, binder);
+ return;
+ case 413:
+ C602.m(context, binder);
+ return;
+ case 414:
+ C603.m(context, binder);
+ return;
+ case 415:
+ C604.m(context, binder);
+ return;
+ case 416:
+ C605.m(context, binder);
+ return;
+ case 417:
+ C606.m(context, binder);
+ return;
+ case 418:
+ C607.m(context, binder);
+ return;
+ case 419:
+ C608.m(context, binder);
+ return;
+ case 420:
+ C609.m(context, binder);
+ return;
+ case 421:
+ C610.m(context, binder);
+ return;
+ case 422:
+ C611.m(context, binder);
+ return;
+ case 423:
+ C612.m(context, binder);
+ return;
+ case 424:
+ C613.m(context, binder);
+ return;
+ case 425:
+ C614.m(context, binder);
+ C615.m(context, binder);
+ C616.m(context, binder);
+ return;
+ case 426:
+ C617.m(context, binder);
+ return;
+ case 427:
+ C618.m(context, binder);
+ return;
+ case 428:
+ C619.m(context, binder);
+ return;
+ case 429:
+ C620.m(context, binder);
+ return;
+ case 430:
+ C621.m(context, binder);
+ return;
+ case 431:
+ C622.m(context, binder);
+ return;
+ case 432:
+ C623.m(context, binder);
+ return;
+ case 433:
+ C624.m(context, binder);
+ return;
+ case 434:
+ C625.m(context, binder);
+ return;
+ case 435:
+ C626.m(context, binder);
+ return;
+ case 436:
+ C627.m(context, binder);
+ return;
+ case 437:
+ C628.m(context, binder);
+ return;
+ case 438:
+ C629.m(context, binder);
+ return;
+ case 439:
+ C630.m(context, binder);
+ return;
+ case 440:
+ C631.m(context, binder);
+ return;
+ case 441:
+ C632.m(context, binder);
+ return;
+ case 442:
+ C633.m(context, binder);
+ return;
+ case 443:
+ C634.m(context, binder);
+ return;
+ case 444:
+ C635.m(context, binder);
+ return;
+ case 445:
+ C636.m(context, binder);
+ return;
+ case 446:
+ C637.m(context, binder);
+ return;
+ case 447:
+ C638.m(context, binder);
+ return;
+ case 448:
+ C639.m(context, binder);
+ return;
+ case 449:
+ C640.m(context, binder);
+ return;
+ case 450:
+ C641.m(context, binder);
+ return;
+ case 451:
+ C642.m(context, binder);
+ return;
+ case 452:
+ C643.m(context, binder);
+ return;
+ case 453:
+ C644.m(context, binder);
+ return;
+ case 454:
+ C645.m(context, binder);
+ return;
+ case 455:
+ C646.m(context, binder);
+ return;
+ case 456:
+ C647.m(context, binder);
+ return;
+ case 457:
+ C648.m(context, binder);
+ return;
+ case 458:
+ C649.m(context, binder);
+ return;
+ case 459:
+ C650.m(context, binder);
+ return;
+ case 460:
+ C651.m(context, binder);
+ return;
+ case 461:
+ C652.m(context, binder);
+ return;
+ case 462:
+ C653.m(context, binder);
+ return;
+ case 463:
+ C654.m(context, binder);
+ return;
+ case 464:
+ C655.m(context, binder);
+ return;
+ case 465:
+ C656.m(context, binder);
+ return;
+ case 466:
+ C657.m(context, binder);
+ return;
+ case 467:
+ C658.m(context, binder);
+ return;
+ case 468:
+ C659.m(context, binder);
+ return;
+ case 469:
+ C660.m(context, binder);
+ return;
+ case 470:
+ C661.m(context, binder);
+ return;
+ case 471:
+ C90.mReport_Factory(context, binder);
+ return;
+ case 472:
+ C663.m(context, binder);
+ return;
+ case 473:
+ C664.m(context, binder);
+ return;
+ case 474:
+ C665.m(context, binder);
+ return;
+ case 475:
+ C666.m(context, binder);
+ return;
+ case 476:
+ C667.m(context, binder);
+ return;
+ case 477:
+ C668.m(context, binder);
+ return;
+ case 478:
+ C105.m_InMemoryScanner(context, binder);
+ return;
+ case 479:
+ C670.m(context, binder);
+ C671.m(context, binder);
+ return;
+ case 480:
+ C672.m(context, binder);
+ return;
+ case 481:
+ C673.m(context, binder);
+ return;
+ case 482:
+ C674.m(context, binder);
+ return;
+ case 483:
+ C675.m(context, binder);
+ return;
+ case 484:
+ C676.m(context, binder);
+ return;
+ case 485:
+ C677.m(context, binder);
+ return;
+ case 486:
+ C678.m(context, binder);
+ return;
+ case 487:
+ C679.m(context, binder);
+ C680.m(context, binder);
+ C681.m(context, binder);
+ C682.m(context, binder);
+ C683.m(context, binder);
+ C684.m(context, binder);
+ C685.m(context, binder);
+ return;
+ case 488:
+ C686.m(context, binder);
+ return;
+ case 489:
+ C687.m(context, binder);
+ return;
+ case 490:
+ C688.m(context, binder);
+ return;
+ case 491:
+ C689.m(context, binder);
+ return;
+ case 492:
+ C690.m(context, binder);
+ return;
+ case 493:
+ C691.m(context, binder);
+ C692.m(context, binder);
+ C693.m(context, binder);
+ C694.m(context, binder);
+ C695.m(context, binder);
+ C696.m(context, binder);
+ C697.m(context, binder);
+ C698.m(context, binder);
+ C699.m(context, binder);
+ C700.m(context, binder);
+ C701.m(context, binder);
+ C702.m(context, binder);
+ C703.m(context, binder);
+ C704.m(context, binder);
+ C705.m(context, binder);
+ C706.m(context, binder);
+ C707.m(context, binder);
+ C708.m(context, binder);
+ C709.m(context, binder);
+ C710.m(context, binder);
+ C711.m(context, binder);
+ C712.m(context, binder);
+ C713.m(context, binder);
+ C714.m(context, binder);
+ C715.m(context, binder);
+ C716.m(context, binder);
+ C717.m(context, binder);
+ C718.m(context, binder);
+ C719.m(context, binder);
+ C720.m(context, binder);
+ return;
+ case 494:
+ C721.m(context, binder);
+ C722.m(context, binder);
+ C723.m(context, binder);
+ return;
+ case 495:
+ C724.m(context, binder);
+ return;
+ case 496:
+ C725.m(context, binder);
+ return;
+ case 497:
+ C726.m(context, binder);
+ return;
+ case 498:
+ C727.m(context, binder);
+ return;
+ case 499:
+ C728.m(context, binder);
+ return;
+ case 500:
+ C729.m(context, binder);
+ return;
+ case 501:
+ C730.m(context, binder);
+ return;
+ case 502:
+ C731.m(context, binder);
+ return;
+ case 503:
+ C732.m(context, binder);
+ return;
+ case 504:
+ C733.m(context, binder);
+ return;
+ case 505:
+ C734.m(context, binder);
+ return;
+ case 506:
+ C735.m(context, binder);
+ return;
+ case 507:
+ C736.m(context, binder);
+ return;
+ case 508:
+ C737.m(context, binder);
+ return;
+ case 509:
+ C738.m(context, binder);
+ return;
+ case 510:
+ C739.m(context, binder);
+ return;
+ case 511:
+ C740.m(context, binder);
+ return;
+ case 512:
+ C741.m(context, binder);
+ return;
+ case 513:
+ C742.m(context, binder);
+ return;
+ case 514:
+ C743.m(context, binder);
+ return;
+ case 515:
+ C744.m(context, binder);
+ return;
+ case 516:
+ C745.m(context, binder);
+ return;
+ case 517:
+ C746.m(context, binder);
+ return;
+ case 518:
+ C747.m(context, binder);
+ return;
+ case 519:
+ C748.m(context, binder);
+ return;
+ case 520:
+ C749.m(context, binder);
+ return;
+ case 521:
+ C750.m(context, binder);
+ C751.m(context, binder);
+ C752.m(context, binder);
+ C753.m(context, binder);
+ C754.m(context, binder);
+ C755.m(context, binder);
+ C756.m(context, binder);
+ C757.m(context, binder);
+ C758.m(context, binder);
+ C759.m(context, binder);
+ C760.m(context, binder);
+ C761.m(context, binder);
+ return;
+ case 522:
+ C762.m(context, binder);
+ return;
+ case 523:
+ C763.m(context, binder);
+ return;
+ case 524:
+ C764.m(context, binder);
+ return;
+ case 525:
+ C765.m(context, binder);
+ return;
+ case 526:
+ C766.m(context, binder);
+ return;
+ case 527:
+ C767.m(context, binder);
+ return;
+ case 528:
+ C768.m(context, binder);
+ return;
+ case 529:
+ C769.m(context, binder);
+ return;
+ case 530:
+ C770.m(context, binder);
+ return;
+ case 531:
+ C771.m(context, binder);
+ return;
+ case 532:
+ C772.m(context, binder);
+ return;
+ case 533:
+ C773.m(context, binder);
+ return;
+ case 534:
+ C774.m(context, binder);
+ return;
+ case 535:
+ C775.m(context, binder);
+ return;
+ case 536:
+ C776.m(context, binder);
+ return;
+ case 537:
+ C777.m(context, binder);
+ return;
+ case 538:
+ C778.m(context, binder);
+ return;
+ case 539:
+ C779.m(context, binder);
+ return;
+ case 540:
+ C780.m(context, binder);
+ return;
+ case 541:
+ C781.m(context, binder);
+ return;
+ case 542:
+ C90.mApi(context, binder);
+ return;
+ case 543:
+ C783.m(context, binder);
+ return;
+ case 544:
+ C784.m(context, binder);
+ return;
+ case 545:
+ C785.m(context, binder);
+ return;
+ case 546:
+ C786.m(context, binder);
+ return;
+ case 547:
+ C787.m(context, binder);
+ return;
+ case 548:
+ C788.m(context, binder);
+ return;
+ case 549:
+ C789.m(context, binder);
+ return;
+ case 550:
+ C790.m(context, binder);
+ return;
+ case 551:
+ C791.m(context, binder);
+ return;
+ case 552:
+ C792.m(context, binder);
+ return;
+ case 553:
+ C793.m(context, binder);
+ return;
+ case 554:
+ C794.m(context, binder);
+ return;
+ case 555:
+ C795.m(context, binder);
+ C796.m(context, binder);
+ return;
+ case 556:
+ C797.m(context, binder);
+ return;
+ case 557:
+ C798.m(context, binder);
+ return;
+ case 558:
+ C799.m(context, binder);
+ return;
+ case 559:
+ C800.m(context, binder);
+ return;
+ case 560:
+ C801.m(context, binder);
+ return;
+ case 561:
+ C802.m(context, binder);
+ return;
+ case 562:
+ C803.m(context, binder);
+ C804.m(context, binder);
+ C805.m(context, binder);
+ C806.m(context, binder);
+ return;
+ case 563:
+ C807.m(context, binder);
+ return;
+ case 564:
+ C808.m(context, binder);
+ return;
+ case 565:
+ C809.m(context, binder);
+ return;
+ case 566:
+ C810.m(context, binder);
+ return;
+ case 567:
+ C811.m(context, binder);
+ return;
+ case 568:
+ C812.m(context, binder);
+ return;
+ case 569:
+ C813.m(context, binder);
+ C814.m(context, binder);
+ C815.m(context, binder);
+ C816.m(context, binder);
+ C817.m(context, binder);
+ C818.m(context, binder);
+ C819.m(context, binder);
+ C820.m(context, binder);
+ C821.m(context, binder);
+ return;
+ case 570:
+ C822.m(context, binder);
+ C823.m(context, binder);
+ return;
+ case 571:
+ C824.m(context, binder);
+ return;
+ case 572:
+ C825.m(context, binder);
+ return;
+ case 573:
+ C826.m(context, binder);
+ return;
+ case 574:
+ C827.m(context, binder);
+ return;
+ case 575:
+ C828.m(context, binder);
+ return;
+ case 576:
+ C829.m(context, binder);
+ return;
+ case 577:
+ C830.m(context, binder);
+ return;
+ case 578:
+ C831.m(context, binder);
+ return;
+ case 579:
+ C832.m(context, binder);
+ return;
+ case 580:
+ C833.m(context, binder);
+ return;
+ case 581:
+ C834.m(context, binder);
+ return;
+ case 582:
+ C835.m(context, binder);
+ return;
+ case 583:
+ C836.m(context, binder);
+ return;
+ case 584:
+ C837.m(context, binder);
+ return;
+ case 585:
+ C838.m(context, binder);
+ return;
+ case 586:
+ C105.m_Scanner(context, binder);
+ C840.m(context, binder);
+ return;
+ case 587:
+ C94.mImpl(context, binder);
+ return;
+ case 588:
+ C842.m(context, binder);
+ C843.m(context, binder);
+ C844.m(context, binder);
+ C845.m(context, binder);
+ return;
+ case 589:
+ C846.m(context, binder);
+ return;
+ case 590:
+ C847.m(context, binder);
+ return;
+ case 591:
+ C848.m(context, binder);
+ return;
+ case 592:
+ C849.m(context, binder);
+ return;
+ case 593:
+ C850.m(context, binder);
+ return;
+ case 594:
+ C851.m(context, binder);
+ return;
+ case 595:
+ C852.m(context, binder);
+ return;
+ case 596:
+ C853.m(context, binder);
+ return;
+ case 597:
+ C854.m(context, binder);
+ return;
+ case 598:
+ C855.m(context, binder);
+ return;
+ case 599:
+ C856.m(context, binder);
+ return;
+ case 600:
+ C857.m(context, binder);
+ return;
+ case 601:
+ C858.m(context, binder);
+ return;
+ case 602:
+ C859.m(context, binder);
+ return;
+ case 603:
+ C860.m(context, binder);
+ return;
+ case 604:
+ C861.m(context, binder);
+ return;
+ case 605:
+ C862.m(context, binder);
+ return;
+ case 606:
+ C863.m(context, binder);
+ return;
+ case 607:
+ C864.m(context, binder);
+ return;
+ case 608:
+ C865.m(context, binder);
+ return;
+ case 609:
+ C866.m(context, binder);
+ return;
+ case 610:
+ C867.m(context, binder);
+ return;
+ case 611:
+ C868.m(context, binder);
+ return;
+ case 612:
+ C869.m(context, binder);
+ return;
+ case 613:
+ C870.m(context, binder);
+ return;
+ case 614:
+ C871.m(context, binder);
+ return;
+ case 615:
+ C872.m(context, binder);
+ return;
+ case 616:
+ C873.m(context, binder);
+ return;
+ case 617:
+ C874.m(context, binder);
+ return;
+ case 618:
+ C875.m(context, binder);
+ return;
+ case 619:
+ C876.m(context, binder);
+ return;
+ case 620:
+ C877.m(context, binder);
+ return;
+ case 621:
+ C878.m(context, binder);
+ return;
+ case 622:
+ C879.m(context, binder);
+ C880.m(context, binder);
+ return;
+ case 623:
+ C881.m(context, binder);
+ return;
+ case 624:
+ C882.m(context, binder);
+ return;
+ case 625:
+ C883.m(context, binder);
+ return;
+ case 626:
+ C884.m(context, binder);
+ C885.m(context, binder);
+ C886.m(context, binder);
+ C887.m(context, binder);
+ C888.m(context, binder);
+ return;
+ case 627:
+ C889.m(context, binder);
+ return;
+ case 628:
+ C890.m(context, binder);
+ return;
+ case 629:
+ C891.m(context, binder);
+ return;
+ case 630:
+ C892.m(context, binder);
+ return;
+ case 631:
+ C893.m(context, binder);
+ return;
+ case 632:
+ C894.m(context, binder);
+ return;
+ case 633:
+ C895.m(context, binder);
+ return;
+ case 634:
+ C896.m(context, binder);
+ return;
+ case 635:
+ C897.m(context, binder);
+ return;
+ case 636:
+ C898.m(context, binder);
+ return;
+ case 637:
+ C899.m(context, binder);
+ return;
+ case 638:
+ C900.m(context, binder);
+ return;
+ case 639:
+ C901.m(context, binder);
+ return;
+ case 640:
+ C870.mImpl(context, binder);
+ return;
+ case 641:
+ C903.m(context, binder);
+ return;
+ case 642:
+ C904.m(context, binder);
+ return;
+ case 643:
+ C905.m(context, binder);
+ return;
+ case 644:
+ C906.m(context, binder);
+ C907.m(context, binder);
+ C908.m(context, binder);
+ C909.m(context, binder);
+ C910.m(context, binder);
+ return;
+ case 645:
+ C911.m(context, binder);
+ return;
+ case 646:
+ C912.m(context, binder);
+ C913.m(context, binder);
+ return;
+ case 647:
+ C914.m(context, binder);
+ return;
+ case 648:
+ C915.m(context, binder);
+ return;
+ case 649:
+ C916.m(context, binder);
+ return;
+ case 650:
+ C917.m(context, binder);
+ C918.m(context, binder);
+ return;
+ case 651:
+ C919.m(context, binder);
+ return;
+ case 652:
+ C920.m(context, binder);
+ return;
+ case 653:
+ C921.m(context, binder);
+ return;
+ case 654:
+ C922.m(context, binder);
+ return;
+ case 655:
+ C923.m(context, binder);
+ C924.m(context, binder);
+ C925.m(context, binder);
+ C926.m(context, binder);
+ C927.m(context, binder);
+ C928.m(context, binder);
+ C929.m(context, binder);
+ C930.m(context, binder);
+ C931.m(context, binder);
+ C932.m(context, binder);
+ C933.m(context, binder);
+ C934.m(context, binder);
+ C935.m(context, binder);
+ return;
+ case 656:
+ C936.m(context, binder);
+ return;
+ case 657:
+ C937.m(context, binder);
+ return;
+ case 658:
+ C938.m(context, binder);
+ return;
+ case 659:
+ C939.m(context, binder);
+ return;
+ case 660:
+ C940.m(context, binder);
+ return;
+ case 661:
+ C941.m(context, binder);
+ return;
+ case 662:
+ C942.m(context, binder);
+ return;
+ case 663:
+ C943.m(context, binder);
+ return;
+ case 664:
+ C944.m(context, binder);
+ return;
+ case 665:
+ C945.m(context, binder);
+ return;
+ case 666:
+ C946.m(context, binder);
+ return;
+ case 667:
+ C947.m(context, binder);
+ return;
+ case 668:
+ C948.m(context, binder);
+ return;
+ case 669:
+ C949.m(context, binder);
+ return;
+ case 670:
+ C950.m(context, binder);
+ return;
+ case 671:
+ C951.m(context, binder);
+ return;
+ case 672:
+ C952.m(context, binder);
+ C953.m(context, binder);
+ C954.m(context, binder);
+ C955.m(context, binder);
+ return;
+ case 673:
+ C956.m(context, binder);
+ return;
+ case 674:
+ C957.m(context, binder);
+ C958.m(context, binder);
+ return;
+ case 675:
+ C959.m(context, binder);
+ return;
+ case 676:
+ C960.m(context, binder);
+ C961.m(context, binder);
+ return;
+ case 677:
+ C962.m(context, binder);
+ return;
+ case 678:
+ C963.m(context, binder);
+ return;
+ case 679:
+ C964.m(context, binder);
+ return;
+ case 680:
+ C965.m(context, binder);
+ return;
+ case 681:
+ C966.m(context, binder);
+ return;
+ case 682:
+ C967.m(context, binder);
+ return;
+ case 683:
+ C968.m(context, binder);
+ return;
+ case 684:
+ C969.m(context, binder);
+ return;
+ case 685:
+ C970.m(context, binder);
+ return;
+ case 686:
+ C971.m(context, binder);
+ C972.m(context, binder);
+ return;
+ case 687:
+ C973.m(context, binder);
+ return;
+ case 688:
+ C974.m(context, binder);
+ return;
+ case 689:
+ C975.m(context, binder);
+ return;
+ case 690:
+ C976.m(context, binder);
+ return;
+ case 691:
+ C977.m(context, binder);
+ return;
+ case 692:
+ C978.m(context, binder);
+ return;
+ case 693:
+ C979.m(context, binder);
+ return;
+ case 694:
+ C980.m(context, binder);
+ return;
+ case 695:
+ C981.m(context, binder);
+ return;
+ case 696:
+ C982.m(context, binder);
+ return;
+ case 697:
+ C983.m(context, binder);
+ C984.m(context, binder);
+ C985.m(context, binder);
+ return;
+ case 698:
+ C986.m(context, binder);
+ return;
+ case 699:
+ C987.m(context, binder);
+ return;
+ case 700:
+ C988.m(context, binder);
+ return;
+ case 701:
+ C989.m(context, binder);
+ return;
+ case 702:
+ C990.m(context, binder);
+ return;
+ case 703:
+ C991.m(context, binder);
+ return;
+ case 704:
+ C992.m(context, binder);
+ return;
+ case 705:
+ C993.m(context, binder);
+ return;
+ case 706:
+ C994.m(context, binder);
+ return;
+ case 707:
+ C995.m(context, binder);
+ return;
+ case 708:
+ C996.m(context, binder);
+ return;
+ case 709:
+ C997.m(context, binder);
+ return;
+ case 710:
+ C998.m(context, binder);
+ return;
+ case 711:
+ C999.m(context, binder);
+ return;
+ case 712:
+ C1000.m(context, binder);
+ return;
+ case 713:
+ C1001.m(context, binder);
+ return;
+ case 714:
+ C1002.m(context, binder);
+ return;
+ case 715:
+ C1003.m(context, binder);
+ return;
+ case 716:
+ C1004.m(context, binder);
+ return;
+ case 717:
+ C1005.m(context, binder);
+ return;
+ case 718:
+ C1006.m(context, binder);
+ return;
+ case 719:
+ C1007.m(context, binder);
+ return;
+ case 720:
+ C1008.m(context, binder);
+ return;
+ case 721:
+ C1009.m(context, binder);
+ return;
+ case 722:
+ C1010.m(context, binder);
+ return;
+ case 723:
+ C1011.m(context, binder);
+ return;
+ case 724:
+ C1012.m(context, binder);
+ return;
+ case 725:
+ C1013.m(context, binder);
+ return;
+ case 726:
+ C1014.m(context, binder);
+ return;
+ case 727:
+ C1015.m(context, binder);
+ return;
+ case 728:
+ C577.mDebug(context, binder);
+ return;
+ case 729:
+ C1017.m(context, binder);
+ return;
+ case 730:
+ C1018.m(context, binder);
+ return;
+ case 731:
+ C1019.m(context, binder);
+ return;
+ case 732:
+ C1020.m(context, binder);
+ return;
+ case 733:
+ C1021.m(context, binder);
+ return;
+ case 734:
+ C1022.m(context, binder);
+ return;
+ case 735:
+ C1023.m(context, binder);
+ return;
+ case 736:
+ C1024.m(context, binder);
+ return;
+ case 737:
+ C1025.m(context, binder);
+ return;
+ case 738:
+ C1026.m(context, binder);
+ return;
+ case 739:
+ C1027.m(context, binder);
+ return;
+ case 740:
+ C1028.m(context, binder);
+ return;
+ case 741:
+ C1029.m(context, binder);
+ return;
+ case 742:
+ C1030.m(context, binder);
+ return;
+ case 743:
+ C1031.m(context, binder);
+ return;
+ case 744:
+ C1032.m(context, binder);
+ return;
+ case 745:
+ C1033.m(context, binder);
+ return;
+ case 746:
+ C1034.m(context, binder);
+ return;
+ case 747:
+ C1035.m(context, binder);
+ return;
+ case 748:
+ C1036.m(context, binder);
+ C1037.m(context, binder);
+ return;
+ case 749:
+ C1038.m(context, binder);
+ C1039.m(context, binder);
+ C1040.m(context, binder);
+ C1041.m(context, binder);
+ return;
+ case 750:
+ C1042.m(context, binder);
+ return;
+ case 751:
+ C1043.m(context, binder);
+ return;
+ case 752:
+ C1044.m(context, binder);
+ return;
+ case 753:
+ C1045.m(context, binder);
+ return;
+ case 754:
+ C1046.m(context, binder);
+ return;
+ case 755:
+ C1047.m(context, binder);
+ return;
+ case 756:
+ C848.mMap(context, binder);
+ return;
+ case 757:
+ C1049.m(context, binder);
+ return;
+ case 758:
+ C1050.m(context, binder);
+ return;
+ case 759:
+ C1051.m(context, binder);
+ return;
+ case 760:
+ C1052.m(context, binder);
+ return;
+ case 761:
+ C1053.m(context, binder);
+ return;
+ case 762:
+ C1054.m(context, binder);
+ return;
+ case 763:
+ C1055.m(context, binder);
+ return;
+ case 764:
+ C1056.m(context, binder);
+ return;
+ case 765:
+ C1057.m(context, binder);
+ return;
+ case 766:
+ C1058.m(context, binder);
+ return;
+ case 767:
+ C1059.m(context, binder);
+ return;
+ case 768:
+ C1060.m(context, binder);
+ return;
+ case 769:
+ C1061.m(context, binder);
+ return;
+ case 770:
+ C1062.m(context, binder);
+ return;
+ case 771:
+ C1063.m(context, binder);
+ return;
+ case 772:
+ C1064.m(context, binder);
+ return;
+ case 773:
+ C1065.m(context, binder);
+ return;
+ case 774:
+ C1066.m(context, binder);
+ return;
+ case 775:
+ C1067.m(context, binder);
+ return;
+ case 776:
+ C1068.m(context, binder);
+ return;
+ case 777:
+ C1069.m(context, binder);
+ return;
+ case 778:
+ C1070.m(context, binder);
+ return;
+ case 779:
+ C1071.m(context, binder);
+ return;
+ case 780:
+ C1072.m(context, binder);
+ return;
+ case 781:
+ C1073.m(context, binder);
+ C1074.m(context, binder);
+ C1075.m(context, binder);
+ C1076.m(context, binder);
+ C1077.m(context, binder);
+ C1078.m(context, binder);
+ C1079.m(context, binder);
+ C1080.m(context, binder);
+ return;
+ case 782:
+ C1081.m(context, binder);
+ return;
+ case 783:
+ C1082.m(context, binder);
+ return;
+ case 784:
+ C1083.m(context, binder);
+ return;
+ case 785:
+ C1084.m(context, binder);
+ return;
+ case 786:
+ C1085.m(context, binder);
+ return;
+ case 787:
+ C1086.m(context, binder);
+ return;
+ case 788:
+ C1087.m(context, binder);
+ return;
+ case 789:
+ C1088.m(context, binder);
+ return;
+ case 790:
+ C944.mManager(context, binder);
+ return;
+ case 791:
+ C1090.m(context, binder);
+ return;
+ case 792:
+ C1091.m(context, binder);
+ return;
+ case 793:
+ C1092.m(context, binder);
+ return;
+ case 794:
+ C1093.m(context, binder);
+ return;
+ case 795:
+ C1094.m(context, binder);
+ return;
+ case 796:
+ C1095.m(context, binder);
+ return;
+ case 797:
+ C1096.m(context, binder);
+ return;
+ case 798:
+ C1097.m(context, binder);
+ return;
+ case 799:
+ C1098.m(context, binder);
+ return;
+ case 800:
+ C1099.m(context, binder);
+ return;
+ case 801:
+ C1100.m(context, binder);
+ return;
+ case 802:
+ C1101.m(context, binder);
+ C1102.m(context, binder);
+ C1103.m(context, binder);
+ C1104.m(context, binder);
+ C1105.m(context, binder);
+ C1106.m(context, binder);
+ C1107.m(context, binder);
+ C1108.m(context, binder);
+ return;
+ case 803:
+ C1109.m(context, binder);
+ return;
+ case 804:
+ C1110.m(context, binder);
+ return;
+ case 805:
+ C1111.m(context, binder);
+ return;
+ case 806:
+ C1112.m(context, binder);
+ return;
+ case 807:
+ C1113.m(context, binder);
+ return;
+ case 808:
+ C1114.m(context, binder);
+ C1115.m(context, binder);
+ C1116.m(context, binder);
+ C1117.m(context, binder);
+ return;
+ case 809:
+ C1118.m(context, binder);
+ return;
+ case 810:
+ C1119.m(context, binder);
+ C1120.m(context, binder);
+ C1121.m(context, binder);
+ return;
+ case 811:
+ C1122.m(context, binder);
+ return;
+ case 812:
+ C1123.m(context, binder);
+ return;
+ case 813:
+ C1124.m(context, binder);
+ return;
+ case 814:
+ C1125.m(context, binder);
+ return;
+ case 815:
+ C1126.m(context, binder);
+ return;
+ case 816:
+ C1127.m(context, binder);
+ return;
+ case 817:
+ C1128.m(context, binder);
+ return;
+ case 818:
+ C1129.m(context, binder);
+ return;
+ case 819:
+ C1130.m(context, binder);
+ return;
+ case 820:
+ C1131.m(context, binder);
+ return;
+ case 821:
+ C1132.m(context, binder);
+ return;
+ case 822:
+ C1133.m(context, binder);
+ return;
+ case 823:
+ C1134.m(context, binder);
+ return;
+ case 824:
+ C1135.m(context, binder);
+ return;
+ case 825:
+ C1136.m(context, binder);
+ return;
+ case 826:
+ C1137.m(context, binder);
+ C1138.m(context, binder);
+ C1139.m(context, binder);
+ C1140.m(context, binder);
+ C1141.m(context, binder);
+ C1142.m(context, binder);
+ C1143.m(context, binder);
+ C1144.m(context, binder);
+ C1145.m(context, binder);
+ C1146.m(context, binder);
+ C1147.m(context, binder);
+ C1148.m(context, binder);
+ C1149.m(context, binder);
+ C1150.m(context, binder);
+ C1151.m(context, binder);
+ C1152.m(context, binder);
+ C1153.m(context, binder);
+ C1154.m(context, binder);
+ C1155.m(context, binder);
+ C1156.m(context, binder);
+ C1157.m(context, binder);
+ C1158.m(context, binder);
+ return;
+ case 827:
+ C1159.m(context, binder);
+ return;
+ case 828:
+ C457.mMap(context, binder);
+ return;
+ case 829:
+ C1161.m(context, binder);
+ C1162.m(context, binder);
+ C1163.m(context, binder);
+ C1164.m(context, binder);
+ C1165.m(context, binder);
+ C1166.m(context, binder);
+ C1167.m(context, binder);
+ C1168.m(context, binder);
+ C1169.m(context, binder);
+ C1170.m(context, binder);
+ C1171.m(context, binder);
+ C1172.m(context, binder);
+ C1173.m(context, binder);
+ C1174.m(context, binder);
+ C1175.m(context, binder);
+ C1176.m(context, binder);
+ C1177.m(context, binder);
+ C1178.m(context, binder);
+ C1179.m(context, binder);
+ C1180.m(context, binder);
+ C1181.m(context, binder);
+ C1181a.m(context, binder);
+ C1181b.m(context, binder);
+ return;
+ case 830:
+ C1184.m(context, binder);
+ return;
+ case 831:
+ C1185.m(context, binder);
+ return;
+ case 832:
+ C1186.m(context, binder);
+ return;
+ case 833:
+ C1187.m(context, binder);
+ return;
+ case 834:
+ C1188.m(context, binder);
+ return;
+ case 835:
+ C1189.m(context, binder);
+ return;
+ case 836:
+ C1190.m(context, binder);
+ return;
+ case 837:
+ C1191.m(context, binder);
+ return;
+ case 838:
+ C1192.m(context, binder);
+ return;
+ case 839:
+ C1193.m(context, binder);
+ return;
+ case 840:
+ C1194.m(context, binder);
+ return;
+ case 841:
+ C1195.m(context, binder);
+ return;
+ case 842:
+ C1196.m(context, binder);
+ return;
+ case 843:
+ C1197.m(context, binder);
+ return;
+ case 844:
+ C1198.m(context, binder);
+ return;
+ case 845:
+ C1199.m(context, binder);
+ return;
+ case 846:
+ C1200.m(context, binder);
+ return;
+ case 847:
+ C1201.m(context, binder);
+ return;
+ case 848:
+ C1202.m(context, binder);
+ return;
+ case 849:
+ C1203.m(context, binder);
+ return;
+ case 850:
+ C1204.m(context, binder);
+ return;
+ case 851:
+ C1205.m(context, binder);
+ return;
+ case 852:
+ C1206.m(context, binder);
+ return;
+ case 853:
+ C1207.m(context, binder);
+ return;
+ case 854:
+ C1208.m(context, binder);
+ return;
+ case 855:
+ C1209.m(context, binder);
+ return;
+ case 856:
+ C1210.m(context, binder);
+ return;
+ case 857:
+ C1211.m(context, binder);
+ return;
+ case 858:
+ C1212.m(context, binder);
+ return;
+ default:
+ return;
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ System.out.println("passed");
+ }
+
+ static boolean doThrow = false;
+}
diff --git a/test/636-wrong-static-access/expected.txt b/test/636-wrong-static-access/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/636-wrong-static-access/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/636-wrong-static-access/info.txt b/test/636-wrong-static-access/info.txt
new file mode 100644
index 0000000000..184d858cb9
--- /dev/null
+++ b/test/636-wrong-static-access/info.txt
@@ -0,0 +1,2 @@
+Test that the compiler checks if a resolved field is
+of the expected static/instance kind.
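For context, the mismatch this test sets up, condensed from the two Holder versions added below (src/ is what Foo compiles against, src2/ is what is present at runtime); the side-by-side framing here is only illustrative:

    // src/Holder.java: Foo.foo() is compiled against a static field, so its
    // dex code uses a static read (sget) of Holder.field.
    public class Holder {
      public static int field = 42;
    }

    // src2/Holder.java: at runtime the same name is an instance field, so the
    // static read must throw IncompatibleClassChangeError.
    public class Holder {
      public int field = 42;
    }

Foo.doTest() triggers the error once in the interpreter (populating the dex cache), then again after ensureJitCompiled, verifying that compiled code performs the same static/instance kind check.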
diff --git a/test/636-wrong-static-access/run b/test/636-wrong-static-access/run
new file mode 100755
index 0000000000..5e999209b8
--- /dev/null
+++ b/test/636-wrong-static-access/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make verification soft-fail, so that the verifier does not flag the
+# method we want to compile as "non-compilable" when it sees that the
+# method will throw IncompatibleClassChangeError.
+exec ${RUN} "$@" --verify-soft-fail
diff --git a/test/636-wrong-static-access/src-ex/Foo.java b/test/636-wrong-static-access/src-ex/Foo.java
new file mode 100644
index 0000000000..9e3b7a74c8
--- /dev/null
+++ b/test/636-wrong-static-access/src-ex/Foo.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Foo {
+ public static void doTest() {
+ // Execute foo once to make sure the dex cache will be updated.
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ Main.ensureJitCompiled(Foo.class, "foo");
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ }
+
+ public static void foo() {
+ System.out.println(Holder.field);
+ }
+}
diff --git a/test/636-wrong-static-access/src/Holder.java b/test/636-wrong-static-access/src/Holder.java
new file mode 100644
index 0000000000..f3b1c5717c
--- /dev/null
+++ b/test/636-wrong-static-access/src/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public static int field = 42;
+}
diff --git a/test/636-wrong-static-access/src/Main.java b/test/636-wrong-static-access/src/Main.java
new file mode 100644
index 0000000000..bd8548e372
--- /dev/null
+++ b/test/636-wrong-static-access/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+ static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/636-wrong-static-access-ex.jar";
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ if (pathClassLoader == null) {
+ throw new AssertionError("Couldn't find path class loader class");
+ }
+ Constructor<?> constructor =
+ pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
+ ClassLoader loader = (ClassLoader) constructor.newInstance(
+ DEX_FILE, ClassLoader.getSystemClassLoader());
+ Class<?> foo = loader.loadClass("Foo");
+ Method doTest = foo.getDeclaredMethod("doTest");
+ doTest.invoke(null);
+ }
+
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/636-wrong-static-access/src2/Holder.java b/test/636-wrong-static-access/src2/Holder.java
new file mode 100644
index 0000000000..a26da24319
--- /dev/null
+++ b/test/636-wrong-static-access/src2/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public int field = 42;
+}
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
index 97815ccad9..136fd80d40 100644
--- a/test/911-get-stack-trace/src/PrintThread.java
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -44,6 +44,9 @@ public class PrintThread {
if (name.contains("Daemon")) {
// Do not print daemon stacks, as they're non-deterministic.
stackSerialization = "<not printed>";
+ } else if (name.startsWith("Jit thread pool worker")) {
+        // Skip the JIT thread pool. It may or may not be there depending on configuration.
+ continue;
} else {
StringBuilder sb = new StringBuilder();
for (String[] stackElement : (String[][])stackInfo[1]) {
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index d13436ebf6..c92e49f0eb 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -17,9 +17,14 @@
#include <stdio.h>
#include "base/macros.h"
+#include "class_linker.h"
#include "jni.h"
+#include "mirror/class_loader.h"
#include "openjdkjvmti/jvmti.h"
+#include "runtime.h"
#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "ti-agent/common_helper.h"
@@ -278,69 +283,11 @@ static std::string GetClassName(jvmtiEnv* jenv, JNIEnv* jni_env, jclass klass) {
return tmp;
}
-static std::string GetThreadName(jvmtiEnv* jenv, JNIEnv* jni_env, jthread thread) {
- jvmtiThreadInfo info;
- jvmtiError result = jenv->GetThreadInfo(thread, &info);
- if (result != JVMTI_ERROR_NONE) {
- if (jni_env != nullptr) {
- JvmtiErrorToException(jni_env, result);
- } else {
- printf("Failed to get thread name.\n");
- }
- return "";
- }
-
- std::string tmp(info.name);
- jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name));
- jni_env->DeleteLocalRef(info.context_class_loader);
- jni_env->DeleteLocalRef(info.thread_group);
-
- return tmp;
-}
-
-static std::string GetThreadName(Thread* thread) {
- std::string tmp;
- thread->GetThreadName(tmp);
- return tmp;
-}
-
-static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
- JNIEnv* jni_env,
- jthread thread,
- jclass klass) {
- std::string name = GetClassName(jenv, jni_env, klass);
- if (name == "") {
- return;
- }
- std::string thread_name = GetThreadName(jenv, jni_env, thread);
- if (thread_name == "") {
- return;
- }
- std::string cur_thread_name = GetThreadName(Thread::Current());
- printf("Prepare: %s on %s (cur=%s)\n",
- name.c_str(),
- thread_name.c_str(),
- cur_thread_name.c_str());
-}
-
-static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
- JNIEnv* jni_env,
- jthread thread,
- jclass klass) {
- std::string name = GetClassName(jenv, jni_env, klass);
- if (name == "") {
- return;
- }
- std::string thread_name = GetThreadName(jenv, jni_env, thread);
- if (thread_name == "") {
- return;
- }
- printf("Load: %s on %s\n", name.c_str(), thread_name.c_str());
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadEvents(
- JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
- if (b == JNI_FALSE) {
+static void EnableEvents(JNIEnv* env,
+ jboolean enable,
+ decltype(jvmtiEventCallbacks().ClassLoad) class_load,
+ decltype(jvmtiEventCallbacks().ClassPrepare) class_prepare) {
+ if (enable == JNI_FALSE) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_CLASS_LOAD,
nullptr);
@@ -356,8 +303,8 @@ extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadEvents(
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
- callbacks.ClassLoad = ClassLoadCallback;
- callbacks.ClassPrepare = ClassPrepareCallback;
+ callbacks.ClassLoad = class_load;
+ callbacks.ClassPrepare = class_prepare;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
if (JvmtiErrorToException(env, ret)) {
return;
@@ -375,5 +322,217 @@ extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadEvents(
JvmtiErrorToException(env, ret);
}
+class ClassLoadPreparePrinter {
+ public:
+ static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == "") {
+ return;
+ }
+ std::string thread_name = GetThreadName(jenv, jni_env, thread);
+ if (thread_name == "") {
+ return;
+ }
+ printf("Load: %s on %s\n", name.c_str(), thread_name.c_str());
+ }
+
+ static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == "") {
+ return;
+ }
+ std::string thread_name = GetThreadName(jenv, jni_env, thread);
+ if (thread_name == "") {
+ return;
+ }
+ std::string cur_thread_name = GetThreadName(Thread::Current());
+ printf("Prepare: %s on %s (cur=%s)\n",
+ name.c_str(),
+ thread_name.c_str(),
+ cur_thread_name.c_str());
+ }
+
+ private:
+ static std::string GetThreadName(jvmtiEnv* jenv, JNIEnv* jni_env, jthread thread) {
+ jvmtiThreadInfo info;
+ jvmtiError result = jenv->GetThreadInfo(thread, &info);
+ if (result != JVMTI_ERROR_NONE) {
+ if (jni_env != nullptr) {
+ JvmtiErrorToException(jni_env, result);
+ } else {
+ printf("Failed to get thread name.\n");
+ }
+ return "";
+ }
+
+ std::string tmp(info.name);
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ jni_env->DeleteLocalRef(info.context_class_loader);
+ jni_env->DeleteLocalRef(info.thread_group);
+
+ return tmp;
+ }
+
+ static std::string GetThreadName(Thread* thread) {
+ std::string tmp;
+ thread->GetThreadName(tmp);
+ return tmp;
+ }
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadPreparePrintEvents(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean enable) {
+ EnableEvents(env,
+ enable,
+ ClassLoadPreparePrinter::ClassLoadCallback,
+ ClassLoadPreparePrinter::ClassPrepareCallback);
+}
+
+struct ClassLoadSeen {
+ static void JNICALL ClassLoadSeenCallback(jvmtiEnv* jenv ATTRIBUTE_UNUSED,
+ JNIEnv* jni_env ATTRIBUTE_UNUSED,
+ jthread thread ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ saw_event = true;
+ }
+
+ static bool saw_event;
+};
+bool ClassLoadSeen::saw_event = false;
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadSeenEvents(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
+ EnableEvents(env, b, ClassLoadSeen::ClassLoadSeenCallback, nullptr);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hadLoadEvent(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass Main_klass ATTRIBUTE_UNUSED) {
+ return ClassLoadSeen::saw_event ? JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isLoadedClass(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring class_name) {
+ ScopedUtfChars name(env, class_name);
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* current = Runtime::Current();
+ ClassLinker* class_linker = current->GetClassLinker();
+ bool found =
+ class_linker->LookupClass(
+ soa.Self(),
+ name.c_str(),
+ soa.Decode<mirror::ClassLoader>(current->GetSystemClassLoader())) != nullptr;
+ return found ? JNI_TRUE : JNI_FALSE;
+}
+
+class ClassLoadPrepareEquality {
+ public:
+ static constexpr const char* kClassName = "LMain$ClassE;";
+ static constexpr const char* kStorageClassName = "Main$ClassF";
+ static constexpr const char* kStorageFieldName = "STATIC";
+ static constexpr const char* kStorageFieldSig = "Ljava/lang/Object;";
+
+ static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == kClassName) {
+ found_ = true;
+ stored_class_ = jni_env->NewGlobalRef(klass);
+ weakly_stored_class_ = jni_env->NewWeakGlobalRef(klass);
+      // The following is bad and relies on implementation details. But otherwise the test
+      // would be a lot more complicated.
+ local_stored_class_ = jni_env->NewLocalRef(klass);
+ // Store the value into a field in the heap.
+ SetOrCompare(jni_env, klass, true);
+ }
+ }
+
+ static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == kClassName) {
+ CHECK(stored_class_ != nullptr);
+ CHECK(jni_env->IsSameObject(stored_class_, klass));
+ CHECK(jni_env->IsSameObject(weakly_stored_class_, klass));
+ CHECK(jni_env->IsSameObject(local_stored_class_, klass));
+ // Look up the value in a field in the heap.
+ SetOrCompare(jni_env, klass, false);
+ compared_ = true;
+ }
+ }
+
+ static void SetOrCompare(JNIEnv* jni_env, jobject value, bool set) {
+ CHECK(storage_class_ != nullptr);
+ jfieldID field = jni_env->GetStaticFieldID(storage_class_, kStorageFieldName, kStorageFieldSig);
+ CHECK(field != nullptr);
+
+ if (set) {
+ jni_env->SetStaticObjectField(storage_class_, field, value);
+ CHECK(!jni_env->ExceptionCheck());
+ } else {
+ ScopedLocalRef<jobject> stored(jni_env, jni_env->GetStaticObjectField(storage_class_, field));
+ CHECK(jni_env->IsSameObject(value, stored.get()));
+ }
+ }
+
+ static void CheckFound() {
+ CHECK(found_);
+ CHECK(compared_);
+ }
+
+ static void Free(JNIEnv* env) {
+ if (stored_class_ != nullptr) {
+ env->DeleteGlobalRef(stored_class_);
+ DCHECK(weakly_stored_class_ != nullptr);
+ env->DeleteWeakGlobalRef(weakly_stored_class_);
+ // Do not attempt to delete the local ref. It will be out of date by now.
+ }
+ }
+
+ static jclass storage_class_;
+
+ private:
+ static jobject stored_class_;
+ static jweak weakly_stored_class_;
+ static jobject local_stored_class_;
+ static bool found_;
+ static bool compared_;
+};
+jclass ClassLoadPrepareEquality::storage_class_ = nullptr;
+jobject ClassLoadPrepareEquality::stored_class_ = nullptr;
+jweak ClassLoadPrepareEquality::weakly_stored_class_ = nullptr;
+jobject ClassLoadPrepareEquality::local_stored_class_ = nullptr;
+bool ClassLoadPrepareEquality::found_ = false;
+bool ClassLoadPrepareEquality::compared_ = false;
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setEqualityEventStorageClass(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) {
+ ClassLoadPrepareEquality::storage_class_ =
+ reinterpret_cast<jclass>(env->NewGlobalRef(klass));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadPrepareEqualityEvents(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
+ EnableEvents(env,
+ b,
+ ClassLoadPrepareEquality::ClassLoadCallback,
+ ClassLoadPrepareEquality::ClassPrepareCallback);
+ if (b == JNI_FALSE) {
+ ClassLoadPrepareEquality::Free(env);
+ ClassLoadPrepareEquality::CheckFound();
+ env->DeleteGlobalRef(ClassLoadPrepareEquality::storage_class_);
+ ClassLoadPrepareEquality::storage_class_ = nullptr;
+ }
+}
+
} // namespace Test912Classes
} // namespace art
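For orientation, a minimal sketch of how the equality-check machinery above is driven from the Java side; it mirrors the Main.java additions in the next diff, so the names are the ones this commit actually adds:

    // Hand the agent the class whose static field it stores into, enable the
    // ClassLoad/ClassPrepare callbacks, trigger load+prepare of the watched
    // class, then disable the events, which also frees the stored refs and
    // CHECKs that both callbacks fired and saw the same jclass.
    setEqualityEventStorageClass(ClassF.class);
    enableClassLoadPrepareEqualityEvents(true);
    Class.forName("Main$ClassE");
    enableClassLoadPrepareEqualityEvents(false);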
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index 6ad23a4869..52a5194138 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -219,6 +219,15 @@ public class Main {
}
final ClassLoader boot = cl;
+ // The JIT may deeply inline and load some classes. Preload these for test determinism.
+    final String[] PRELOAD_FOR_JIT = {
+ "java.nio.charset.CoderMalfunctionError",
+ "java.util.NoSuchElementException"
+ };
+ for (String s : PRELOAD_FOR_JIT) {
+ Class.forName(s);
+ }
+
Runnable r = new Runnable() {
@Override
public void run() {
@@ -238,7 +247,7 @@ public class Main {
ensureJitCompiled(Main.class, "testClassEvents");
- enableClassLoadEvents(true);
+ enableClassLoadPreparePrintEvents(true);
ClassLoader cl1 = create(boot, DEX1, DEX2);
System.out.println("B, false");
@@ -270,7 +279,49 @@ public class Main {
t.start();
t.join();
- enableClassLoadEvents(false);
+ enableClassLoadPreparePrintEvents(false);
+
+    // Note: the JIT part of this test is about the JIT pulling in a class not yet touched by
+    // anything else in the system; the other things that could touch it first are the verifier
+    // and the interpreter. We block the interpreter by calling ensureJitCompiled. The verifier,
+    // however, must run in configurations where dex2oat didn't verify the class itself. So
+    // explicitly check whether the class has already been loaded, and skip the JIT test if so.
+ // TODO: Add multiple configurations to the run script once that becomes easier to do.
+ if (hasJit() && !isLoadedClass("Main$ClassD")) {
+ testClassEventsJit();
+ }
+
+ testClassLoadPrepareEquality();
+ }
+
+ private static void testClassEventsJit() throws Exception {
+ enableClassLoadSeenEvents(true);
+
+ testClassEventsJitImpl();
+
+ enableClassLoadSeenEvents(false);
+
+ if (!hadLoadEvent()) {
+ throw new RuntimeException("Did not get expected load event.");
+ }
+ }
+
+ private static void testClassEventsJitImpl() throws Exception {
+ ensureJitCompiled(Main.class, "testClassEventsJitImpl");
+
+ if (ClassD.x != 1) {
+ throw new RuntimeException("Unexpected value");
+ }
+ }
+
+ private static void testClassLoadPrepareEquality() throws Exception {
+ setEqualityEventStorageClass(ClassF.class);
+
+ enableClassLoadPrepareEqualityEvents(true);
+
+ Class.forName("Main$ClassE");
+
+ enableClassLoadPrepareEqualityEvents(false);
}
private static void printClassLoaderClasses(ClassLoader cl) {
@@ -335,9 +386,17 @@ public class Main {
private static native int[] getClassVersion(Class<?> c);
- private static native void enableClassLoadEvents(boolean b);
+ private static native void enableClassLoadPreparePrintEvents(boolean b);
+
+ private static native void ensureJitCompiled(Class<?> c, String name);
- private static native void ensureJitCompiled(Class c, String name);
+ private static native boolean hasJit();
+ private static native boolean isLoadedClass(String name);
+ private static native void enableClassLoadSeenEvents(boolean b);
+ private static native boolean hadLoadEvent();
+
+ private static native void setEqualityEventStorageClass(Class<?> c);
+ private static native void enableClassLoadPrepareEqualityEvents(boolean b);
private static class TestForNonInit {
public static double dummy = Math.random(); // So it can't be compile-time initialized.
@@ -361,6 +420,21 @@ public class Main {
public abstract static class ClassC implements InfA, InfC {
}
+ public static class ClassD {
+ static int x = 1;
+ }
+
+ public static class ClassE {
+ public void foo() {
+ }
+ public void bar() {
+ }
+ }
+
+ public static class ClassF {
+ public static Object STATIC = null;
+ }
+
private static final String DEX1 = System.getenv("DEX_LOCATION") + "/912-classes.jar";
private static final String DEX2 = System.getenv("DEX_LOCATION") + "/912-classes-ex.jar";
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index 9615e6b33d..a5dc10d59c 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -1,3 +1,6 @@
+hello - Verification
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_FAILS_VERIFICATION)
+hello - Verification
hello - NewName
Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
hello - NewName
@@ -29,3 +32,21 @@ hello2 - MultiRetrans
Transformation error : java.lang.Exception(Failed to retransform classes <LTransform;, LTransform2;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
hello - MultiRetrans
hello2 - MultiRetrans
+hello - NewMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+hello - NewMethod
+hello2 - MissingMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform3;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED)
+hello2 - MissingMethod
+hello - MethodChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED)
+hello - MethodChange
+hello - NewField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello - NewField
+hello there - MissingField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there - MissingField
+hello there again - FieldChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there again - FieldChange
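Every new failure case in this expected output follows the same doTest shape, shown condensed here (FieldChange and MethodChange below are concrete instances; "CaseName" stands in for the per-test label):

    public static void doTest(Transform t) {
      t.sayHi("CaseName");  // Before: prints the original greeting.
      try {
        Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
      } catch (Exception e) {
        System.out.println(
            "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
      }
      t.sayHi("CaseName");  // After: unchanged, since the redefinition was rejected.
    }

The expected output above is exactly this pattern's trace: the greeting, the JVMTI error naming the unsupported structural change, and the greeting again.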
diff --git a/test/921-hello-failure/src/FieldChange.java b/test/921-hello-failure/src/FieldChange.java
new file mode 100644
index 0000000000..cc2ea284d9
--- /dev/null
+++ b/test/921-hello-failure/src/FieldChange.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class FieldChange {
+ // The following is a base64 encoding of the following class.
+ // class Transform4 {
+ // private Object greeting;
+ // public Transform4(String hi) { }
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFwoABgAQBwARCAASCgACABMHABQHABUBAAhncmVldGluZwEAEkxqYXZhL2xhbmcv" +
+ "T2JqZWN0OwEABjxpbml0PgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEABENvZGUBAA9MaW5lTnVt" +
+ "YmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm00LmphdmEMAAkAFgEAD2ph" +
+ "dmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQwACQAKAQAKVHJhbnNmb3JtNAEA" +
+ "EGphdmEvbGFuZy9PYmplY3QBAAMoKVYAIAAFAAYAAAABAAIABwAIAAAAAgABAAkACgABAAsAAAAd" +
+ "AAEAAgAAAAUqtwABsQAAAAEADAAAAAYAAQAAAAMAAQANAAoAAQALAAAAIgADAAIAAAAKuwACWRID" +
+ "twAEvwAAAAEADAAAAAYAAQAAAAUAAQAOAAAAAgAP");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQASXs5yszuhud+/w4q07495k9eO7Yb+l8u4AgAAcAAAAHhWNBIAAAAAAAAAABgCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACkAQAAFAEAAFYB" +
+ "AABeAQAAbAEAAH8BAACTAQAApwEAAL4BAADPAQAA0gEAANYBAADqAQAA9AEAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAEAAAAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAcCAAAAAAAAAgACAAEAAAD7" +
+ "AQAABAAAAHAQAwAAAA4ABAACAAIAAAABAgAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+ "Bjxpbml0PgAMTFRyYW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmpl" +
+ "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3Jt" +
+ "NC5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAhncmVldGluZwAFc2F5SGkAAwEABw4A" +
+ "BQEABw4AAAEBAQACAIGABJQCAQGsAgANAAAAAAAAAAEAAAAAAAAAAQAAAAwAAABwAAAAAgAAAAUA" +
+ "AACgAAAAAwAAAAIAAAC0AAAABAAAAAEAAADMAAAABQAAAAQAAADUAAAABgAAAAEAAAD0AAAAASAA" +
+ "AAIAAAAUAQAAARAAAAEAAABQAQAAAiAAAAwAAABWAQAAAyAAAAIAAAD7AQAAACAAAAEAAAAHAgAA" +
+ "ABAAAAEAAAAYAgAA");
+
+ public static void doTest(Transform4 t) {
+ t.sayHi("FieldChange");
+ try {
+ Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("FieldChange");
+ }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 67ca1e15d6..5bbe2b5479 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -18,6 +18,7 @@ import java.util.ArrayList;
public class Main {
public static void main(String[] args) {
+ Verification.doTest(new Transform());
NewName.doTest(new Transform());
DifferentAccess.doTest(new Transform());
NewInterface.doTest(new Transform2());
@@ -25,6 +26,12 @@ public class Main {
ReorderInterface.doTest(new Transform2());
MultiRedef.doTest(new Transform(), new Transform2());
MultiRetrans.doTest(new Transform(), new Transform2());
+ NewMethod.doTest(new Transform());
+ MissingMethod.doTest(new Transform3());
+ MethodChange.doTest(new Transform());
+ NewField.doTest(new Transform());
+ MissingField.doTest(new Transform4("there"));
+ FieldChange.doTest(new Transform4("there again"));
}
// Transforms the class. This throws an exception if something goes wrong.
diff --git a/test/921-hello-failure/src/MethodChange.java b/test/921-hello-failure/src/MethodChange.java
new file mode 100644
index 0000000000..16f57788c8
--- /dev/null
+++ b/test/921-hello-failure/src/MethodChange.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MethodChange {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" +
+ "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUABgAAAAAA" +
+ "AgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAIAAAALAAwAAQAJAAAA" +
+ "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAQAAQANAAAAAgAO");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCrV81cy4Q+YKMMMqc0bZEO5Y1X5u7irPeQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" +
+ "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" +
+ "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yNAAFc2F5SGkAAgAHDgAEAQAHDgAAAAEBAICABIgCAQCgAgwAAAAA" +
+ "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" +
+ "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" +
+ "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA=");
+
+ public static void doTest(Transform t) {
+ t.sayHi("MethodChange");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MethodChange");
+ }
+}
diff --git a/test/921-hello-failure/src/MissingField.java b/test/921-hello-failure/src/MissingField.java
new file mode 100644
index 0000000000..2f643cc871
--- /dev/null
+++ b/test/921-hello-failure/src/MissingField.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingField {
+ // The following is a base64 encoding of the following class.
+ // class Transform4 {
+ // public Transform4(String s) { }
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAOBwAPCAAQCgACABEHABIHABMBAAY8aW5pdD4BABUoTGphdmEvbGFuZy9T" +
+ "dHJpbmc7KVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAP" +
+ "VHJhbnNmb3JtNC5qYXZhDAAHABQBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90IGJlIGNh" +
+ "bGxlZCEMAAcACAEAClRyYW5zZm9ybTQBABBqYXZhL2xhbmcvT2JqZWN0AQADKClWACAABQAGAAAA" +
+ "AAACAAEABwAIAAEACQAAAB0AAQACAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsACAABAAkA" +
+ "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAAwAAAACAA0=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDBVUVrMUEFx3lYkgJF54evq9vHvOUDZveUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+ "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAEAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPEBAAAAAAAAAgACAAEAAADlAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADrAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+ "YW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+ "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtNC5qYXZhAAFWAAJW" +
+ "TAASZW1pdHRlcjogamFjay00LjIyAAVzYXlIaQACAQAHDgAEAQAHDgAAAAEBAIGABIgCAQGgAgAM" +
+ "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+ "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+ "AAIAAADlAQAAACAAAAEAAADxAQAAABAAAAEAAAAAAgAA");
+
+ public static void doTest(Transform4 t) {
+ t.sayHi("MissingField");
+ try {
+ Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MissingField");
+ }
+}
diff --git a/test/921-hello-failure/src/MissingMethod.java b/test/921-hello-failure/src/MissingMethod.java
new file mode 100644
index 0000000000..3f1925c9ad
--- /dev/null
+++ b/test/921-hello-failure/src/MissingMethod.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingMethod {
+ // The following is a base64 encoding of the following class.
+ // class Transform3 {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAPVHJhbnNmb3JtMy5qYXZhDAAHAAgBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90" +
+ "IGJlIGNhbGxlZCEMAAcADAEAClRyYW5zZm9ybTMBABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAA" +
+ "AAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsADAABAAkA" +
+ "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA0AAAACAA4=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDnVQvyn7XrwDiCC/SE55zBCtEqk4pzA2mUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+ "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPABAAAAAAAAAQABAAEAAADlAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADqAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+ "YW5zZm9ybTM7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+ "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtMy5qYXZhAAFWAAJW" +
+ "TAASZW1pdHRlcjogamFjay00LjI0AAVzYXlIaQACAAcOAAQBAAcOAAAAAQEAgIAEiAIBAaACAAAM" +
+ "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+ "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+ "AAIAAADlAQAAACAAAAEAAADwAQAAABAAAAEAAAAAAgAA");
+
+ public static void doTest(Transform3 t) {
+ t.sayHi("MissingMethod");
+ try {
+ Main.doCommonClassRedefinition(Transform3.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MissingMethod");
+ }
+}
diff --git a/test/921-hello-failure/src/NewField.java b/test/921-hello-failure/src/NewField.java
new file mode 100644
index 0000000000..c85b79e824
--- /dev/null
+++ b/test/921-hello-failure/src/NewField.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewField {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // private Object field;
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFwoABgARBwASCAATCgACABQHABUHABYBAAVmaWVsZAEAEkxqYXZhL2xhbmcvT2Jq" +
+ "ZWN0OwEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAFShM" +
+ "amF2YS9sYW5nL1N0cmluZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwACQAKAQAP" +
+ "amF2YS9sYW5nL0Vycm9yAQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAJAA4BAAlUcmFuc2Zvcm0B" +
+ "ABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAAAQACAAcACAAAAAIAAAAJAAoAAQALAAAAHQABAAEA" +
+ "AAAFKrcAAbEAAAABAAwAAAAGAAEAAAABAAEADQAOAAEACwAAACIAAwACAAAACrsAAlkSA7cABL8A" +
+ "AAABAAwAAAAGAAEAAAAEAAEADwAAAAIAEA==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBNWknL2iyjim487p0EIH/8V5OjOeLgw5e0AgAAcAAAAHhWNBIAAAAAAAAAABQCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACgAQAAFAEAAFYB" +
+ "AABeAQAAawEAAH4BAACSAQAApgEAAL0BAADNAQAA0AEAANQBAADoAQAA7wEAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAAAAAAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAECAAAAAAAAAQABAAEAAAD2" +
+ "AQAABAAAAHAQAwAAAA4ABAACAAIAAAD7AQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+ "Bjxpbml0PgALTFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVj" +
+ "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0u" +
+ "amF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMgAFZmllbGQABXNheUhpAAEABw4ABAEABw4A" +
+ "AAEBAQACAICABJQCAQGsAgAAAA0AAAAAAAAAAQAAAAAAAAABAAAADAAAAHAAAAACAAAABQAAAKAA" +
+ "AAADAAAAAgAAALQAAAAEAAAAAQAAAMwAAAAFAAAABAAAANQAAAAGAAAAAQAAAPQAAAABIAAAAgAA" +
+ "ABQBAAABEAAAAQAAAFABAAACIAAADAAAAFYBAAADIAAAAgAAAPYBAAAAIAAAAQAAAAECAAAAEAAA" +
+ "AQAAABQCAAA=");
+
+ public static void doTest(Transform t) {
+ t.sayHi("NewField");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("NewField");
+ }
+}
diff --git a/test/921-hello-failure/src/NewMethod.java b/test/921-hello-failure/src/NewMethod.java
new file mode 100644
index 0000000000..5eac670c68
--- /dev/null
+++ b/test/921-hello-failure/src/NewMethod.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewMethod {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // public void extraMethod() {}
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFgoABgAQBwARCAASCgACABMHABQHABUBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQALZXh0cmFNZXRob2QBAAVzYXlIaQEAFShMamF2YS9sYW5nL1N0cmlu" +
+ "ZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwABwAIAQAPamF2YS9sYW5nL0Vycm9y" +
+ "AQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAHAA0BAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+ "ZWN0ACAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAA" +
+ "AQABAAsACAABAAkAAAAZAAAAAQAAAAGxAAAAAQAKAAAABgABAAAAAgABAAwADQABAAkAAAAiAAMA" +
+ "AgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA4AAAACAA8=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBeV7dLAwN1GBTa/yRlkuiIQatNHghVdrnIAgAAcAAAAHhWNBIAAAAAAAAAADQCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAAAAAAAAAAAAAUAAADMAAAAAQAAAPQAAAC0AQAAFAEAAGoB" +
+ "AAByAQAAfwEAAJIBAACmAQAAugEAANEBAADhAQAA5AEAAOgBAAD8AQAACQIAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABkAQAAAAAAAAAAAAAAAAAACgAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAACACAAAAAAAAAQABAAEAAAAQ" +
+ "AgAABAAAAHAQBAAAAA4AAQABAAAAAAAVAgAAAQAAAA4AAAAEAAIAAgAAABoCAAAJAAAAIgABABsB" +
+ "BQAAAHAgAwAQACcAAAABAAAAAwAGPGluaXQ+AAtMVHJhbnNmb3JtOwARTGphdmEvbGFuZy9FcnJv" +
+ "cjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVTaG91bGQgbm90IGJl" +
+ "IGNhbGxlZCEADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAtleHRy" +
+ "YU1ldGhvZAAFc2F5SGkAAQAHDgACAAcOAAQBAAcOAAAAAQIAgIAElAIBAawCAQHAAgAADAAAAAAA" +
+ "AAABAAAAAAAAAAEAAAAMAAAAcAAAAAIAAAAFAAAAoAAAAAMAAAACAAAAtAAAAAUAAAAFAAAAzAAA" +
+ "AAYAAAABAAAA9AAAAAEgAAADAAAAFAEAAAEQAAABAAAAZAEAAAIgAAAMAAAAagEAAAMgAAADAAAA" +
+ "EAIAAAAgAAABAAAAIAIAAAAQAAABAAAANAIAAA==");
+
+ public static void doTest(Transform t) {
+ t.sayHi("NewMethod");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("NewMethod");
+ }
+}
diff --git a/test/921-hello-failure/src/Transform3.java b/test/921-hello-failure/src/Transform3.java
new file mode 100644
index 0000000000..d2cb064956
--- /dev/null
+++ b/test/921-hello-failure/src/Transform3.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform3 {
+ public void extraMethod(String name) {
+ System.out.println("extraMethod - " + name);
+ }
+ public void sayHi(String name) {
+ System.out.println("hello2 - " + name);
+ }
+}
diff --git a/test/921-hello-failure/src/Transform4.java b/test/921-hello-failure/src/Transform4.java
new file mode 100644
index 0000000000..fd763386ba
--- /dev/null
+++ b/test/921-hello-failure/src/Transform4.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform4 {
+ private String greeting;
+ public Transform4(String hi) {
+ greeting = hi;
+ }
+ public void sayHi(String name) {
+ System.out.println("hello " + greeting + " - " + name);
+ }
+}
diff --git a/test/921-hello-failure/src/Verification.java b/test/921-hello-failure/src/Verification.java
new file mode 100644
index 0000000000..242b5d2b44
--- /dev/null
+++ b/test/921-hello-failure/src/Verification.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class Verification {
+ // Jasmin program:
+ //
+ // .source Transform.java
+ // .class Transform
+ // .super java/lang/Object
+ // .method <init>()V
+ // .limit stack 1
+ // .limit locals 1
+ // aload_0
+ // invokespecial java/lang/Object/<init>()V
+ // return
+ // .end method
+ // .method sayHi(Ljava/lang/String;)V
+ // .limit stack 1
+ // .limit locals 2
+ // aload_1
+ // areturn
+ // .end method
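+  //
+  // Note that sayHi is declared with descriptor (Ljava/lang/String;)V but ends
+  // with areturn, so the redefined class should be rejected by the verifier.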
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgADAC0ADgoADQAHBwAIAQAQamF2YS9sYW5nL09iamVjdAEAClNvdXJjZUZpbGUBAAY8aW5p" +
+ "dD4BAAVzYXlIaQwABQAKAQAJVHJhbnNmb3JtAQAEQ29kZQEAAygpVgEADlRyYW5zZm9ybS5qYXZh" +
+ "AQAVKExqYXZhL2xhbmcvU3RyaW5nOylWBwADACAAAgANAAAAAAACAAAABQAKAAEACQAAABEAAQAB" +
+ "AAAABSq3AAGxAAAAAAABAAYADAABAAkAAAAOAAEAAgAAAAIrsAAAAAAAAQAEAAAAAgAL");
+
+ // Smali program:
+ //
+ // .class LTransform;
+ // .super Ljava/lang/Object;
+ // .source "Transform.java"
+ // # direct methods
+ // .method constructor <init>()V
+ // .registers 1
+ // invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ // return-void
+ // .end method
+ // # virtual methods
+ // .method public sayHi(Ljava/lang/String;)V
+ // .registers 2
+ // return-object p1
+ // .end method
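+  //
+  // As above, sayHi uses return-object in a method declared to return void, so
+  // this dex bytecode should also fail verification.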
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQClOAc4ZDMXaHMezhYcqZxcjUeVCWRYUkooAgAAcAAAAHhWNBIAAAAAAAAAAJQBAAAI" +
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAA4AQAA8AAAAPAA" +
+ "AAD4AAAABQEAABkBAAAtAQAAPQEAAEABAABEAQAAAQAAAAIAAAADAAAABQAAAAUAAAADAAAAAAAA" +
+ "AAYAAAADAAAATAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAEAAAA" +
+ "AAAAAIYBAAAAAAAABjxpbml0PgALTFRyYW5zZm9ybTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph" +
+ "dmEvbGFuZy9TdHJpbmc7AA5UcmFuc2Zvcm0uamF2YQABVgACVkwABXNheUhpAAABAAAAAgAAAAAA" +
+ "AAAAAAAAAQABAAEAAAAAAAAABAAAAHAQAgAAAA4AAgACAAAAAAAAAAAAAQAAABEBAAABAQCAgATc" +
+ "AgEB9AIMAAAAAAAAAAEAAAAAAAAAAQAAAAgAAABwAAAAAgAAAAQAAACQAAAAAwAAAAIAAACgAAAA" +
+ "BQAAAAMAAAC4AAAABgAAAAEAAADQAAAAAiAAAAgAAADwAAAAARAAAAEAAABMAQAAAxAAAAIAAABU" +
+ "AQAAASAAAAIAAABcAQAAACAAAAEAAACGAQAAABAAAAEAAACUAQAA");
+
+ public static void doTest(Transform t) {
+ t.sayHi("Verification");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("Verification");
+ }
+}
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index 29c4aa330c..f18d70e8e1 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -20,6 +20,7 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.concurrent.CountDownLatch;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -162,8 +163,20 @@ public class Main {
private static void doAllThreadsTests() {
Thread[] threads = getAllThreads();
- Arrays.sort(threads, THREAD_COMP);
- System.out.println(Arrays.toString(threads));
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+ System.out.println(threadList);
}
private static void doTLSTests() throws Exception {
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index 3d7a4ca740..bf7441f9bf 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -14,8 +14,12 @@
* limitations under the License.
*/
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
public class Main {
public static void main(String[] args) throws Exception {
@@ -64,10 +68,23 @@ public class Main {
Thread[] threads = (Thread[])data[0];
ThreadGroup[] groups = (ThreadGroup[])data[1];
- Arrays.sort(threads, THREAD_COMP);
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+
Arrays.sort(groups, THREADGROUP_COMP);
System.out.println(tg.getName() + ":");
- System.out.println(" " + Arrays.toString(threads));
+ System.out.println(" " + threadList);
System.out.println(" " + Arrays.toString(groups));
if (tg.getParent() != null) {
diff --git a/test/931-agent-thread/agent_thread.cc b/test/931-agent-thread/agent_thread.cc
index 6ace4cea68..a488d9a803 100644
--- a/test/931-agent-thread/agent_thread.cc
+++ b/test/931-agent-thread/agent_thread.cc
@@ -15,6 +15,7 @@
*/
#include <inttypes.h>
+#include <sched.h>
#include "barrier.h"
#include "base/logging.h"
@@ -125,6 +126,24 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testAgentThread(
data.b.Wait(Thread::Current());
+ // Scheduling may mean that the agent thread is put to sleep. Wait until it's dead in an effort
+ // to not unload the plugin and crash.
+ for (;;) {
+ NanoSleep(1000 * 1000);
+ jint thread_state;
+ jvmtiError state_result = jvmti_env->GetThreadState(thread.get(), &thread_state);
+ if (JvmtiErrorToException(env, state_result)) {
+ return;
+ }
+ if (thread_state == 0 || // Was never alive.
+ (thread_state & JVMTI_THREAD_STATE_TERMINATED) != 0) { // Was alive and died.
+ break;
+ }
+ }
+ // Yield and sleep a bit more, to give the plugin time to tear down the native thread structure.
+ sched_yield();
+ NanoSleep(100 * 1000 * 1000);
+
env->DeleteGlobalRef(data.main_thread);
}
diff --git a/test/942-private-recursive/src/Transform.java b/test/942-private-recursive/src/Transform.java
index dd5452cac8..7714326066 100644
--- a/test/942-private-recursive/src/Transform.java
+++ b/test/942-private-recursive/src/Transform.java
@@ -15,10 +15,6 @@
*/
class Transform {
- public void sayHi(int recur, Runnable r) {
- privateSayHi(recur, r);
- }
-
private void privateSayHi(int recur, Runnable r) {
System.out.println("hello" + recur);
if (recur == 1) {
@@ -29,4 +25,8 @@ class Transform {
}
System.out.println("goodbye" + recur);
}
+
+ public void sayHi(int recur, Runnable r) {
+ privateSayHi(recur, r);
+ }
}
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index 801904d9c9..fc9f030559 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -676,6 +676,13 @@ public class Main {
Integer.class, MethodType.methodType(Integer.class, Integer.class));
fail("Unexpected success for non-void type for findConstructor");
} catch (NoSuchMethodException e) {}
+
+ // Array class constructor.
+ try {
+ MethodHandle foo = MethodHandles.lookup().findConstructor(
+ Object[].class, MethodType.methodType(void.class));
+ fail("Unexpected success for array class type for findConstructor");
+ } catch (NoSuchMethodException e) {}
}
public static void testStringConstructors() throws Throwable {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 742353da46..1938b92db8 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -533,26 +533,13 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# also uses Generic JNI instead of the JNI compiler.
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
-# Test 902 hits races with the JIT compiler. b/32821077
# Test 629 requires compilation.
-# Test 914, 915, 917, & 919 are very sensitive to the exact state of the stack,
-# including the jit-inserted runtime frames. This causes them to be somewhat
-# flaky as JIT tests. This should be fixed once b/33630159 or b/33616143 are
-# resolved but until then just disable them. Test 916 already checks this
-# feature for JIT use cases in a way that is resilient to the jit frames.
# 912: b/34655682
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
629-vdex-speed \
- 902-hello-transformation \
904-object-allocation \
906-iterate-heap \
- 912-classes \
- 914-hello-obsolescence \
- 915-obsolete-2 \
- 917-fields-transformation \
- 919-obsolete-fields \
- 926-multi-obsolescence \
ifneq (,$(filter jit,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -863,6 +850,69 @@ ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
endif
+# Host executables.
+host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+
+# Classpath for Jack compilation for host.
+host_prereq_rules += $(HOST_JACK_CLASSPATH_DEPENDENCIES)
+
+# Required for dx, jasmin, smali, dexmerger, jack.
+host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
+
+host_prereq_rules += $(HOST_OUT_EXECUTABLES)/hprof-conv
+
+# Classpath for Jack compilation for target.
+target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
+
+# Sync test files to the target, depends upon all things that must be pushed
+# to the target.
+target_prereq_rules += test-art-target-sync
+
+define core-image-dependencies
+ image_suffix := $(3)
+ ifeq ($(3),regalloc_gc)
+ image_suffix:=optimizing
+ else
+ ifeq ($(3),jit)
+ image_suffix:=interpreter
+ endif
+ endif
+ ifeq ($(2),no-image)
+ $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_$(4))
+ else
+ ifeq ($(2),npicimage)
+ $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_no-pic_$(4))
+ else
+ ifeq ($(2),picimage)
+ $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_$(4))
+ else
+ ifeq ($(2),multinpicimage)
+ $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(4))
+ else
+ ifeq ($(2),multipicimage)
+ $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_multi_$(4))
+ endif
+ endif
+ endif
+ endif
+ endif
+endef
+
+# Add core image dependencies required for given target - HOST or TARGET,
+# IMAGE_TYPE, COMPILER_TYPE and ADDRESS_SIZE to the prereq_rules.
+$(foreach target, $(TARGET_TYPES), \
+ $(foreach image, $(IMAGE_TYPES), \
+ $(foreach compiler, $(COMPILER_TYPES), \
+ $(foreach address_size, $(ALL_ADDRESS_SIZES), $(eval \
+ $(call core-image-dependencies,$(target),$(image),$(compiler),$(address_size)))))))
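+
+# For example, $(call core-image-dependencies,host,picimage,optimizing,64) should
+# append $(HOST_CORE_IMAGE_optimizing_pic_64) to host_prereq_rules (assuming the
+# name-to-var macro upper-cases its argument).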
+
+test-art-host-run-test-dependencies : $(host_prereq_rules)
+test-art-target-run-test-dependencies : $(target_prereq_rules)
+test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
+
+host_prereq_rules :=
+target_prereq_rules :=
+
# Create a rule to build and run tests following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
# {4: interpreter optimizing jit interp-ac}-
diff --git a/test/knownfailures.json b/test/knownfailures.json
new file mode 100644
index 0000000000..784f49c4b9
--- /dev/null
+++ b/test/knownfailures.json
@@ -0,0 +1,338 @@
+[
+ {
+ "test": "153-reference-stress",
+ "description": ["Disable 153-reference-stress temporarily until a fix",
+ "arrives."],
+ "bug": "http://b/33389022"
+ },
+ {
+ "test": "080-oom-fragmentation",
+ "description": "Disable 080-oom-fragmentation due to flakes.",
+ "bug": "http://b/33795328"
+ },
+ {
+ "tests": ["497-inlining-and-class-loader",
+ "542-unresolved-access-check"],
+ "description": ["Disable 497-inlining-and-class-loader and ",
+ "542-unresolved-access-check until they are rewritten.",
+ "These tests use a broken class loader that tries to",
+ "register a dex file that's already registered with a",
+ "different loader."],
+ "bug": "http://b/34193123"
+ },
+ {
+ "test": "149-suspend-all-stress",
+ "description": "Disable 149-suspend-all-stress, its output is flaky",
+ "bug": "http://b/28988206"
+ },
+ {
+ "test": "577-profile-foreign-dex",
+ "description": "Disable 577-profile-foreign-dex",
+ "bug": "http://b/27454772"
+ },
+ {
+ "tests": ["002-sleep",
+ "053-wait-some",
+ "055-enum-performance",
+ "133-static-invoke-super"],
+ "description": ["Tests that are timing sensitive and flaky on heavily",
+ "loaded systems."]
+ },
+ {
+ "test": "147-stripped-dex-fallback",
+ "variant": "target",
+ "description": ["147-stripped-dex-fallback isn't supported on device",
+ "because --strip-dex requires the zip command."]
+ },
+ {
+ "test": "569-checker-pattern-replacement",
+ "variant": "target",
+ "description": ["569-checker-pattern-replacement tests behaviour",
+ "present only on host."]
+ },
+ {
+ "tests": ["116-nodex2oat",
+ "118-noimage-dex2oat",
+ "134-nodex2oat-nofallback"],
+ "variant": "prebuild",
+ "description": ["Note 116-nodex2oat is not broken per-se it just",
+ "doesn't (and isn't meant to) work with --prebuild."]
+ },
+ {
+ "test": "554-jit-profile-file",
+ "variant": "no-prebuild | interpreter",
+ "description": ["554-jit-profile-file is disabled because it needs a",
+ "primary oat file to know what it should save."]
+ },
+ {
+ "tests": ["529-checker-unresolved", "555-checker-regression-x86const"],
+ "variant": "no-prebuild",
+ "bug": "http://b/27784033"
+ },
+ {
+ "tests": ["117-nopatchoat",
+ "147-stripped-dex-fallback",
+ "608-checker-unresolved-lse"],
+ "variant": "no-prebuild"
+ },
+ {
+ "tests": ["117-nopatchoat",
+ "118-noimage-dex2oat",
+ "119-noimage-patchoat",
+ "554-jit-profile-file"],
+ "variant": "no-relocate",
+ "description": ["117-nopatchoat is not broken per-se it just doesn't",
+ "work (and isn't meant to) without --prebuild",
+ "--relocate"]
+ },
+ {
+ "test": "137-cfi",
+ "variant": "interp-ac",
+ "description": ["Temporarily disable some broken tests when forcing",
+ "access checks in interpreter"],
+ "bug": "http://b/22414682"
+ },
+ {
+ "test" : "629-vdex-speed",
+ "variant": "interp-ac | no-dex2oat | interpreter | jit | relocate-npatchoat",
+ "description": "629 requires compilation."
+ },
+ {
+ "test": "137-cfi",
+ "variant": "gcstress",
+ "description": ["137-cfi needs to unwind a second forked process. We're",
+ "using a primitive sleep to wait till we hope the",
+ "second process got into the expected state. The",
+ "slowness of gcstress makes this bad."]
+ },
+ {
+ "tests": ["908-gc-start-finish",
+ "913-heaps"],
+ "variant": "gcstress",
+ "description": ["908-gc-start-finish expects GCs only to be run at",
+ "clear points. The reduced heap size makes this",
+ "non-deterministic. Same for 913."]
+ },
+ {
+ "test": "961-default-iface-resolution-gen",
+ "variant": "gcstress",
+ "description": ["961-default-iface-resolution-gen and",
+ "964-default-iface-init-genare very long tests that",
+ "often will take more than the timeout to run when",
+ "gcstress is enabled. This is because gcstress slows",
+ "down allocations significantly which these tests do a",
+ "lot."]
+ },
+ {
+ "tests": ["964-default-iface-init-gen",
+ "154-gc-loop"],
+ "variant": "gcstress"
+ },
+ {
+ "test": "115-native-bridge",
+ "variant": "target",
+ "description": ["115-native-bridge setup is complicated. Need to",
+ "implement it correctly for the target."]
+ },
+ {
+ "test": "130-hprof",
+ "variant": "target",
+ "desription": ["130-hprof dumps the heap and runs hprof-conv to check",
+ "whether the file is somewhat readable. Thi is only",
+ "possible on the host. TODO: Turn off all the other",
+ "combinations, this is more about testing actual ART",
+ "code. A gtest is very hard to write here, as (for a",
+ "complete test) JDWP must be set up."]
+ },
+ {
+ "test": "131-structural-change",
+ "variant": "debug",
+ "description": ["131 is an old test. The functionality has been",
+ "implemented at an earlier stage and is checked",
+ "in tests 138. Blacklisted for debug builds since",
+ "these builds have duplicate classes checks which",
+ "punt to interpreter"]
+ },
+ {
+ "test": "138-duplicate-classes-check",
+ "variant": "ndebug",
+ "description": ["Turned on for debug builds since debug builds have",
+ "duplicate classes checks enabled"],
+ "bug": "http://b/2133391"
+ },
+ {
+ "test": "147-stripped-dex-fallback",
+ "variant": "no-dex2oat | no-image | relocate-npatchoat",
+ "description": ["147-stripped-dex-fallback is disabled because it",
+ "requires --prebuild."]
+ },
+ {
+ "test": "554-jit-profile-file",
+ "variant": "no-dex2oat | no-image | relocate-npatchoat",
+ "description": ["554-jit-profile-file is disabled because it needs a",
+ "primary oat file to know what it should save."]
+ },
+ {
+ "tests": ["116-nodex2oat",
+ "117-nopatchoat",
+ "118-noimage-dex2oat",
+ "119-noimage-patchoat",
+ "137-cfi",
+ "138-duplicate-classes-check2"],
+ "variant": "no-dex2oat | no-image | relocate-npatchoat",
+ "description": ["All these tests check that we have sane behavior if we",
+ "don't have a patchoat or dex2oat. Therefore we",
+ "shouldn't run them in situations where we actually",
+ "don't have these since they explicitly test for them.",
+ "These all also assume we have an image."]
+ },
+ {
+ "tests": ["137-cfi",
+ "138-duplicate-classes-check",
+ "018-stack-overflow",
+ "961-default-iface-resolution-gen",
+ "964-default-iface-init"],
+ "variant": "no-image",
+ "description": ["This test fails without an image. 018, 961, 964 often",
+ "time out."],
+ "bug": "http://b/34369284"
+ },
+ {
+ "test": "137-cfi",
+ "description": ["This test unrolls and expects managed frames, but",
+ "tracing means we run the interpreter."],
+ "variant": "trace | stream"
+ },
+ {
+ "tests": ["802-deoptimization",
+ "570-checker-osr"],
+ "description": ["This test dynamically enables tracing to force a",
+ "deoptimization. This makes the test meaningless",
+ "when already tracing, and writes an error message",
+ "that we do not want to check for."],
+ "variant": "trace | stream"
+ },
+ {
+ "test": "130-hprof",
+ "description": "130 occasional timeout",
+ "bug": "http://b/32383962",
+ "variant": "trace | stream"
+ },
+ {
+ "tests": ["087-gc-after-link",
+ "141-class-unload"],
+ "variant": "trace | stream"
+ },
+ {
+ "tests": ["604-hot-static-interface",
+ "612-jit-dex-cache",
+ "613-inlining-dex-cache",
+ "616-cha",
+ "626-set-resolved-string"],
+ "variant": "trace | stream",
+ "description": ["These tests expect JIT compilation, which is",
+ "suppressed when tracing."]
+ },
+ {
+ "test": "137-cfi",
+ "description": ["CFI unwinding expects managed frames, and the test",
+ "does not iterate enough to even compile. JIT also",
+ "uses Generic JNI instead of the JNI compiler."],
+ "variant": "interpreter | jit"
+ },
+ {
+ "test": "906-iterate-heap",
+ "description": ["Test 906 iterates the heap filtering with different",
+ "options. No instances should be created between those",
+ "runs to be able to have precise checks."],
+ "variant": "jit"
+ },
+ {
+ "tests": ["904-object-allocation"],
+ "variant": "jit"
+ },
+ {
+ "tests": ["570-checker-select",
+ "484-checker-register-hints"],
+ "description": ["These tests were based on the linear scan allocator,",
+ "which makes different decisions than the graph",
+ "coloring allocator. (These attempt to test for code",
+ "quality, not correctness.)"],
+ "variant": "regalloc_gc"
+ },
+ {
+ "tests": ["454-get-vreg",
+ "457-regs",
+ "602-deoptimizeable"],
+ "description": ["Tests that should fail when the optimizing compiler ",
+ "compiles them non-debuggable."],
+ "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable"
+ },
+ {
+ "test": "596-app-images",
+ "variant": "npictest"
+ },
+ {
+ "test": "055-enum-performance",
+ "variant": "optimizing | regalloc_gc",
+ "description": ["055: Exceeds run time limits due to heap poisoning ",
+ "instrumentation (on ARM and ARM64 devices)."]
+ },
+ {
+ "test": "909-attach-agent",
+ "variant": "debuggable",
+ "description": "Tests that check semantics for a non-debuggable app."
+ },
+ {
+ "test": "137-cfi",
+ "variant": "debuggable",
+ "description": ["The test relies on AOT code and debuggable makes us",
+ "JIT always."]
+ },
+ {
+ "tests": ["000-nop",
+ "134-nodex2oat-nofallback",
+ "147-stripped-dex-fallback",
+ "595-profile-saving"],
+ "description": "The doesn't compile anything",
+ "env_vars": {"ART_TEST_BISECTION": "true"},
+ "variant": "optimizing | regalloc_gc"
+ },
+ {
+ "tests": "089-many-methods",
+ "description": "The test tests a build failure",
+ "env_vars": {"ART_TEST_BISECTION": "true"},
+ "variant": "optimizing | regalloc_gc"
+ },
+ {
+ "tests": ["018-stack-overflow",
+ "116-nodex2oat",
+ "117-nopatchoat",
+ "118-noimage-dex2oat",
+ "119-noimage-patchoat",
+ "126-miranda-multidex",
+ "137-cfi"],
+ "description": "The test run dalvikvm more than once.",
+ "env_vars": {"ART_TEST_BISECTION": "true"},
+ "variant": "optimizing | regalloc_gc"
+ },
+ {
+ "tests": ["115-native-bridge",
+ "088-monitor-verification"],
+ "description": "The test assume they are always compiled.",
+ "env_vars": {"ART_TEST_BISECTION": "true"},
+ "variant": "optimizing | regalloc_gc"
+ },
+ {
+ "test": "055-enum-performance",
+ "description": ["The test tests performance which degrades during",
+ "bisecting."],
+ "env_vars": {"ART_TEST_BISECTION": "true"},
+ "variant": "optimizing | regalloc_gc"
+ },
+ {
+ "test": "537-checker-arraycopy",
+ "env_vars": {"ART_USE_READ_BARRIER": "true"},
+ "variant": "interpreter | optimizing | regalloc_gc | jit"
+ }
+]
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
new file mode 100644
index 0000000000..1dc8ce552c
--- /dev/null
+++ b/test/testrunner/env.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import tempfile
+import subprocess
+
+env = dict(os.environ)
+
+def getEnvBoolean(var, default):
+ val = env.get(var)
+ if val:
+ if val == "True" or val == "true":
+ return True
+ if val == "False" or val == "false":
+ return False
+ return default
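+
+# For example (a sketch; assumes the variable is exported in the caller's shell):
+#   getEnvBoolean('ART_TEST_FULL', False)  # True for "true"/"True", else the default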
+
+_DUMP_MANY_VARS_LIST = ['HOST_2ND_ARCH_PREFIX',
+ 'TARGET_2ND_ARCH',
+ 'TARGET_ARCH',
+ 'HOST_PREFER_32_BIT',
+ 'HOST_OUT_EXECUTABLES']
+_DUMP_MANY_VARS = None # To be set to a dictionary with above list being the keys,
+ # and the build variable being the value.
+def dump_many_vars(var_name):
+ """
+ Reach into the Android build system to dump many build vars simultaneously.
+  Since the make system is so slow, we want to avoid calling into the build frequently.
+ """
+ global _DUMP_MANY_VARS
+ global _DUMP_MANY_VARS_LIST
+
+ # Look up var from cache.
+ if _DUMP_MANY_VARS:
+ return _DUMP_MANY_VARS[var_name]
+
+ all_vars=" ".join(_DUMP_MANY_VARS_LIST)
+
+ # The command is taken from build/envsetup.sh to fetch build variables.
+ command = ("CALLED_FROM_SETUP=true " # Enable the 'dump-many-vars' make target.
+ "BUILD_SYSTEM=build/core " # Set up lookup path for make includes.
+ "make --no-print-directory -C \"%s\" -f build/core/config.mk "
+ "dump-many-vars DUMP_MANY_VARS=\"%s\"") % (ANDROID_BUILD_TOP, all_vars)
+
+ config = subprocess.Popen(command, stdout=subprocess.PIPE,
+ shell=True).communicate()[0] # read until EOF, select stdin
+ # Prints out something like:
+ # TARGET_ARCH='arm64'
+ # HOST_ARCH='x86_64'
+ _DUMP_MANY_VARS = {}
+ for line in config.split("\n"):
+ # Split out "$key='$value'" via regex.
+ match = re.search("([^=]+)='([^']*)", line)
+ if not match:
+ continue
+ key = match.group(1)
+ value = match.group(2)
+ _DUMP_MANY_VARS[key] = value
+
+ return _DUMP_MANY_VARS[var_name]
+
+def get_build_var(var_name):
+ return dump_many_vars(var_name)
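+
+# For example, get_build_var('TARGET_ARCH') returns a value such as 'arm64'
+# (hypothetical value; the real one comes from the dump-many-vars make target).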
+
+def get_env(key):
+ return env.get(key)
+
+ANDROID_BUILD_TOP = env.get('ANDROID_BUILD_TOP', os.getcwd())
+
+# Directory used for temporary test files on the host.
+ART_HOST_TEST_DIR = tempfile.mkdtemp(prefix = 'test-art-')
+
+# Keep going after encountering a test failure?
+ART_TEST_KEEP_GOING = getEnvBoolean('ART_TEST_KEEP_GOING', True)
+
+# Do you want all tests, even those that are time consuming?
+ART_TEST_FULL = getEnvBoolean('ART_TEST_FULL', False)
+
+# Do you want interpreter tests run?
+ART_TEST_INTERPRETER = getEnvBoolean('ART_TEST_INTERPRETER', ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS = getEnvBoolean('ART_TEST_INTERPRETER_ACCESS_CHECKS',
+ ART_TEST_FULL)
+
+# Do you want JIT tests run?
+ART_TEST_JIT = getEnvBoolean('ART_TEST_JIT', ART_TEST_FULL)
+
+# Do you want optimizing compiler tests run?
+ART_TEST_OPTIMIZING = getEnvBoolean('ART_TEST_OPTIMIZING', True)
+
+# Do you want to test the optimizing compiler with graph coloring register allocation?
+ART_TEST_OPTIMIZING_GRAPH_COLOR = getEnvBoolean('ART_TEST_OPTIMIZING_GRAPH_COLOR', ART_TEST_FULL)
+
+# Do we want to test a non-PIC-compiled core image?
+ART_TEST_NPIC_IMAGE = getEnvBoolean('ART_TEST_NPIC_IMAGE', ART_TEST_FULL)
+
+# Do we want to test PIC-compiled tests ("apps")?
+ART_TEST_PIC_TEST = getEnvBoolean('ART_TEST_PIC_TEST', ART_TEST_FULL)
+
+# Do you want tracing tests run?
+ART_TEST_TRACE = getEnvBoolean('ART_TEST_TRACE', ART_TEST_FULL)
+
+# Do you want tracing tests (streaming mode) run?
+ART_TEST_TRACE_STREAM = getEnvBoolean('ART_TEST_TRACE_STREAM', ART_TEST_FULL)
+
+# Do you want tests with GC verification enabled run?
+ART_TEST_GC_VERIFY = getEnvBoolean('ART_TEST_GC_VERIFY', ART_TEST_FULL)
+
+# Do you want tests with the GC stress mode enabled run?
+ART_TEST_GC_STRESS = getEnvBoolean('ART_TEST_GC_STRESS', ART_TEST_FULL)
+
+# Do you want tests with the JNI forcecopy mode enabled run?
+ART_TEST_JNI_FORCECOPY = getEnvBoolean('ART_TEST_JNI_FORCECOPY', ART_TEST_FULL)
+
+# Do you want run-tests with relocation disabled run?
+ART_TEST_RUN_TEST_RELOCATE = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE', ART_TEST_FULL)
+
+# Do you want run-tests with prebuilding?
+ART_TEST_RUN_TEST_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_PREBUILD', True)
+
+# Do you want run-tests with no prebuilding enabled run?
+ART_TEST_RUN_TEST_NO_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_NO_PREBUILD', ART_TEST_FULL)
+
+# Do you want run-tests with a pregenerated core.art?
+ART_TEST_RUN_TEST_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_IMAGE', True)
+
+# Do you want run-tests without a pregenerated core.art?
+ART_TEST_RUN_TEST_NO_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_NO_IMAGE', ART_TEST_FULL)
+
+# Do you want run-tests with relocation enabled but patchoat failing?
+ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT',
+ ART_TEST_FULL)
+
+# Do you want run-tests without a dex2oat?
+ART_TEST_RUN_TEST_NO_DEX2OAT = getEnvBoolean('ART_TEST_RUN_TEST_NO_DEX2OAT', ART_TEST_FULL)
+
+# Do you want run-tests with libartd.so?
+ART_TEST_RUN_TEST_DEBUG = getEnvBoolean('ART_TEST_RUN_TEST_DEBUG', True)
+
+# Do you want run-tests with libart.so?
+ART_TEST_RUN_TEST_NDEBUG = getEnvBoolean('ART_TEST_RUN_TEST_NDEBUG', ART_TEST_FULL)
+
+# Do you want failed tests to have their artifacts cleaned up?
+ART_TEST_RUN_TEST_ALWAYS_CLEAN = getEnvBoolean('ART_TEST_RUN_TEST_ALWAYS_CLEAN', True)
+
+# Do you want run-tests with the --debuggable flag
+ART_TEST_RUN_TEST_DEBUGGABLE = getEnvBoolean('ART_TEST_RUN_TEST_DEBUGGABLE', ART_TEST_FULL)
+
+# Do you want to test multi-part boot-image functionality?
+ART_TEST_RUN_TEST_MULTI_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_MULTI_IMAGE', ART_TEST_FULL)
+
+ART_TEST_DEBUG_GC = getEnvBoolean('ART_TEST_DEBUG_GC', False)
+
+ART_TEST_BISECTION = getEnvBoolean('ART_TEST_BISECTION', False)
+
+DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get('DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
+
+# Do you want run-tests with the host/target's second arch?
+ART_TEST_RUN_TEST_2ND_ARCH = getEnvBoolean('ART_TEST_RUN_TEST_2ND_ARCH', True)
+
+HOST_2ND_ARCH_PREFIX = get_build_var('HOST_2ND_ARCH_PREFIX')
+HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get(
+ HOST_2ND_ARCH_PREFIX + 'DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
+
+ART_TEST_ANDROID_ROOT = env.get('ART_TEST_ANDROID_ROOT')
+
+ART_TEST_WITH_STRACE = getEnvBoolean('ART_TEST_WITH_STRACE', False)
+
+TARGET_2ND_ARCH = get_build_var('TARGET_2ND_ARCH')
+TARGET_ARCH = get_build_var('TARGET_ARCH')
+if TARGET_2ND_ARCH:
+ if "64" in TARGET_ARCH:
+ ART_PHONY_TEST_TARGET_SUFFIX = "64"
+ _2ND_ART_PHONY_TEST_TARGET_SUFFIX = "32"
+ else:
+ ART_PHONY_TEST_TARGET_SUFFIX = "32"
+ _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+else:
+ if "64" in TARGET_ARCH:
+ ART_PHONY_TEST_TARGET_SUFFIX = "64"
+ _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+ else:
+ ART_PHONY_TEST_TARGET_SUFFIX = "32"
+ _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+
+HOST_PREFER_32_BIT = get_build_var('HOST_PREFER_32_BIT')
+if HOST_PREFER_32_BIT == "true":
+ ART_PHONY_TEST_HOST_SUFFIX = "32"
+ _2ND_ART_PHONY_TEST_HOST_SUFFIX = ""
+else:
+ ART_PHONY_TEST_HOST_SUFFIX = "64"
+ _2ND_ART_PHONY_TEST_HOST_SUFFIX = "32"
+
+HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP,
+ get_build_var("HOST_OUT_EXECUTABLES"))
+os.environ['JACK'] = HOST_OUT_EXECUTABLES + '/jack'
+os.environ['DX'] = HOST_OUT_EXECUTABLES + '/dx'
+os.environ['SMALI'] = HOST_OUT_EXECUTABLES + '/smali'
+os.environ['JASMIN'] = HOST_OUT_EXECUTABLES + '/jasmin'
+os.environ['DXMERGER'] = HOST_OUT_EXECUTABLES + '/dexmerger'
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
new file mode 100755
index 0000000000..81b7953f3b
--- /dev/null
+++ b/test/testrunner/testrunner.py
@@ -0,0 +1,797 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ART Run-Test TestRunner
+
+The testrunner runs the ART run-tests by simply invoking the script.
+It fetches the list of eligible tests from the art/test directory and the list
+of disabled tests from art/test/knownfailures.json. It runs the tests by
+invoking the art/test/run-test script and checks the exit value to decide if
+the test passed or failed.
+
+Before invoking the script, first build all the test dependencies.
+There are two major build targets for building the target and host test
+dependencies:
+1) test-art-host-run-test
+2) test-art-target-run-test
+
+There are various options to invoke the script, which are:
+-t: Either the test name as in art/test or the test name including the variant
+    information. E.g., "-t 001-HelloWorld",
+    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32"
+-j: Number of thread workers to be used. E.g., "-j64"
+--dry-run: Instead of running the tests, just print their names.
+--verbose
+-b / --build-dependencies: Build the dependencies before running the tests.
+
+To specify any particular variants for the tests, use --<variant-name>.
+For example, to select the optimizing compiler, use --optimizing.
+
+
+At the end, the script prints the failed and skipped tests, if any.
+
+"""
+import fnmatch
+import itertools
+import json
+from optparse import OptionParser
+import os
+import re
+import subprocess
+import sys
+import threading
+import time
+
+import env
+
+TARGET_TYPES = set()
+RUN_TYPES = set()
+PREBUILD_TYPES = set()
+COMPILER_TYPES = set()
+RELOCATE_TYPES = set()
+TRACE_TYPES = set()
+GC_TYPES = set()
+JNI_TYPES = set()
+IMAGE_TYPES = set()
+PICTEST_TYPES = set()
+DEBUGGABLE_TYPES = set()
+ADDRESS_SIZES = set()
+OPTIMIZING_COMPILER_TYPES = set()
+ADDRESS_SIZES_TARGET = {'host': set(), 'target': set()}
+
+# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
+# keyed by the test name (like 001-HelloWorld), whose value is the set of
+# variants that the test is disabled for.
+DISABLED_TEST_CONTAINER = {}
+
+# The dict maps each variant type to the set of its possible variants. For example,
+# the key 'target' maps to {'target', 'host'}. The mapping is used to parse
+# the test name given as the argument to run.
+VARIANT_TYPE_DICT = {}
+
+# The set contains all the variants of each type.
+TOTAL_VARIANTS_SET = set()
+
+# The colors are used in the output. When a test passes, COLOR_PASS is used,
+# and so on.
+COLOR_ERROR = '\033[91m'
+COLOR_PASS = '\033[92m'
+COLOR_SKIP = '\033[93m'
+COLOR_NORMAL = '\033[0m'
+
+# The mutex object is used by the threads for exclusive access of test_count
+# to make any changes in its value.
+test_count_mutex = threading.Lock()
+# The set contains the list of all the possible run tests that are in art/test
+# directory.
+RUN_TEST_SET = set()
+# The semaphore object is used by the testrunner to limit the number of
+# threads to the user requested concurrency value.
+semaphore = threading.Semaphore(1)
+# The mutex object is used to provide exclusive access to a thread to print
+# its output.
+print_mutex = threading.Lock()
+failed_tests = []
+skipped_tests = []
+
+# Flags
+n_thread = 1
+test_count = 0
+total_test_count = 0
+verbose = False
+last_print_length = 0
+dry_run = False
+build = False
+gdb = False
+gdb_arg = ''
+stop_testrunner = False
+
+def gather_test_info():
+ """The method gathers test information about the test to be run which includes
+ generating the list of total tests from the art/test directory and the list
+ of disabled test. It also maps various variants to types.
+ """
+ global TOTAL_VARIANTS_SET
+ global DISABLED_TEST_CONTAINER
+ # TODO: Avoid duplication of the variant names in different lists.
+ VARIANT_TYPE_DICT['pictest'] = {'pictest', 'npictest'}
+ VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
+ VARIANT_TYPE_DICT['target'] = {'target', 'host'}
+ VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
+ VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'npicimage',
+ 'multinpicimage', 'multipicimage'}
+ VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
+ VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
+ VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'no-dex2oat', 'prebuild'}
+ VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
+ VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
+ VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
+ VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
+ 'regalloc_gc'}
+
+ for v_type in VARIANT_TYPE_DICT:
+ TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
+
+ test_dir = env.ANDROID_BUILD_TOP + '/art/test'
+ for f in os.listdir(test_dir):
+ if fnmatch.fnmatch(f, '[0-9]*'):
+ RUN_TEST_SET.add(f)
+ DISABLED_TEST_CONTAINER = get_disabled_test_info()
+
+
+def setup_test_env():
+ """The method sets default value for the various variants of the tests if they
+ are already not set.
+ """
+ if env.ART_TEST_BISECTION:
+ env.ART_TEST_RUN_TEST_NO_PREBUILD = True
+ env.ART_TEST_RUN_TEST_PREBUILD = False
+ # Bisection search writes to standard output.
+ env.ART_TEST_QUIET = False
+
+ if not TARGET_TYPES:
+ TARGET_TYPES.add('host')
+ TARGET_TYPES.add('target')
+
+ if env.ART_TEST_RUN_TEST_PREBUILD:
+ PREBUILD_TYPES.add('prebuild')
+ if env.ART_TEST_RUN_TEST_NO_PREBUILD:
+ PREBUILD_TYPES.add('no-prebuild')
+ if env.ART_TEST_RUN_TEST_NO_DEX2OAT:
+ PREBUILD_TYPES.add('no-dex2oat')
+
+ if env.ART_TEST_INTERPRETER_ACCESS_CHECKS:
+ COMPILER_TYPES.add('interp-ac')
+ if env.ART_TEST_INTERPRETER:
+ COMPILER_TYPES.add('interpreter')
+ if env.ART_TEST_JIT:
+ COMPILER_TYPES.add('jit')
+
+ if env.ART_TEST_OPTIMIZING:
+ COMPILER_TYPES.add('optimizing')
+ OPTIMIZING_COMPILER_TYPES.add('optimizing')
+ if env.ART_TEST_OPTIMIZING_GRAPH_COLOR:
+ COMPILER_TYPES.add('regalloc_gc')
+ OPTIMIZING_COMPILER_TYPES.add('regalloc_gc')
+
+ if not RELOCATE_TYPES:
+ RELOCATE_TYPES.add('no-relocate')
+ if env.ART_TEST_RUN_TEST_RELOCATE:
+ RELOCATE_TYPES.add('relocate')
+ if env.ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT:
+ RELOCATE_TYPES.add('relocate-npatchoat')
+
+ if not TRACE_TYPES:
+ TRACE_TYPES.add('ntrace')
+ if env.ART_TEST_TRACE:
+ TRACE_TYPES.add('trace')
+ if env.ART_TEST_TRACE_STREAM:
+ TRACE_TYPES.add('stream')
+
+ if not GC_TYPES:
+ GC_TYPES.add('cms')
+ if env.ART_TEST_GC_STRESS:
+ GC_TYPES.add('gcstress')
+ if env.ART_TEST_GC_VERIFY:
+ GC_TYPES.add('gcverify')
+
+ if not JNI_TYPES:
+ JNI_TYPES.add('checkjni')
+ if env.ART_TEST_JNI_FORCECOPY:
+ JNI_TYPES.add('forcecopy')
+
+ if env.ART_TEST_RUN_TEST_IMAGE:
+ IMAGE_TYPES.add('picimage')
+ if env.ART_TEST_RUN_TEST_NO_IMAGE:
+ IMAGE_TYPES.add('no-image')
+ if env.ART_TEST_RUN_TEST_MULTI_IMAGE:
+ IMAGE_TYPES.add('multipicimage')
+ if env.ART_TEST_NPIC_IMAGE:
+ IMAGE_TYPES.add('npicimage')
+ if env.ART_TEST_RUN_TEST_MULTI_IMAGE:
+ IMAGE_TYPES.add('multinpicimage')
+
+ if not PICTEST_TYPES:
+ PICTEST_TYPES.add('npictest')
+ if env.ART_TEST_PIC_TEST:
+ PICTEST_TYPES.add('pictest')
+
+ if env.ART_TEST_RUN_TEST_DEBUG:
+ RUN_TYPES.add('debug')
+ if env.ART_TEST_RUN_TEST_NDEBUG:
+ RUN_TYPES.add('ndebug')
+
+ if not DEBUGGABLE_TYPES:
+ DEBUGGABLE_TYPES.add('ndebuggable')
+
+ if env.ART_TEST_RUN_TEST_DEBUGGABLE:
+ DEBUGGABLE_TYPES.add('debuggable')
+
+ if not ADDRESS_SIZES:
+ ADDRESS_SIZES_TARGET['target'].add(env.ART_PHONY_TEST_TARGET_SUFFIX)
+ ADDRESS_SIZES_TARGET['host'].add(env.ART_PHONY_TEST_HOST_SUFFIX)
+ if env.ART_TEST_RUN_TEST_2ND_ARCH:
+ ADDRESS_SIZES_TARGET['host'].add(env._2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ ADDRESS_SIZES_TARGET['target'].add(env._2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ else:
+ ADDRESS_SIZES_TARGET['host'] = ADDRESS_SIZES_TARGET['host'].union(ADDRESS_SIZES)
+ ADDRESS_SIZES_TARGET['target'] = ADDRESS_SIZES_TARGET['target'].union(ADDRESS_SIZES)
+
+ global semaphore
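+  # Replace the placeholder Semaphore(1) created at import time with one sized
+  # to the user-requested concurrency (n_thread).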
+ semaphore = threading.Semaphore(n_thread)
+
+
+def run_tests(tests):
+ """Creates thread workers to run the tests.
+
+  The method generates the command and a thread worker to run each test. It
+  uses a semaphore to keep the number of thread workers within the
+  user-requested thread count. Each new worker acquires the semaphore, and
+  when the number of workers reaches the maximum allowed concurrency, the
+  method waits for an existing thread worker to release the semaphore. A
+  worker releases the semaphore when it finishes printing its output.
+
+ Args:
+ tests: The set of tests to be run.
+ """
+ options_all = ''
+ global total_test_count
+ total_test_count = len(tests)
+ total_test_count *= len(RUN_TYPES)
+ total_test_count *= len(PREBUILD_TYPES)
+ total_test_count *= len(RELOCATE_TYPES)
+ total_test_count *= len(TRACE_TYPES)
+ total_test_count *= len(GC_TYPES)
+ total_test_count *= len(JNI_TYPES)
+ total_test_count *= len(IMAGE_TYPES)
+ total_test_count *= len(PICTEST_TYPES)
+ total_test_count *= len(DEBUGGABLE_TYPES)
+ total_test_count *= len(COMPILER_TYPES)
+ target_address_combinations = 0
+ for target in TARGET_TYPES:
+ for address_size in ADDRESS_SIZES_TARGET[target]:
+ target_address_combinations += 1
+ total_test_count *= target_address_combinations
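+  # total_test_count is therefore len(tests) multiplied by the size of every
+  # variant dimension and by the number of (target, address size) combinations.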
+
+ if env.ART_TEST_WITH_STRACE:
+ options_all += ' --strace'
+
+ if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
+ options_all += ' --always-clean'
+
+ if env.ART_TEST_BISECTION:
+ options_all += ' --bisection-search'
+
+ if env.ART_TEST_ANDROID_ROOT:
+ options_all += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
+
+ if gdb:
+ options_all += ' --gdb'
+ if gdb_arg:
+ options_all += ' --gdb-arg ' + gdb_arg
+
+ config = itertools.product(tests, TARGET_TYPES, RUN_TYPES, PREBUILD_TYPES,
+ COMPILER_TYPES, RELOCATE_TYPES, TRACE_TYPES,
+ GC_TYPES, JNI_TYPES, IMAGE_TYPES, PICTEST_TYPES,
+ DEBUGGABLE_TYPES)
+
+ for test, target, run, prebuild, compiler, relocate, trace, gc, \
+ jni, image, pictest, debuggable in config:
+ for address_size in ADDRESS_SIZES_TARGET[target]:
+ if stop_testrunner:
+ # When ART_TEST_KEEP_GOING is set to false, stop_testrunner is set to
+ # True as soon as a test fails. When this happens, the method stops
+ # creating any new threads and waits for all the existing threads
+ # to end.
+ while threading.active_count() > 2:
+ time.sleep(0.1)
+ return
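+ # Assemble the fully qualified test name from all variant choices, e.g.
+ # (illustrative): 'test-art-host-run-test-debug-prebuild-optimizing-no-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld64'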
+ test_name = 'test-art-'
+ test_name += target + '-run-test-'
+ test_name += run + '-'
+ test_name += prebuild + '-'
+ test_name += compiler + '-'
+ test_name += relocate + '-'
+ test_name += trace + '-'
+ test_name += gc + '-'
+ test_name += jni + '-'
+ test_name += image + '-'
+ test_name += pictest + '-'
+ test_name += debuggable + '-'
+ test_name += test
+ test_name += address_size
+
+ variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
+ image, pictest, debuggable, address_size}
+
+ options_test = options_all
+
+ if target == 'host':
+ options_test += ' --host'
+
+ if run == 'ndebug':
+ options_test += ' -O'
+
+ if prebuild == 'prebuild':
+ options_test += ' --prebuild'
+ elif prebuild == 'no-prebuild':
+ options_test += ' --no-prebuild'
+ elif prebuild == 'no-dex2oat':
+ options_test += ' --no-prebuild --no-dex2oat'
+
+ if compiler == 'optimizing':
+ options_test += ' --optimizing'
+ elif compiler == 'regalloc_gc':
+ options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
+ elif compiler == 'interpreter':
+ options_test += ' --interpreter'
+ elif compiler == 'interp-ac':
+ options_test += ' --interpreter --verify-soft-fail'
+ elif compiler == 'jit':
+ options_test += ' --jit'
+
+ if relocate == 'relocate':
+ options_test += ' --relocate'
+ elif relocate == 'no-relocate':
+ options_test += ' --no-relocate'
+ elif relocate == 'relocate-npatchoat':
+ options_test += ' --relocate --no-patchoat'
+
+ if trace == 'trace':
+ options_test += ' --trace'
+ elif trace == 'stream':
+ options_test += ' --trace --stream'
+
+ if gc == 'gcverify':
+ options_test += ' --gcverify'
+ elif gc == 'gcstress':
+ options_test += ' --gcstress'
+
+ if jni == 'forcecopy':
+ options_test += ' --runtime-option -Xjniopts:forcecopy'
+ elif jni == 'checkjni':
+ options_test += ' --runtime-option -Xcheck:jni'
+
+ if image == 'no-image':
+ options_test += ' --no-image'
+ elif image == 'npicimage':
+ options_test += ' --npic-image'
+ elif image == 'multinpicimage':
+ options_test += ' --npic-image --multi-image'
+ elif image == 'multipicimage':
+ options_test += ' --multi-image'
+
+ if pictest == 'pictest':
+ options_test += ' --pic-test'
+
+ if debuggable == 'debuggable':
+ options_test += ' --debuggable'
+
+ if address_size == '64':
+ options_test += ' --64'
+
+ if env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
+ options_test += ' --instruction-set-features ' + env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES
+
+ elif address_size == '32':
+ if env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
+ options_test += ' --instruction-set-features ' + \
+ env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES
+
+ options_test = (' --output-path %s/run-test-output/%s') % (
+ env.ART_HOST_TEST_DIR, test_name) + options_test
+
+ run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
+ command = run_test_sh + ' ' + options_test + ' ' + test
+
+ semaphore.acquire()
+ worker = threading.Thread(target=run_test, args=(command, test, variant_set, test_name))
+ worker.daemon = True
+ worker.start()
+
+ while threading.active_count() > 2:
+ time.sleep(0.1)
+
+
+def run_test(command, test, test_variant, test_name):
+ """Runs the test.
+
+ It invokes the art/test/run-test script to run the test. The exit status of
+ the script is checked; if it indicates success, the test is assumed to have
+ passed, otherwise the test is put in the list of failed tests. Before
+ actually running the test, it also checks if the test is placed in the list
+ of disabled tests, and if yes, it skips running it and adds the test to the
+ list of skipped tests. The method uses the print_text method to actually
+ print the output. After running the test and capturing its output, it
+ releases the semaphore object.
+
+ Args:
+ command: The command to be used to invoke the script
+ test: The name of the test without the variant information.
+ test_variant: The set of variants for the test.
+ test_name: The name of the test along with the variants.
+ """
+ global last_print_length
+ global test_count
+ global stop_testrunner
+ if is_test_disabled(test, test_variant):
+ test_skipped = True
+ else:
+ test_skipped = False
+ proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+ script_output = proc.stdout.read().strip()
+ test_passed = not proc.wait()
+
+ # If verbose is set to True, each test's information is printed on a new
+ # line. If not, the information is printed on the same line, overwriting
+ # the previous test's output.
+ if not verbose:
+ suffix = '\r'
+ prefix = ' ' * last_print_length + '\r'
+ else:
+ suffix = '\n'
+ prefix = ''
+ test_count_mutex.acquire()
+ test_count += 1
+ percent = (test_count * 100) / total_test_count
+ out = '[ ' + str(percent) + '% ' + str(test_count) + '/' + str(total_test_count) + ' ] '
+ test_count_mutex.release()
+ out += test_name + ' '
+ if not test_skipped:
+ if test_passed:
+ out += COLOR_PASS + 'PASS' + COLOR_NORMAL
+ last_print_length = len(out)
+ else:
+ failed_tests.append(test_name)
+ out += COLOR_ERROR + 'FAIL' + COLOR_NORMAL
+ out += '\n' + command + '\n' + script_output
+ if not env.ART_TEST_KEEP_GOING:
+ stop_testrunner = True
+ last_print_length = 0
+ elif not dry_run:
+ out += COLOR_SKIP + 'SKIP' + COLOR_NORMAL
+ last_print_length = len(out)
+ skipped_tests.append(test_name)
+ print_mutex.acquire()
+ print_text(prefix + out + suffix)
+ print_mutex.release()
+ semaphore.release()
+
+
+def get_disabled_test_info():
+ """Generate set of known failures.
+
+ It parses the art/test/knownfailures.json file to generate the list of
+ disabled tests.
+
+ Returns:
+ A dict mapping each test to the set of variant combinations for which
+ the test should not be run.
+ """
+ known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
+ with open(known_failures_file) as known_failures_json:
+ known_failures_info = json.loads(known_failures_json.read())
+
+ disabled_test_info = {}
+ for failure in known_failures_info:
+ tests = failure.get('test')
+ if tests:
+ tests = [tests]
+ else:
+ tests = failure.get('tests', [])
+ variants = parse_variants(failure.get('variant'))
+ env_vars = failure.get('env_vars')
+ if check_env_vars(env_vars):
+ for test in tests:
+ if test in disabled_test_info:
+ disabled_test_info[test] = disabled_test_info[test].union(variants)
+ else:
+ disabled_test_info[test] = variants
+ return disabled_test_info
+
+
+def check_env_vars(env_vars):
+ """Checks if the env variables are set as required to run the test.
+
+ Returns:
+ True if all the env variables are set as required, otherwise False.
+ """
+
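+ # e.g. (illustrative) env_vars == {'ART_USE_READ_BARRIER': 'true'} requires
+ # ART_USE_READ_BARRIER to be set to 'true' in the current environment.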
+ if not env_vars:
+ return True
+ for key in env_vars:
+ if env.get_env(key) != env_vars.get(key):
+ return False
+ return True
+
+
+def is_test_disabled(test, variant_set):
+ """Checks if the test along with the variant_set is disabled.
+
+ Args:
+ test: The name of the test as in art/test directory.
+ variant_set: Variants to be used for the test.
+ Returns:
+ True if the test is disabled, otherwise False.
+ """
+ if dry_run:
+ return True
+ variants_list = DISABLED_TEST_CONTAINER.get(test, {})
+ for variants in variants_list:
+ variants_present = True
+ for variant in variants:
+ if variant not in variant_set:
+ variants_present = False
+ break
+ if variants_present:
+ return True
+ return False
+
+
+def parse_variants(variants):
+ """Parse variants fetched from art/test/knownfailures.json.
+ """
+ if not variants:
+ variants = ''
+ for variant in TOTAL_VARIANTS_SET:
+ variants += variant
+ variants += '|'
+ variants = variants[:-1]
+ variant_list = set()
+ or_variants = variants.split('|')
+ for or_variant in or_variants:
+ and_variants = or_variant.split('&')
+ variant = set()
+ for and_variant in and_variants:
+ and_variant = and_variant.strip()
+ variant.add(and_variant)
+ variant_list.add(frozenset(variant))
+ return variant_list
+
+def print_text(output):
+ sys.stdout.write(output)
+ sys.stdout.flush()
+
+def print_analysis():
+ if not verbose:
+ print_text(' ' * last_print_length + '\r')
+ if skipped_tests:
+ print_text(COLOR_SKIP + 'SKIPPED TESTS' + COLOR_NORMAL + '\n')
+ for test in skipped_tests:
+ print_text(test + '\n')
+ print_text('\n')
+
+ if failed_tests:
+ print_text(COLOR_ERROR + 'FAILED TESTS' + COLOR_NORMAL + '\n')
+ for test in failed_tests:
+ print_text(test + '\n')
+
+
+def parse_test_name(test_name):
+ """Parses the testname provided by the user.
+ It supports two types of test_name:
+ 1) Like 001-HelloWorld. In this case, it will just verify if the test actually
+ exists and if it does, it returns the testname.
+ 2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32
+ In this case, it will parse all the variants and check if they are placed
+ correctly. If yes, it will set the various VARIANT_TYPES to use the
+ variants required to run the test. Again, it returns the test_name
+ without the variant information like 001-HelloWorld.
+ """
+ if test_name in RUN_TEST_SET:
+ return {test_name}
+
+ regex = '^test-art-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
+ regex += 'run-test-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['pictest']) + ')-'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
+ regex += '(' + '|'.join(RUN_TEST_SET) + ')'
+ regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
+ match = re.match(regex, test_name)
+ if match:
+ TARGET_TYPES.add(match.group(1))
+ RUN_TYPES.add(match.group(2))
+ PREBUILD_TYPES.add(match.group(3))
+ COMPILER_TYPES.add(match.group(4))
+ RELOCATE_TYPES.add(match.group(5))
+ TRACE_TYPES.add(match.group(6))
+ GC_TYPES.add(match.group(7))
+ JNI_TYPES.add(match.group(8))
+ IMAGE_TYPES.add(match.group(9))
+ PICTEST_TYPES.add(match.group(10))
+ DEBUGGABLE_TYPES.add(match.group(11))
+ ADDRESS_SIZES.add(match.group(13))
+ return {match.group(12)}
+
+
+def parse_option():
+ global verbose
+ global dry_run
+ global n_thread
+ global build
+ global gdb
+ global gdb_arg
+
+ parser = OptionParser()
+ parser.add_option('-t', '--test', dest='test', help='name of the test')
+ parser.add_option('-j', type='int', dest='n_thread')
+ for variant in TOTAL_VARIANTS_SET:
+ flag = '--' + variant
+ flag_dest = variant.replace('-', '_')
+ if variant == '32' or variant == '64':
+ flag_dest = 'n' + flag_dest
+ parser.add_option(flag, action='store_true', dest=flag_dest)
+ parser.add_option('--verbose', '-v', action='store_true', dest='verbose')
+ parser.add_option('--dry-run', action='store_true', dest='dry_run')
+ parser.add_option('-b', '--build-dependencies', action='store_true', dest='build')
+ parser.add_option('--gdb', action='store_true', dest='gdb')
+ parser.add_option('--gdb-arg', dest='gdb_arg')
+
+ options = parser.parse_args()[0]
+ test = ''
+ if options.test:
+ test = parse_test_name(options.test)
+ if options.pictest:
+ PICTEST_TYPES.add('pictest')
+ if options.ndebug:
+ RUN_TYPES.add('ndebug')
+ if options.interp_ac:
+ COMPILER_TYPES.add('interp-ac')
+ if options.picimage:
+ IMAGE_TYPES.add('picimage')
+ if options.n64:
+ ADDRESS_SIZES.add('64')
+ if options.interpreter:
+ COMPILER_TYPES.add('interpreter')
+ if options.jni:
+ JNI_TYPES.add('jni')
+ if options.relocate_npatchoat:
+ RELOCATE_TYPES.add('relocate-npatchoat')
+ if options.no_prebuild:
+ PREBUILD_TYPES.add('no-prebuild')
+ if options.npictest:
+ PICTEST_TYPES.add('npictest')
+ if options.no_dex2oat:
+ PREBUILD_TYPES.add('no-dex2oat')
+ if options.jit:
+ COMPILER_TYPES.add('jit')
+ if options.relocate:
+ RELOCATE_TYPES.add('relocate')
+ if options.ndebuggable:
+ DEBUGGABLE_TYPES.add('ndebuggable')
+ if options.no_image:
+ IMAGE_TYPES.add('no-image')
+ if options.optimizing:
+ COMPILER_TYPES.add('optimizing')
+ if options.trace:
+ TRACE_TYPES.add('trace')
+ if options.gcstress:
+ GC_TYPES.add('gcstress')
+ if options.no_relocate:
+ RELOCATE_TYPES.add('no-relocate')
+ if options.target:
+ TARGET_TYPES.add('target')
+ if options.forcecopy:
+ JNI_TYPES.add('forcecopy')
+ if options.n32:
+ ADDRESS_SIZES.add('32')
+ if options.host:
+ TARGET_TYPES.add('host')
+ if options.gcverify:
+ GC_TYPES.add('gcverify')
+ if options.debuggable:
+ DEBUGGABLE_TYPES.add('debuggable')
+ if options.prebuild:
+ PREBUILD_TYPES.add('prebuild')
+ if options.debug:
+ RUN_TYPES.add('debug')
+ if options.checkjni:
+ JNI_TYPES.add('checkjni')
+ if options.ntrace:
+ TRACE_TYPES.add('ntrace')
+ if options.cms:
+ GC_TYPES.add('cms')
+ if options.npicimage:
+ IMAGE_TYPES.add('npicimage')
+ if options.multinpicimage:
+ IMAGE_TYPES.add('multinpicimage')
+ if options.multipicimage:
+ IMAGE_TYPES.add('multipicimage')
+ if options.verbose:
+ verbose = True
+ if options.n_thread:
+ n_thread = max(1, options.n_thread)
+ if options.dry_run:
+ dry_run = True
+ verbose = True
+ if options.build:
+ build = True
+ if options.gdb:
+ n_thread = 1
+ gdb = True
+ if options.gdb_arg:
+ gdb_arg = options.gdb_arg
+
+ return test
+
+def main():
+ gather_test_info()
+ user_requested_test = parse_option()
+ setup_test_env()
+ if build:
+ build_targets = ''
+ if 'host' in TARGET_TYPES:
+ build_targets += 'test-art-host-run-test-dependencies '
+ if 'target' in TARGET_TYPES:
+ build_targets += 'test-art-target-run-test-dependencies '
+ build_command = 'make'
+ build_command += ' -j' + str(n_thread)
+ build_command += ' -C ' + env.ANDROID_BUILD_TOP
+ build_command += ' ' + build_targets
+ if subprocess.call(build_command.split()):
+ sys.exit(1)
+ if user_requested_test:
+ test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_test,))
+ else:
+ test_runner_thread = threading.Thread(target=run_tests, args=(RUN_TEST_SET,))
+ test_runner_thread.daemon = True
+ try:
+ test_runner_thread.start()
+ while threading.active_count() > 1:
+ time.sleep(0.1)
+ print_analysis()
+ if failed_tests:
+ sys.exit(1)
+ sys.exit(0)
+ except SystemExit:
+ pass
+ except:
+ print_analysis()
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 94934a2831..a062afdaef 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -26,6 +26,7 @@ import com.android.tools.perflib.heap.RootObj;
import com.android.tools.perflib.heap.Type;
import java.awt.image.BufferedImage;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -87,22 +88,27 @@ class InstanceUtils {
// is a char[], use that directly as the value, otherwise use the value
// field of the string object. The field accesses for count and offset
// later on will work okay regardless of what type the inst object is.
- Object value = inst;
- if (isInstanceOfClass(inst, "java.lang.String")) {
- value = getField(inst, "value");
- }
+ boolean isString = isInstanceOfClass(inst, "java.lang.String");
+ Object value = isString ? getField(inst, "value") : inst;
if (!(value instanceof ArrayInstance)) {
return null;
}
ArrayInstance chars = (ArrayInstance) value;
+ int numChars = chars.getLength();
+ int offset = getIntField(inst, "offset", 0);
+ int count = getIntField(inst, "count", numChars);
+
+ // With string compression enabled, the array type can be BYTE but in that case
+ // offset must be 0 and count must match numChars.
+ if (isString && (chars.getArrayType() == Type.BYTE) && (offset == 0) && (count == numChars)) {
+ int length = (0 <= maxChars && maxChars < numChars) ? maxChars : numChars;
+ return new String(chars.asRawByteArray(/* offset */ 0, length), StandardCharsets.US_ASCII);
+ }
if (chars.getArrayType() != Type.CHAR) {
return null;
}
-
- int numChars = chars.getLength();
- int count = getIntField(inst, "count", numChars);
if (count == 0) {
return "";
}
@@ -110,7 +116,6 @@ class InstanceUtils {
count = maxChars;
}
- int offset = getIntField(inst, "offset", 0);
int end = offset + count - 1;
if (offset >= 0 && offset < numChars && end >= 0 && end < numChars) {
return new String(chars.asCharArray(offset, count));
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index e08df67b13..587d9defef 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -45,6 +45,8 @@ public class Main {
// class and reading the desired field.
public static class DumpedStuff {
public String basicString = "hello, world";
+ public String nonAscii = "Sigma (\u01a9) is not ASCII";
+ public String embeddedZero = "embedded\0..."; // Non-ASCII for string compression purposes.
public char[] charArray = "char thing".toCharArray();
public String nullString = null;
public Object anObject = new Object();
diff --git a/tools/ahat/test/InstanceUtilsTest.java b/tools/ahat/test/InstanceUtilsTest.java
index ec77e70da1..fe2706d7d4 100644
--- a/tools/ahat/test/InstanceUtilsTest.java
+++ b/tools/ahat/test/InstanceUtilsTest.java
@@ -37,6 +37,20 @@ public class InstanceUtilsTest {
}
@Test
+ public void asStringNonAscii() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("nonAscii");
+ assertEquals("Sigma (\u01a9) is not ASCII", InstanceUtils.asString(str));
+ }
+
+ @Test
+ public void asStringEmbeddedZero() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("embeddedZero");
+ assertEquals("embedded\0...", InstanceUtils.asString(str));
+ }
+
+ @Test
public void asStringCharArray() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("charArray");
@@ -51,6 +65,20 @@ public class InstanceUtilsTest {
}
@Test
+ public void asStringTruncatedNonAscii() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("nonAscii");
+ assertEquals("Sigma (\u01a9)", InstanceUtils.asString(str, 9));
+ }
+
+ @Test
+ public void asStringTruncatedEmbeddedZero() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("embeddedZero");
+ assertEquals("embed", InstanceUtils.asString(str, 5));
+ }
+
+ @Test
public void asStringCharArrayTruncated() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("charArray");
@@ -65,6 +93,20 @@ public class InstanceUtilsTest {
}
@Test
+ public void asStringExactMaxNonAscii() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("nonAscii");
+ assertEquals("Sigma (\u01a9) is not ASCII", InstanceUtils.asString(str, 22));
+ }
+
+ @Test
+ public void asStringExactMaxEmbeddedZero() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("embeddedZero");
+ assertEquals("embedded\0...", InstanceUtils.asString(str, 12));
+ }
+
+ @Test
public void asStringCharArrayExactMax() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("charArray");
@@ -79,6 +121,20 @@ public class InstanceUtilsTest {
}
@Test
+ public void asStringNotTruncatedNonAscii() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("nonAscii");
+ assertEquals("Sigma (\u01a9) is not ASCII", InstanceUtils.asString(str, 50));
+ }
+
+ @Test
+ public void asStringNotTruncatedEmbeddedZero() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("embeddedZero");
+ assertEquals("embedded\0...", InstanceUtils.asString(str, 50));
+ }
+
+ @Test
public void asStringCharArrayNotTruncated() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("charArray");
@@ -93,6 +149,20 @@ public class InstanceUtilsTest {
}
@Test
+ public void asStringNegativeMaxNonAscii() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("nonAscii");
+ assertEquals("Sigma (\u01a9) is not ASCII", InstanceUtils.asString(str, -3));
+ }
+
+ @Test
+ public void asStringNegativeMaxEmbeddedZero() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("embeddedZero");
+ assertEquals("embedded\0...", InstanceUtils.asString(str, -3));
+ }
+
+ @Test
public void asStringCharArrayNegativeMax() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("charArray");
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index d792e906ef..59c52117eb 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -20,7 +20,7 @@
//
// In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
-art_cc_binary {
+cc_binary { // Do not use art_cc_binary because HOST_PREFER_32_BIT is incompatible with genrule.
name: "cpp-define-generator-data",
host_supported: true,
device_supported: false,
@@ -34,3 +34,14 @@ art_cc_binary {
"libbase",
],
}
+
+// Note: See $OUT_DIR/soong/build.ninja for the exact filename that this
+// genrule generates, in order to later run a make command on just this rule.
+genrule {
+ name: "cpp-define-generator-asm-support",
+ out: ["asm_support_gen.h"],
+ tools: ["cpp-define-generator-data"],
+ tool_files: ["verify-asm-support"],
+ cmd: "$(location verify-asm-support) --quiet \"$(location cpp-define-generator-data)\" \"$(out)\""
+}
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
index 82cdbb20f1..5fa5194d00 100644
--- a/tools/cpp-define-generator/constant_jit.def
+++ b/tools/cpp-define-generator/constant_jit.def
@@ -25,6 +25,5 @@
DEFINE_JIT_CONSTANT(CHECK_OSR, int16_t, art::jit::kJitCheckForOSR)
DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
-DEFINE_JIT_CONSTANT(CHECK_OSR_THRESHOLD, int16_t, art::jit::Jit::kJitRecheckOSRThreshold)
#undef DEFINE_JIT_CONSTANT
diff --git a/tools/cpp-define-generator/presubmit-check-files-up-to-date b/tools/cpp-define-generator/presubmit-check-files-up-to-date
new file mode 100755
index 0000000000..67a702adc7
--- /dev/null
+++ b/tools/cpp-define-generator/presubmit-check-files-up-to-date
@@ -0,0 +1,67 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into a temporary location.
+# Then verifies it is the same as our local stored copy.
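+#
+# Example (illustrative): run manually from the art/ directory:
+# ./tools/cpp-define-generator/presubmit-check-files-up-to-date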
+
+GEN_TOOL=cpp-define-generator-data
+
+if ! which "$GEN_TOOL"; then
+ echo "ERROR: Please build cpp-define-generator-data or source build/envsetup.sh" >&2
+ exit 1
+fi
+
+#######################
+#######################
+
+PREUPLOAD_COMMIT_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+BUILD_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+
+function finish() {
+ # Delete temp files.
+ [[ -f "$PREUPLOAD_COMMIT_COPY" ]] && rm "$PREUPLOAD_COMMIT_COPY"
+ [[ -f "$BUILD_COPY" ]] && rm "$BUILD_COPY"
+}
+trap finish EXIT
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+
+# Repo upload hook runs inside of the top-level git directory.
+# If we run this script manually, be in the right place for git.
+cd "$ART_DIR"
+
+if [[ -z $PREUPLOAD_COMMIT ]]; then
+ echo "WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'"
+ PREUPLOAD_COMMIT=HEAD
+fi
+
+# Get version we are about to push into git.
+git show "$PREUPLOAD_COMMIT:$ASM_SUPPORT_GEN_CHECKED_IN_COPY" > "$PREUPLOAD_COMMIT_COPY" || exit 1
+# Get version that our build would have made.
+"$GEN_TOOL" > "$BUILD_COPY" || exit 1
+
+if ! diff "$PREUPLOAD_COMMIT_COPY" "$BUILD_COPY"; then
+ echo "asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+ echo " has diverged from the build copy." >&2
+ echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
+ exit 1
+fi
+
+# Success. Print nothing to avoid spamming users.
diff --git a/tools/cpp-define-generator/verify-asm-support b/tools/cpp-define-generator/verify-asm-support
new file mode 100755
index 0000000000..745b1153c9
--- /dev/null
+++ b/tools/cpp-define-generator/verify-asm-support
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into the $OUT directory in the build.
+# Then verifies that it is the same as in runtime/generated/asm_support_gen.h
+
+# Validates that art/runtime/generated/asm_support_gen.h is up to date.
+# - This must be run after a build since it uses cpp-define-generator-data
+
+# Path to asm_support_gen.h that we check into our git repository.
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+# If set to "y", overwrite the local checked-in copy with the generated
+# version instead of producing an error when the two differ.
+OVERWRITE_CHECKED_IN_COPY_IF_CHANGED="n"
+
+#######################
+#######################
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY="$ART_DIR/runtime/generated/asm_support_gen.h"
+
+# Sanity check that we haven't moved the file around.
+# If we did, perhaps the above constant should be updated.
+if ! [[ -f "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" ]]; then
+ echo "ERROR: Missing asm_support_gen.h, expected to be in '$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY'" >&2
+ exit 1
+fi
+
+# The absolute path to cpp-define-generator is in $1
+# Generate the file as part of the build into the out location specified by $2.
+
+# Compare that the generated file matches our golden copy that's checked into git.
+# If not, it is a fatal error and the user needs to run 'generate-asm-support' to rebuild.
+
+if [[ $# -lt 2 ]]; then
+ echo "Usage: $0 [--quiet] [--presubmit] <path-to-cpp-define-generator-data-binary> <output-file>'" >&2
+ exit 1
+fi
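+
+# Example invocation (illustrative paths):
+# ./verify-asm-support --quiet out/host/linux-x86/bin/cpp-define-generator-data /tmp/asm_support_gen.h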
+
+# Suppress 'chatty' messages during the build.
+# If anything is printed in the success case, the main Android build
+# can't reuse the same console line to show multiple commands being
+# executed.
+QUIET=false
+if [[ "$1" == "--quiet" ]]; then
+ QUIET=true
+ shift
+fi
+
+CPP_DEFINE_GENERATOR_TOOL="$1"
+OUTPUT_FILE="$2"
+
+function pecho() {
+ if ! $QUIET; then
+ echo "$@"
+ fi
+}
+
+# Generate the header. Print the command we're running to console for readability.
+pecho "cpp-define-generator-data > \"$OUTPUT_FILE\""
+"$CPP_DEFINE_GENERATOR_TOOL" > "$OUTPUT_FILE"
+retval="$?"
+
+if [[ $retval -ne 0 ]]; then
+ echo "verify-asm-support: FATAL: Error while running cpp-define-generator-data" >&2
+ exit $retval
+fi
+
+if ! diff "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" "$OUTPUT_FILE"; then
+
+ if [[ $OVERWRITE_CHECKED_IN_COPY_IF_CHANGED == "y" ]]; then
+ cp "$OUTPUT_FILE" "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY"
+ echo "verify-asm-support: OK: Overwrote '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' with build copy."
+ echo " Please 'git add $ASM_SUPPORT_GEN_CHECKED_IN_COPY'."
+ else
+ echo "---------------------------------------------------------------------------------------------" >&2
+ echo "verify-asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+ echo " has diverged from the build copy." >&2
+ echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
+ [[ -f "$OUTPUT_FILE" ]] && rm "$OUTPUT_FILE"
+ exit 1
+ fi
+fi
+
+pecho "verify-asm-support: SUCCESS. Built '$OUTPUT_FILE' which matches our checked in copy."
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 41faa69c31..6e123ce7e4 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -14,6 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Exit as a stop-gap measure for b/35308152.
+exit 0
+
if [ ! -d libcore ]; then
echo "Script needs to be run at the root of the android tree"
exit 1
@@ -123,7 +126,7 @@ done
vogar_args="$vogar_args --timeout 480"
# Use Jack with "1.8" configuration.
-vogar_args="$vogar_args --toolchain jack --language JN"
+vogar_args="$vogar_args --toolchain jack --language JO"
# JIT settings.
if $use_jit; then