Merge "MIPS64: Add direct calls to math intrinsics."
diff --git a/Android.mk b/Android.mk
index f8c5378..7beb30f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -81,6 +81,7 @@
include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
include $(art_path)/libart_fake/Android.mk
+include $(art_path)/test/Android.run-test-jvmti-java-library.mk
ART_HOST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index cf1832b..0ed230c 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,2 +1,3 @@
[Hook Scripts]
check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
+check_cpplint_on_changed_files = tools/cpplint_presubmit.py
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 1876efc..1ae79ac 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -66,6 +66,9 @@
# Do you want to test the optimizing compiler with graph coloring register allocation?
ART_TEST_OPTIMIZING_GRAPH_COLOR ?= $(ART_TEST_FULL)
+# Do you want to do run-tests with profiles?
+ART_TEST_SPEED_PROFILE ?= $(ART_TEST_FULL)
+
# Do we want to test PIC-compiled tests ("apps")?
ART_TEST_PIC_TEST ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c27f8db..ed34a8d 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -25,6 +25,7 @@
GTEST_DEX_DIRECTORIES := \
AbstractMethod \
AllFields \
+ DefaultMethods \
DexToDexDecompiler \
ErroneousA \
ErroneousB \
@@ -104,7 +105,7 @@
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
-ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB
+ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 550e8c4..1a2b9cd 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -300,6 +300,13 @@
log_verbosity.oat = true;
EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose);
}
+
+ {
+ const char* log_args = "-verbose:dex";
+ LogVerbosity log_verbosity = LogVerbosity();
+ log_verbosity.dex = true;
+ EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose);
+ }
} // TEST_F
// TODO: Enable this b/19274810
@@ -476,7 +483,7 @@
* -Xps-*
*/
TEST_F(CmdlineParserTest, ProfileSaverOptions) {
- ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7);
+ ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc");
EXPECT_SINGLE_PARSE_VALUE(opt,
"-Xjitsaveprofilinginfo "
@@ -486,7 +493,8 @@
"-Xps-min-methods-to-save:4 "
"-Xps-min-classes-to-save:5 "
"-Xps-min-notification-before-wake:6 "
- "-Xps-max-notification-before-wake:7",
+ "-Xps-max-notification-before-wake:7 "
+ "-Xps-profile-path:abc",
M::ProfileSaverOpts);
} // TEST_F
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index f1123eb..e33a207 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -401,19 +401,19 @@
};
template <>
-struct CmdlineType<std::vector<ti::Agent>> : CmdlineTypeParser<std::vector<ti::Agent>> {
+struct CmdlineType<std::list<ti::Agent>> : CmdlineTypeParser<std::list<ti::Agent>> {
Result Parse(const std::string& args) {
- assert(false && "Use AppendValues() for an Agent vector type");
- return Result::Failure("Unconditional failure: Agent vector must be appended: " + args);
+ assert(false && "Use AppendValues() for an Agent list type");
+ return Result::Failure("Unconditional failure: Agent list must be appended: " + args);
}
Result ParseAndAppend(const std::string& args,
- std::vector<ti::Agent>& existing_value) {
+ std::list<ti::Agent>& existing_value) {
existing_value.emplace_back(args);
return Result::SuccessNoValue();
}
- static const char* Name() { return "std::vector<ti::Agent>"; }
+ static const char* Name() { return "std::list<ti::Agent>"; }
};
template <>
@@ -656,6 +656,8 @@
log_verbosity.systrace_lock_logging = true;
} else if (verbose_options[j] == "agents") {
log_verbosity.agents = true;
+ } else if (verbose_options[j] == "dex") {
+ log_verbosity.dex = true;
} else {
return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]);
}
@@ -752,9 +754,13 @@
return ParseInto(existing,
&ProfileSaverOptions::max_notification_before_wake_,
type_parser.Parse(suffix));
- } else {
- return Result::Failure(std::string("Invalid suboption '") + option + "'");
}
+ if (android::base::StartsWith(option, "profile-path:")) {
+ existing.profile_path_ = suffix;
+ return Result::SuccessNoValue();
+ }
+
+ return Result::Failure(std::string("Invalid suboption '") + option + "'");
}
static const char* Name() { return "ProfileSaverOptions"; }
@@ -774,6 +780,5 @@
static const char* Name() { return "ExperimentalFlags"; }
};
-
} // namespace art
#endif // ART_CMDLINE_CMDLINE_TYPES_H_
diff --git a/compiler/Android.bp b/compiler/Android.bp
index d57f301..312fc7b 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -106,7 +106,9 @@
"linker/arm/relative_patcher_arm_base.cc",
"linker/arm/relative_patcher_thumb2.cc",
"optimizing/code_generator_arm.cc",
+ "optimizing/code_generator_vector_arm.cc",
"optimizing/code_generator_arm_vixl.cc",
+ "optimizing/code_generator_vector_arm_vixl.cc",
"optimizing/dex_cache_array_fixups_arm.cc",
"optimizing/instruction_simplifier_arm.cc",
"optimizing/instruction_simplifier_shared.cc",
@@ -126,6 +128,7 @@
"jni/quick/arm64/calling_convention_arm64.cc",
"linker/arm64/relative_patcher_arm64.cc",
"optimizing/code_generator_arm64.cc",
+ "optimizing/code_generator_vector_arm64.cc",
"optimizing/scheduler_arm64.cc",
"optimizing/instruction_simplifier_arm64.cc",
"optimizing/intrinsics_arm64.cc",
@@ -139,6 +142,7 @@
"jni/quick/mips/calling_convention_mips.cc",
"linker/mips/relative_patcher_mips.cc",
"optimizing/code_generator_mips.cc",
+ "optimizing/code_generator_vector_mips.cc",
"optimizing/dex_cache_array_fixups_mips.cc",
"optimizing/intrinsics_mips.cc",
"optimizing/pc_relative_fixups_mips.cc",
@@ -151,6 +155,7 @@
"jni/quick/mips64/calling_convention_mips64.cc",
"linker/mips64/relative_patcher_mips64.cc",
"optimizing/code_generator_mips64.cc",
+ "optimizing/code_generator_vector_mips64.cc",
"optimizing/intrinsics_mips64.cc",
"utils/mips64/assembler_mips64.cc",
"utils/mips64/managed_register_mips64.cc",
@@ -162,6 +167,7 @@
"linker/x86/relative_patcher_x86.cc",
"linker/x86/relative_patcher_x86_base.cc",
"optimizing/code_generator_x86.cc",
+ "optimizing/code_generator_vector_x86.cc",
"optimizing/intrinsics_x86.cc",
"optimizing/pc_relative_fixups_x86.cc",
"optimizing/x86_memory_gen.cc",
@@ -176,6 +182,7 @@
"linker/x86_64/relative_patcher_x86_64.cc",
"optimizing/intrinsics_x86_64.cc",
"optimizing/code_generator_x86_64.cc",
+ "optimizing/code_generator_vector_x86_64.cc",
"utils/x86_64/assembler_x86_64.cc",
"utils/x86_64/jni_macro_assembler_x86_64.cc",
"utils/x86_64/managed_register_x86_64.cc",
@@ -391,6 +398,7 @@
mips64: {
srcs: [
"linker/mips64/relative_patcher_mips64_test.cc",
+ "utils/mips64/managed_register_mips64_test.cc",
],
},
x86: {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 98dcf20..0683577 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -117,25 +117,6 @@
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
};
-// TODO: When read barrier works with all Optimizing back ends, get rid of this.
-#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \
- if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \
- switch (GetInstructionSet()) { \
- case kArm64: \
- case kThumb2: \
- case kX86: \
- case kX86_64: \
- /* Instruction set has read barrier support. */ \
- break; \
- \
- default: \
- /* Instruction set does not have barrier support. */ \
- printf("WARNING: TEST DISABLED FOR READ BARRIER WITH OPTIMIZING " \
- "FOR THIS INSTRUCTION SET\n"); \
- return; \
- } \
- }
-
} // namespace art
#endif // ART_COMPILER_COMMON_COMPILER_TEST_H_
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 808e28c..538fe93 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -70,10 +70,6 @@
return *unit_.GetDexFile();
}
- bool PerformOptimizations() const {
- return dex_to_dex_compilation_level_ >= DexToDexCompilationLevel::kOptimize;
- }
-
// Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
// a barrier is required.
void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
@@ -114,7 +110,7 @@
};
void DexCompiler::Compile() {
- DCHECK_GE(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kRequired);
+ DCHECK_EQ(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kOptimize);
const DexFile::CodeItem* code_item = unit_.GetCodeItem();
const uint16_t* insns = code_item->insns_;
const uint32_t insns_size = code_item->insns_size_in_code_units_;
@@ -221,7 +217,7 @@
}
Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
- if (!kEnableCheckCastEllision || !PerformOptimizations()) {
+ if (!kEnableCheckCastEllision) {
return inst;
}
if (!driver_.IsSafeCast(&unit_, dex_pc)) {
@@ -254,7 +250,7 @@
uint32_t dex_pc,
Instruction::Code new_opcode,
bool is_put) {
- if (!kEnableQuickening || !PerformOptimizations()) {
+ if (!kEnableQuickening) {
return;
}
uint32_t field_idx = inst->VRegC_22c();
@@ -279,7 +275,7 @@
void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
Instruction::Code new_opcode, bool is_range) {
- if (!kEnableQuickening || !PerformOptimizations()) {
+ if (!kEnableQuickening) {
return;
}
uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 00c596d..87ddb39 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -34,8 +34,7 @@
enum class DexToDexCompilationLevel {
kDontDexToDexCompile, // Only meaning wrt image time interpretation.
- kRequired, // Dex-to-dex compilation required for correctness.
- kOptimize // Perform required transformation and peep-hole optimizations.
+ kOptimize // Perform peep-hole optimizations.
};
std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9950987..e823f67 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -532,16 +532,13 @@
if (driver.GetCompilerOptions().GetDebuggable()) {
// We are debuggable so definitions of classes might be changed. We don't want to do any
// optimizations that could break that.
- max_level = optimizer::DexToDexCompilationLevel::kRequired;
+ max_level = optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return max_level;
- } else if (klass->ShouldVerifyAtRuntime()) {
- // Class verification has soft-failed. Anyway, ensure at least correctness.
- return optimizer::DexToDexCompilationLevel::kRequired;
} else {
- // Class verification has failed: do not run DEX-to-DEX compilation.
+ // Class verification has failed: do not run DEX-to-DEX optimizations.
return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
}
@@ -611,7 +608,7 @@
dex_file,
(verified_method != nullptr)
? dex_to_dex_compilation_level
- : optimizer::DexToDexCompilationLevel::kRequired);
+ : optimizer::DexToDexCompilationLevel::kDontDexToDexCompile);
}
} else if ((access_flags & kAccNative) != 0) {
// Are we extracting only and have support for generic JNI down calls?
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 35aa1ee..fa1b3a3 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -148,7 +148,6 @@
}
TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
@@ -191,7 +190,6 @@
};
TEST_F(CompilerDriverMethodsTest, Selection) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
{
@@ -299,7 +297,6 @@
};
TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
{
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 34ad1c5..a0c0a2a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -27,7 +27,6 @@
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
- inline_depth_limit_(kUnsetInlineDepthLimit),
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
no_inline_from_(nullptr),
boot_image_(false),
@@ -62,7 +61,6 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
double top_k_profile_threshold,
@@ -86,7 +84,6 @@
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
- inline_depth_limit_(inline_depth_limit),
inline_max_code_units_(inline_max_code_units),
no_inline_from_(no_inline_from),
boot_image_(false),
@@ -130,10 +127,6 @@
ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage);
}
-void CompilerOptions::ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--inline-depth-limit", &inline_depth_limit_, Usage);
-}
-
void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
}
@@ -183,8 +176,6 @@
ParseTinyMethodMax(option, Usage);
} else if (option.starts_with("--num-dex-methods=")) {
ParseNumDexMethods(option, Usage);
- } else if (option.starts_with("--inline-depth-limit=")) {
- ParseInlineDepthLimit(option, Usage);
} else if (option.starts_with("--inline-max-code-units=")) {
ParseInlineMaxCodeUnits(option, Usage);
} else if (option == "--generate-debug-info" || option == "-g") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 2e3e55f..2376fbf 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -46,15 +46,9 @@
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultGenerateDebugInfo = false;
static const bool kDefaultGenerateMiniDebugInfo = false;
- static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 32;
- static constexpr size_t kUnsetInlineDepthLimit = -1;
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
- // Default inlining settings when the space filter is used.
- static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
- static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10;
-
CompilerOptions();
~CompilerOptions();
@@ -64,7 +58,6 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
double top_k_profile_threshold,
@@ -155,13 +148,6 @@
return num_dex_methods_threshold_;
}
- size_t GetInlineDepthLimit() const {
- return inline_depth_limit_;
- }
- void SetInlineDepthLimit(size_t limit) {
- inline_depth_limit_ = limit;
- }
-
size_t GetInlineMaxCodeUnits() const {
return inline_max_code_units_;
}
@@ -275,7 +261,6 @@
void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
- void ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage);
void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
void ParseTinyMethodMax(const StringPiece& option, UsageFn Usage);
void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
@@ -289,7 +274,6 @@
size_t small_method_threshold_;
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
- size_t inline_depth_limit_;
size_t inline_max_code_units_;
// Dex files from which we should not inline code.
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 89e8a67..897d819 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -76,7 +76,7 @@
void Compile(ImageHeader::StorageMode storage_mode,
CompilationHelper& out_helper,
const std::string& extra_dex = "",
- const std::string& image_class = "");
+ const std::initializer_list<std::string>& image_classes = {});
void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
CommonCompilerTest::SetUpRuntimeOptions(options);
@@ -90,6 +90,18 @@
return new std::unordered_set<std::string>(image_classes_);
}
+ ArtMethod* FindCopiedMethod(ArtMethod* origin, mirror::Class* klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
+ for (ArtMethod& m : klass->GetCopiedMethods(pointer_size)) {
+ if (strcmp(origin->GetName(), m.GetName()) == 0 &&
+ origin->GetSignature() == m.GetSignature()) {
+ return &m;
+ }
+ }
+ return nullptr;
+ }
+
private:
std::unordered_set<std::string> image_classes_;
};
@@ -345,26 +357,27 @@
void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
CompilationHelper& helper,
const std::string& extra_dex,
- const std::string& image_class) {
- if (!image_class.empty()) {
+ const std::initializer_list<std::string>& image_classes) {
+ for (const std::string& image_class : image_classes) {
image_classes_.insert(image_class);
}
CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
// Set inline filter values.
- compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit);
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
image_classes_.clear();
if (!extra_dex.empty()) {
helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str());
}
helper.Compile(compiler_driver_.get(), storage_mode);
- if (!image_class.empty()) {
+ if (image_classes.begin() != image_classes.end()) {
// Make sure the class got initialized.
ScopedObjectAccess soa(Thread::Current());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* klass = class_linker->FindSystemClass(Thread::Current(), image_class.c_str());
- EXPECT_TRUE(klass != nullptr);
- EXPECT_TRUE(klass->IsInitialized());
+ for (const std::string& image_class : image_classes) {
+ mirror::Class* klass = class_linker->FindSystemClass(Thread::Current(), image_class.c_str());
+ EXPECT_TRUE(klass != nullptr);
+ EXPECT_TRUE(klass->IsInitialized());
+ }
}
}
@@ -492,7 +505,7 @@
// Compile multi-image with ImageLayoutA being the last image.
{
CompilationHelper helper;
- Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", "LMyClass;");
+ Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", {"LMyClass;"});
image_sizes = helper.GetImageObjectSectionSizes();
}
TearDown();
@@ -501,7 +514,7 @@
// Compile multi-image with ImageLayoutB being the last image.
{
CompilationHelper helper;
- Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", "LMyClass;");
+ Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", {"LMyClass;"});
image_sizes_extra = helper.GetImageObjectSectionSizes();
}
// Make sure that the new stuff in the clinit in ImageLayoutB is in the last image and not in the
@@ -553,4 +566,63 @@
ASSERT_FALSE(image_header.IsValid());
}
+// Test that pointer to quick code is the same in
+// a default method of an interface and in a copied method
+// of a class which implements the interface. This should be true
+// only if the copied method and the origin method are located in the
+// same oat file.
+TEST_F(ImageTest, TestDefaultMethods) {
+ CompilationHelper helper;
+ Compile(ImageHeader::kStorageModeUncompressed,
+ helper,
+ "DefaultMethods",
+ {"LIface;", "LImpl;", "LIterableBase;"});
+
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ // Test the pointer to quick code is the same in origin method
+ // and in the copied method form the same oat file.
+ mirror::Class* iface_klass = class_linker_->LookupClass(
+ self, "LIface;", ObjPtr<mirror::ClassLoader>());
+ ASSERT_NE(nullptr, iface_klass);
+ ArtMethod* origin = iface_klass->FindDeclaredVirtualMethod(
+ "defaultMethod", "()V", pointer_size);
+ ASSERT_NE(nullptr, origin);
+ const void* code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
+ // The origin method should have a pointer to quick code
+ ASSERT_NE(nullptr, code);
+ ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
+ mirror::Class* impl_klass = class_linker_->LookupClass(
+ self, "LImpl;", ObjPtr<mirror::ClassLoader>());
+ ASSERT_NE(nullptr, impl_klass);
+ ArtMethod* copied = FindCopiedMethod(origin, impl_klass);
+ ASSERT_NE(nullptr, copied);
+ // the copied method should have pointer to the same quick code as the origin method
+ ASSERT_EQ(code, copied->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size));
+
+ // Test the origin method has pointer to quick code
+ // but the copied method has pointer to interpreter
+ // because these methods are in different oat files.
+ mirror::Class* iterable_klass = class_linker_->LookupClass(
+ self, "Ljava/lang/Iterable;", ObjPtr<mirror::ClassLoader>());
+ ASSERT_NE(nullptr, iterable_klass);
+ origin = iterable_klass->FindDeclaredVirtualMethod(
+ "forEach", "(Ljava/util/function/Consumer;)V", pointer_size);
+ ASSERT_NE(nullptr, origin);
+ code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
+ // the origin method should have a pointer to quick code
+ ASSERT_NE(nullptr, code);
+ ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
+ mirror::Class* iterablebase_klass = class_linker_->LookupClass(
+ self, "LIterableBase;", ObjPtr<mirror::ClassLoader>());
+ ASSERT_NE(nullptr, iterablebase_klass);
+ copied = FindCopiedMethod(origin, iterablebase_klass);
+ ASSERT_NE(nullptr, copied);
+ code = copied->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
+ // the copied method should have a pointer to interpreter
+ ASSERT_TRUE(class_linker_->IsQuickToInterpreterBridge(code));
+}
+
} // namespace art
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index aefdb54..d129249 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -714,7 +714,8 @@
class_linker->VisitClassesWithoutClassesLock(&visitor);
}
-static bool IsBootClassLoaderClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+static bool IsBootClassLoaderClass(ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return klass->GetClassLoader() == nullptr;
}
@@ -722,33 +723,33 @@
return IsBootClassLoaderClass(klass) && !IsInBootImage(klass);
}
-bool ImageWriter::PruneAppImageClass(mirror::Class* klass) {
+bool ImageWriter::PruneAppImageClass(ObjPtr<mirror::Class> klass) {
bool early_exit = false;
std::unordered_set<mirror::Class*> visited;
return PruneAppImageClassInternal(klass, &early_exit, &visited);
}
bool ImageWriter::PruneAppImageClassInternal(
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited) {
DCHECK(early_exit != nullptr);
DCHECK(visited != nullptr);
DCHECK(compile_app_image_);
- if (klass == nullptr || IsInBootImage(klass)) {
+ if (klass == nullptr || IsInBootImage(klass.Ptr())) {
return false;
}
- auto found = prune_class_memo_.find(klass);
+ auto found = prune_class_memo_.find(klass.Ptr());
if (found != prune_class_memo_.end()) {
// Already computed, return the found value.
return found->second;
}
// Circular dependencies, return false but do not store the result in the memoization table.
- if (visited->find(klass) != visited->end()) {
+ if (visited->find(klass.Ptr()) != visited->end()) {
*early_exit = true;
return false;
}
- visited->emplace(klass);
+ visited->emplace(klass.Ptr());
bool result = IsBootClassLoaderClass(klass);
std::string temp;
// Prune if not an image class, this handles any broken sets of image classes such as having a
@@ -812,20 +813,20 @@
dex_file_oat_index_map_.find(dex_cache->GetDexFile()) == dex_file_oat_index_map_.end();
}
// Erase the element we stored earlier since we are exiting the function.
- auto it = visited->find(klass);
+ auto it = visited->find(klass.Ptr());
DCHECK(it != visited->end());
visited->erase(it);
// Only store result if it is true or none of the calls early exited due to circular
// dependencies. If visited is empty then we are the root caller, in this case the cycle was in
// a child call and we can remember the result.
if (result == true || !my_early_exit || visited->empty()) {
- prune_class_memo_[klass] = result;
+ prune_class_memo_[klass.Ptr()] = result;
}
*early_exit |= my_early_exit;
return result;
}
-bool ImageWriter::KeepClass(Class* klass) {
+bool ImageWriter::KeepClass(ObjPtr<mirror::Class> klass) {
if (klass == nullptr) {
return false;
}
@@ -896,15 +897,27 @@
Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
class_table->Visit(classes_visitor);
removed_class_count_ += classes_visitor.Prune();
+
+ // Record app image class loader. The fake boot class loader should not get registered
+ // and we should end up with only one class loader for an app and none for boot image.
+ if (class_loader != nullptr && class_table != nullptr) {
+ DCHECK(class_loader_ == nullptr);
+ class_loader_ = class_loader;
+ }
}
size_t GetRemovedClassCount() const {
return removed_class_count_;
}
+ ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return class_loader_;
+ }
+
private:
ImageWriter* const image_writer_;
size_t removed_class_count_;
+ ObjPtr<mirror::ClassLoader> class_loader_;
};
void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
@@ -913,69 +926,149 @@
Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
}
+void ImageWriter::PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ // To ensure deterministic contents of the hash-based arrays, each slot shall contain
+ // the candidate with the lowest index. As we're processing entries in increasing index
+ // order, this means trying to look up the entry for the current index if the slot is
+ // empty or if it contains a higher index.
+
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ ArtMethod* resolution_method = runtime->GetResolutionMethod();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ // Prune methods.
+ ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
+ for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
+ ArtMethod* method =
+ mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
+ DCHECK(method != nullptr) << "Expected resolution method instead of null method";
+ mirror::Class* declaring_class = method->GetDeclaringClass();
+ // Copied methods may be held live by a class which was not an image class but have a
+ // declaring class which is an image class. Set it to the resolution method to be safe and
+ // prevent dangling pointers.
+ if (method->IsCopied() || !KeepClass(declaring_class)) {
+ mirror::DexCache::SetElementPtrSize(resolved_methods,
+ i,
+ resolution_method,
+ target_ptr_size_);
+ } else if (kIsDebugBuild) {
+ // Check that the class is still in the classes table.
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ CHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
+ << Class::PrettyClass(declaring_class) << " not in class linker table";
+ }
+ }
+ // Prune fields and make the contents of the field array deterministic.
+ mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
+ dex::TypeIndex last_class_idx; // Initialized to invalid index.
+ ObjPtr<mirror::Class> last_class = nullptr;
+ for (size_t i = 0, end = dex_file.NumFieldIds(); i < end; ++i) {
+ uint32_t slot_idx = dex_cache->FieldSlotIndex(i);
+ auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
+ uint32_t stored_index = pair.index;
+ ArtField* field = pair.object;
+ if (field != nullptr && i > stored_index) {
+ continue; // Already checked.
+ }
+ // Check if the referenced class is in the image. Note that we want to check the referenced
+ // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
+ // results in resolving the referenced class and that can for example throw OOME.
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(i);
+ if (field_id.class_idx_ != last_class_idx) {
+ last_class_idx = field_id.class_idx_;
+ last_class = class_linker->LookupResolvedType(
+ dex_file, last_class_idx, dex_cache, class_loader);
+ if (last_class != nullptr && !KeepClass(last_class)) {
+ last_class = nullptr;
+ }
+ }
+ if (field == nullptr || i < stored_index) {
+ if (last_class != nullptr) {
+ const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
+ const char* type = dex_file.StringByTypeIdx(field_id.type_idx_);
+ field = mirror::Class::FindField(Thread::Current(), last_class, name, type);
+ if (field != nullptr) {
+ // If the referenced class is in the image, the defining class must also be there.
+ DCHECK(KeepClass(field->GetDeclaringClass()));
+ dex_cache->SetResolvedField(i, field, target_ptr_size_);
+ }
+ }
+ } else {
+ DCHECK_EQ(i, stored_index);
+ if (last_class == nullptr) {
+ dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
+ }
+ }
+ }
+ // Prune types and make the contents of the type array deterministic.
+ // This is done after fields and methods as their lookup can touch the types array.
+ for (size_t i = 0, end = dex_cache->GetDexFile()->NumTypeIds(); i < end; ++i) {
+ dex::TypeIndex type_idx(i);
+ uint32_t slot_idx = dex_cache->TypeSlotIndex(type_idx);
+ mirror::TypeDexCachePair pair =
+ dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
+ uint32_t stored_index = pair.index;
+ ObjPtr<mirror::Class> klass = pair.object.Read();
+ if (klass == nullptr || i < stored_index) {
+ klass = class_linker->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader);
+ if (klass != nullptr) {
+ DCHECK_EQ(dex_cache->GetResolvedType(type_idx), klass);
+ stored_index = i; // For correct clearing below if not keeping the `klass`.
+ }
+ } else if (i == stored_index && !KeepClass(klass)) {
+ dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
+ }
+ }
+ // Strings do not need pruning, but the contents of the string array must be deterministic.
+ for (size_t i = 0, end = dex_cache->GetDexFile()->NumStringIds(); i < end; ++i) {
+ dex::StringIndex string_idx(i);
+ uint32_t slot_idx = dex_cache->StringSlotIndex(string_idx);
+ mirror::StringDexCachePair pair =
+ dex_cache->GetStrings()[slot_idx].load(std::memory_order_relaxed);
+ uint32_t stored_index = pair.index;
+ ObjPtr<mirror::String> string = pair.object.Read();
+ if (string == nullptr || i < stored_index) {
+ string = class_linker->LookupString(dex_file, string_idx, dex_cache);
+ DCHECK(string == nullptr || dex_cache->GetResolvedString(string_idx) == string);
+ }
+ }
+}
+
void ImageWriter::PruneNonImageClasses() {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
+ ScopedAssertNoThreadSuspension sa(__FUNCTION__);
// Clear class table strong roots so that dex caches can get pruned. We require pruning the class
// path dex caches.
class_linker->ClearClassTableStrongRoots();
// Remove the undesired classes from the class roots.
+ ObjPtr<mirror::ClassLoader> class_loader;
{
PruneClassLoaderClassesVisitor class_loader_visitor(this);
VisitClassLoaders(&class_loader_visitor);
VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
+ class_loader = class_loader_visitor.GetClassLoader();
+ DCHECK_EQ(class_loader != nullptr, compile_app_image_);
}
// Clear references to removed classes from the DexCaches.
- ArtMethod* resolution_method = runtime->GetResolutionMethod();
-
- ScopedAssertNoThreadSuspension sa(__FUNCTION__);
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
- ReaderMutexLock mu2(self, *Locks::dex_lock_);
- for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
- if (self->IsJWeakCleared(data.weak_root)) {
- continue;
- }
- ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- mirror::TypeDexCachePair pair =
- dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
- mirror::Class* klass = pair.object.Read();
- if (klass != nullptr && !KeepClass(klass)) {
- dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
+ std::vector<ObjPtr<mirror::DexCache>> dex_caches;
+ {
+ ReaderMutexLock mu2(self, *Locks::dex_lock_);
+ dex_caches.reserve(class_linker->GetDexCachesData().size());
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ if (self->IsJWeakCleared(data.weak_root)) {
+ continue;
}
+ dex_caches.push_back(self->DecodeJObject(data.weak_root)->AsDexCache());
}
- ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
- for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
- ArtMethod* method =
- mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
- DCHECK(method != nullptr) << "Expected resolution method instead of null method";
- mirror::Class* declaring_class = method->GetDeclaringClass();
- // Copied methods may be held live by a class which was not an image class but have a
- // declaring class which is an image class. Set it to the resolution method to be safe and
- // prevent dangling pointers.
- if (method->IsCopied() || !KeepClass(declaring_class)) {
- mirror::DexCache::SetElementPtrSize(resolved_methods,
- i,
- resolution_method,
- target_ptr_size_);
- } else {
- // Check that the class is still in the classes table.
- DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
- << Class::PrettyClass(declaring_class) << " not in class linker table";
- }
- }
- mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
- for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, i, target_ptr_size_);
- ArtField* field = pair.object;
- if (field != nullptr && !KeepClass(field->GetDeclaringClass().Ptr())) {
- dex_cache->ClearResolvedField(pair.index, target_ptr_size_);
- }
- }
+ }
+ for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
+ PruneAndPreloadDexCache(dex_cache, class_loader);
}
// Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1245,21 +1338,20 @@
// live.
if (as_klass->ShouldHaveImt()) {
ImTable* imt = as_klass->GetImt(target_ptr_size_);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
- DCHECK(imt_method != nullptr);
- if (imt_method->IsRuntimeMethod() &&
- !IsInBootImage(imt_method) &&
- !NativeRelocationAssigned(imt_method)) {
- AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+ if (TryAssignImTableOffset(imt, oat_index)) {
+      // Since ImTables can be shared, only do this the first time so that IMT
+      // method fixups are not counted twice.
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
+ DCHECK(imt_method != nullptr);
+ if (imt_method->IsRuntimeMethod() &&
+ !IsInBootImage(imt_method) &&
+ !NativeRelocationAssigned(imt_method)) {
+ AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+ }
}
}
}
-
- if (as_klass->ShouldHaveImt()) {
- ImTable* imt = as_klass->GetImt(target_ptr_size_);
- TryAssignImTableOffset(imt, oat_index);
- }
} else if (obj->IsClassLoader()) {
// Register the class loader if it has a class table.
// The fake boot class loader should not get registered and we should end up with only one
@@ -1293,10 +1385,10 @@
return native_object_relocations_.find(ptr) != native_object_relocations_.end();
}
-void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
+bool ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
// No offset, or already assigned.
if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
- return;
+ return false;
}
// If the method is a conflict method we also want to assign the conflict table offset.
ImageInfo& image_info = GetImageInfo(oat_index);
@@ -1308,6 +1400,7 @@
image_info.bin_slot_sizes_[kBinImTable],
kNativeObjectRelocationTypeIMTable});
image_info.bin_slot_sizes_[kBinImTable] += size;
+ return true;
}
void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
@@ -1406,8 +1499,7 @@
ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- ref->SetReferent</*kTransactionActive*/false>(
- VisitReference(ref->GetReferent<kWithoutReadBarrier>()));
+ operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
}
private:
@@ -1565,7 +1657,7 @@
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
- // Calculate the sizes of the intern tables and class tables.
+ // Calculate the sizes of the intern tables, class tables, and fixup tables.
for (ImageInfo& image_info : image_infos_) {
// Calculate how big the intern table will be after being serialized.
InternTable* const intern_table = image_info.intern_table_.get();
@@ -1573,6 +1665,7 @@
if (intern_table->StrongSize() != 0u) {
image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
}
+
// Calculate the size of the class table.
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
DCHECK_EQ(image_info.class_table_->NumReferencedZygoteClasses(), 0u);
@@ -1625,8 +1718,6 @@
// Transform each object's bin slot into an offset which will be used to do the final copy.
heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
- // DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
-
size_t i = 0;
for (ImageInfo& image_info : image_infos_) {
image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get()));
@@ -1640,8 +1731,6 @@
ImageInfo& image_info = GetImageInfo(relocation.oat_index);
relocation.offset += image_info.bin_slot_offsets_[bin_type];
}
-
- // Note that image_info.image_end_ is left at end of used mirror object section.
}
size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
@@ -1683,7 +1772,6 @@
ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
*dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
bin_slot_sizes_[kBinDexCacheArray]);
-
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
// Calculate the size of the interned strings.
@@ -1775,18 +1863,18 @@
explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
}
- void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
+ size_t count ATTRIBUTE_UNUSED,
+ const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- for (size_t i = 0; i < count; ++i) {
- *roots[i] = image_writer_->GetImageAddress(*roots[i]);
- }
+ LOG(FATAL) << "Unsupported";
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
+ image_writer_->CopyReference(roots[i], roots[i]->AsMirrorPtr());
}
}
@@ -1797,7 +1885,9 @@
void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
for (size_t i = 0; i < ImTable::kSize; ++i) {
ArtMethod* method = orig->Get(i, target_ptr_size_);
- copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
+ void** address = reinterpret_cast<void**>(copy->AddressOfElement(i, target_ptr_size_));
+ CopyAndFixupPointer(address, method);
+ DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method));
}
}
@@ -1806,10 +1896,13 @@
for (size_t i = 0; i < count; ++i) {
ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
- copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
- copy->SetImplementationMethod(i,
- target_ptr_size_,
- NativeLocationInImage(implementation_method));
+ CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method);
+ CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_),
+ implementation_method);
+ DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_),
+ NativeLocationInImage(interface_method));
+ DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_),
+ NativeLocationInImage(implementation_method));
}
}
@@ -1828,8 +1921,9 @@
switch (relocation.type) {
case kNativeObjectRelocationTypeArtField: {
memcpy(dest, pair.first, sizeof(ArtField));
- reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
- GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass().Ptr()));
+ CopyReference(
+ reinterpret_cast<ArtField*>(dest)->GetDeclaringClassAddressWithoutBarrier(),
+ reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass().Ptr());
break;
}
case kNativeObjectRelocationTypeRuntimeMethod:
@@ -1946,8 +2040,10 @@
reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
}
-void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr,
- mirror::Class* klass, Bin array_type) {
+void ImageWriter::FixupPointerArray(mirror::Object* dst,
+ mirror::PointerArray* arr,
+ mirror::Class* klass,
+ Bin array_type) {
CHECK(klass->IsArrayClass());
CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
// Fixup int and long pointers for the ArtMethod or ArtField arrays.
@@ -1956,7 +2052,7 @@
auto* dest_array = down_cast<mirror::PointerArray*>(dst);
for (size_t i = 0, count = num_elements; i < count; ++i) {
void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
- if (elem != nullptr && !IsInBootImage(elem)) {
+ if (kIsDebugBuild && elem != nullptr && !IsInBootImage(elem)) {
auto it = native_object_relocations_.find(elem);
if (UNLIKELY(it == native_object_relocations_.end())) {
if (it->second.IsArtMethodRelocation()) {
@@ -1972,12 +2068,9 @@
<< Class::PrettyClass(field->GetDeclaringClass());
}
UNREACHABLE();
- } else {
- ImageInfo& image_info = GetImageInfo(it->second.oat_index);
- elem = image_info.image_begin_ + it->second.offset;
}
}
- dest_array->SetElementPtrSize<false, true>(i, elem, target_ptr_size_);
+ CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
}
}
@@ -2025,22 +2118,19 @@
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
- // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
- // image.
- copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
- offset,
- image_writer_->GetImageAddress(ref.Ptr()));
+ // Copy the reference and record the fixup if necessary.
+ image_writer_->CopyReference(
+ copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset),
+ ref.Ptr());
}
// java.lang.ref.Reference visitor.
void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
- mirror::Reference::ReferentOffset(),
- image_writer_->GetImageAddress(ref->GetReferent()));
+ operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
}
protected:
@@ -2118,7 +2208,10 @@
explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
template <typename T>
- T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ T* operator()(T* ptr, void** dest_addr = nullptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (dest_addr != nullptr) {
+ image_writer_->CopyAndFixupPointer(dest_addr, ptr);
+ }
return image_writer_->NativeLocationInImage(ptr);
}
@@ -2181,10 +2274,10 @@
}
}
-
-class ImageAddressVisitor {
+class ImageWriter::ImageAddressVisitorForDexCacheArray {
public:
- explicit ImageAddressVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+ explicit ImageAddressVisitorForDexCacheArray(ImageWriter* image_writer)
+ : image_writer_(image_writer) {}
template <typename T>
T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2195,9 +2288,9 @@
ImageWriter* const image_writer_;
};
-
void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
mirror::DexCache* copy_dex_cache) {
+ ImageAddressVisitorForDexCacheArray fixup_visitor(this);
// Though the DexCache array fields are usually treated as native pointers, we set the full
// 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
// done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
@@ -2207,8 +2300,7 @@
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
NativeLocationInImage(orig_strings),
PointerSize::k64);
- orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
- ImageAddressVisitor(this));
+ orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache), fixup_visitor);
}
mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
if (orig_types != nullptr) {
@@ -2216,7 +2308,7 @@
NativeLocationInImage(orig_types),
PointerSize::k64);
orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
- ImageAddressVisitor(this));
+ fixup_visitor);
}
ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
if (orig_methods != nullptr) {
@@ -2240,7 +2332,8 @@
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
mirror::FieldDexCachePair orig =
mirror::DexCache::GetNativePairPtrSize(orig_fields, i, target_ptr_size_);
- mirror::FieldDexCachePair copy(NativeLocationInImage(orig.object), orig.index);
+ mirror::FieldDexCachePair copy = orig;
+ copy.object = NativeLocationInImage(orig.object);
mirror::DexCache::SetNativePairPtrSize(copy_fields, i, copy, target_ptr_size_);
}
}
@@ -2250,7 +2343,7 @@
NativeLocationInImage(orig_method_types),
PointerSize::k64);
orig_dex_cache->FixupResolvedMethodTypes(NativeCopyLocation(orig_method_types, orig_dex_cache),
- ImageAddressVisitor(this));
+ fixup_visitor);
}
GcRoot<mirror::CallSite>* orig_call_sites = orig_dex_cache->GetResolvedCallSites();
if (orig_call_sites != nullptr) {
@@ -2258,7 +2351,7 @@
NativeLocationInImage(orig_call_sites),
PointerSize::k64);
orig_dex_cache->FixupResolvedCallSites(NativeCopyLocation(orig_call_sites, orig_dex_cache),
- ImageAddressVisitor(this));
+ fixup_visitor);
}
// Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving
@@ -2366,7 +2459,8 @@
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
- copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
+ CopyReference(copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
+
ArtMethod** orig_resolved_methods = orig->GetDexCacheResolvedMethods(target_ptr_size_);
copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);
@@ -2478,7 +2572,7 @@
return GetDefaultOatIndex();
}
auto it = oat_index_map_.find(obj);
- DCHECK(it != oat_index_map_.end());
+ DCHECK(it != oat_index_map_.end()) << obj;
return it->second;
}
@@ -2579,4 +2673,31 @@
: intern_table_(new InternTable),
class_table_(new ClassTable) {}
+void ImageWriter::CopyReference(mirror::HeapReference<mirror::Object>* dest,
+ ObjPtr<mirror::Object> src) {
+ dest->Assign(GetImageAddress(src.Ptr()));
+}
+
+void ImageWriter::CopyReference(mirror::CompressedReference<mirror::Object>* dest,
+ ObjPtr<mirror::Object> src) {
+ dest->Assign(GetImageAddress(src.Ptr()));
+}
+
+void ImageWriter::CopyAndFixupPointer(void** target, void* value) {
+ void* new_value = value;
+ if (value != nullptr && !IsInBootImage(value)) {
+ auto it = native_object_relocations_.find(value);
+ CHECK(it != native_object_relocations_.end()) << value;
+ const NativeObjectRelocation& relocation = it->second;
+ ImageInfo& image_info = GetImageInfo(relocation.oat_index);
+ new_value = reinterpret_cast<void*>(image_info.image_begin_ + relocation.offset);
+ }
+ if (target_ptr_size_ == PointerSize::k32) {
+ *reinterpret_cast<uint32_t*>(target) = PointerToLowMemUInt32(new_value);
+ } else {
+ *reinterpret_cast<uint64_t*>(target) = reinterpret_cast<uintptr_t>(new_value);
+ }
+}
+
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index bdc7146..39113c8 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -38,8 +38,9 @@
#include "image.h"
#include "lock_word.h"
#include "mem_map.h"
-#include "oat_file.h"
#include "mirror/dex_cache.h"
+#include "obj_ptr.h"
+#include "oat_file.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"
@@ -317,6 +318,12 @@
// Number of image class table bytes.
size_t class_table_bytes_ = 0;
+ // Number of object fixup bytes.
+ size_t object_fixup_bytes_ = 0;
+
+ // Number of pointer fixup bytes.
+ size_t pointer_fixup_bytes_ = 0;
+
// Intern table associated with this image for serialization.
std::unique_ptr<InternTable> intern_table_;
@@ -376,7 +383,7 @@
}
// Returns true if the class was in the original requested image classes list.
- bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Debug aid that list of requested image classes.
void DumpImageClasses();
@@ -391,6 +398,12 @@
// Remove unwanted classes from various roots.
void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
+ void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
// Verify unwanted classes removed.
void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
@@ -458,7 +471,8 @@
size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return true if imt was newly inserted.
+ bool TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
// Assign the offset for an IMT conflict table. Does nothing if the table already has a native
// relocation.
@@ -473,11 +487,11 @@
// we also cannot have any classes which refer to these boot class loader non image classes.
// PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
// driver.
- bool PruneAppImageClass(mirror::Class* klass)
+ bool PruneAppImageClass(ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// early_exit is true if we had a cyclic dependency anywhere down the chain.
- bool PruneAppImageClassInternal(mirror::Class* klass,
+ bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -528,6 +542,14 @@
// Return true if there already exists a native allocation for an object.
bool NativeRelocationAssigned(void* ptr) const;
+ void CopyReference(mirror::HeapReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void CopyReference(mirror::CompressedReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void CopyAndFixupPointer(void** target, void* value);
+
const CompilerDriver& compiler_driver_;
// Beginning target image address for the first image.
@@ -602,9 +624,11 @@
class FixupRootVisitor;
class FixupVisitor;
class GetRootsVisitor;
+ class ImageAddressVisitorForDexCacheArray;
class NativeLocationVisitor;
class PruneClassesVisitor;
class PruneClassLoaderClassesVisitor;
+ class RegisterBootClassPathClassesVisitor;
class VisitReferencesVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3ae7974..ad951bc 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -97,7 +97,6 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
- CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
/* no_inline_from */ nullptr,
CompilerOptions::kDefaultTopKProfileThreshold,
@@ -177,10 +176,6 @@
jit_logger_.reset(new JitLogger());
jit_logger_->OpenLog();
}
-
- size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
- DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max())
- << "ProfilingInfo's inline counter can potentially overflow";
}
JitCompiler::~JitCompiler() {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5406ae7..105db1d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1034,18 +1034,63 @@
class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
public:
- InitImageMethodVisitor(OatWriter* writer, size_t offset)
+ InitImageMethodVisitor(OatWriter* writer,
+ size_t offset,
+ const std::vector<const DexFile*>* dex_files)
: OatDexMethodVisitor(writer, offset),
- pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())) {
+ pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+ dex_files_(dex_files),
+ class_linker_(Runtime::Current()->GetClassLinker()) {
+ }
+
+ // Handle copied methods here. Copy pointer to quick code from
+ // an origin method to a copied method only if they are
+ // in the same oat file. If the origin and the copied methods are
+ // in different oat files don't touch the copied method.
+ // References to other oat files are not supported yet.
+ bool StartClass(const DexFile* dex_file, size_t class_def_index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ OatDexMethodVisitor::StartClass(dex_file, class_def_index);
+ // Skip classes that are not in the image.
+ if (!IsImageClass()) {
+ return true;
+ }
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache = hs.NewHandle(
+ class_linker_->FindDexCache(Thread::Current(), *dex_file));
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+ mirror::Class* klass = dex_cache->GetResolvedType(class_def.class_idx_);
+ if (klass != nullptr) {
+ for (ArtMethod& method : klass->GetCopiedMethods(pointer_size_)) {
+ // Find origin method. Declaring class and dex_method_idx
+ // in the copied method should be the same as in the origin
+ // method.
+ mirror::Class* declaring_class = method.GetDeclaringClass();
+ ArtMethod* origin = declaring_class->FindDeclaredVirtualMethod(
+ declaring_class->GetDexCache(),
+ method.GetDexMethodIndex(),
+ pointer_size_);
+ CHECK(origin != nullptr);
+ if (IsInOatFile(&declaring_class->GetDexFile())) {
+ const void* code_ptr =
+ origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
+ if (code_ptr == nullptr) {
+ methods_to_process_.push_back(std::make_pair(&method, origin));
+ } else {
+ method.SetEntryPointFromQuickCompiledCodePtrSize(
+ code_ptr, pointer_size_);
+ }
+ }
+ }
+ }
+ return true;
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::TypeId& type_id =
- dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
- const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
// Skip methods that are not in the image.
- if (!writer_->GetCompilerDriver()->IsImageClass(class_descriptor)) {
+ if (!IsImageClass()) {
return true;
}
@@ -1059,17 +1104,16 @@
++method_offsets_index_;
}
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker_->FindDexCache(
Thread::Current(), *dex_file_)));
ArtMethod* method;
if (writer_->HasBootImage()) {
const InvokeType invoke_type = it.GetMethodInvokeType(
dex_file_->GetClassDef(class_def_index_));
- method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+ method = class_linker_->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
*dex_file_,
it.GetMemberIndex(),
dex_cache,
@@ -1089,7 +1133,8 @@
// Should already have been resolved by the compiler, just peek into the dex cache.
// It may not be resolved if the class failed to verify, in this case, don't set the
// entrypoint. This is not fatal since the dex cache will contain a resolution method.
- method = dex_cache->GetResolvedMethod(it.GetMemberIndex(), linker->GetImagePointerSize());
+ method = dex_cache->GetResolvedMethod(it.GetMemberIndex(),
+ class_linker_->GetImagePointerSize());
}
if (method != nullptr &&
compiled_method != nullptr &&
@@ -1101,8 +1146,38 @@
return true;
}
+  // Check whether the current class is an image class.
+ bool IsImageClass() {
+ const DexFile::TypeId& type_id =
+ dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
+ const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
+ return writer_->GetCompilerDriver()->IsImageClass(class_descriptor);
+ }
+
+ // Check whether specified dex file is in the compiled oat file.
+ bool IsInOatFile(const DexFile* dex_file) {
+ return ContainsElement(*dex_files_, dex_file);
+ }
+
+  // Assign a pointer to the quick code for any copied methods
+  // that were not handled in StartClass.
+ void Postprocess() {
+ for (std::pair<ArtMethod*, ArtMethod*>& p : methods_to_process_) {
+ ArtMethod* method = p.first;
+ ArtMethod* origin = p.second;
+ const void* code_ptr =
+ origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
+ if (code_ptr != nullptr) {
+ method->SetEntryPointFromQuickCompiledCodePtrSize(code_ptr, pointer_size_);
+ }
+ }
+ }
+
protected:
const PointerSize pointer_size_;
+ const std::vector<const DexFile*>* dex_files_;
+ ClassLinker* const class_linker_;
+ std::vector<std::pair<ArtMethod*, ArtMethod*>> methods_to_process_;
};
class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
@@ -1365,12 +1440,10 @@
mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache(patch.TargetStringDexFile())));
mirror::String* string = linker->LookupString(*patch.TargetStringDexFile(),
patch.TargetStringIndex(),
- dex_cache);
+ GetDexCache(patch.TargetStringDexFile()));
DCHECK(string != nullptr);
DCHECK(writer_->HasBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
@@ -1744,8 +1817,9 @@
offset = code_visitor.GetOffset();
if (HasImage()) {
- InitImageMethodVisitor image_visitor(this, offset);
+ InitImageMethodVisitor image_visitor(this, offset, dex_files_);
success = VisitDexMethods(&image_visitor);
+ image_visitor.Postprocess();
DCHECK(success);
offset = image_visitor.GetOffset();
}
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 2ee4db9..476906a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -528,7 +528,8 @@
has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
- induction_range_(induction_analysis) {}
+ induction_range_(induction_analysis),
+ next_(nullptr) {}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
DCHECK(!IsAddedBlock(block));
@@ -1618,8 +1619,8 @@
void InsertDeoptInLoop(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
HInstruction* suspend = loop->GetSuspendCheck();
block->InsertInstructionBefore(condition, block->GetLastInstruction());
- HDeoptimize* deoptimize =
- new (GetGraph()->GetArena()) HDeoptimize(condition, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
+ GetGraph()->GetArena(), condition, HDeoptimize::Kind::kBCE, suspend->GetDexPc());
block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
if (suspend->HasEnvironment()) {
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
@@ -1631,8 +1632,8 @@
void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
HBasicBlock* block = bounds_check->GetBlock();
block->InsertInstructionBefore(condition, bounds_check);
- HDeoptimize* deoptimize =
- new (GetGraph()->GetArena()) HDeoptimize(condition, bounds_check->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
+ GetGraph()->GetArena(), condition, HDeoptimize::Kind::kBCE, bounds_check->GetDexPc());
block->InsertInstructionBefore(deoptimize, bounds_check);
deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
}
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index fe42301..048073e 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -36,7 +36,8 @@
: HGraphVisitor(graph),
block_has_cha_guard_(GetGraph()->GetBlocks().size(),
0,
- graph->GetArena()->Adapter(kArenaAllocCHA)) {
+ graph->GetArena()->Adapter(kArenaAllocCHA)),
+ instruction_iterator_(nullptr) {
number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards();
DCHECK_NE(number_of_guards_to_visit_, 0u);
// Will recount number of guards during guard optimization.
@@ -201,8 +202,8 @@
HInstruction* suspend = loop_info->GetSuspendCheck();
// Need a new deoptimize instruction that copies the environment
// of the suspend instruction for the loop.
- HDeoptimize* deoptimize =
- new (GetGraph()->GetArena()) HDeoptimize(compare, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
+ GetGraph()->GetArena(), compare, HDeoptimize::Kind::kInline, suspend->GetDexPc());
pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
suspend->GetEnvironment(), loop_info->GetHeader());
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e34f116..d7cc577 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1134,7 +1134,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
// The read barrier instrumentation of object ArrayGet
@@ -1602,14 +1602,20 @@
}
}
-static Condition GenerateLongTestConstant(HCondition* condition,
- bool invert,
- CodeGeneratorARM* codegen) {
+static std::pair<Condition, Condition> GenerateLongTestConstant(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- Condition ret = EQ;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<Condition, Condition> ret;
const Location left = locations->InAt(0);
const Location right = locations->InAt(1);
@@ -1629,22 +1635,26 @@
__ CmpConstant(left_high, High32Bits(value));
__ it(EQ);
__ cmp(left_low, ShifterOperand(Low32Bits(value)), EQ);
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
case kCondLE:
case kCondGT:
// Trivially true or false.
if (value == std::numeric_limits<int64_t>::max()) {
__ cmp(left_low, ShifterOperand(left_low));
- ret = cond == kCondLE ? EQ : NE;
+ ret = cond == kCondLE ? std::make_pair(EQ, NE) : std::make_pair(NE, EQ);
break;
}
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondLT;
+ opposite = kCondGE;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondGE;
+ opposite = kCondLT;
}
value++;
@@ -1653,7 +1663,7 @@
case kCondLT:
__ CmpConstant(left_low, Low32Bits(value));
__ sbcs(IP, left_high, ShifterOperand(High32Bits(value)));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
default:
LOG(FATAL) << "Unreachable";
@@ -1663,14 +1673,20 @@
return ret;
}
-static Condition GenerateLongTest(HCondition* condition,
- bool invert,
- CodeGeneratorARM* codegen) {
+static std::pair<Condition, Condition> GenerateLongTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- Condition ret = EQ;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<Condition, Condition> ret;
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -1689,15 +1705,19 @@
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>()),
EQ);
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
case kCondLE:
case kCondGT:
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondGE;
+ opposite = kCondLT;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondLT;
+ opposite = kCondGE;
}
std::swap(left, right);
@@ -1709,7 +1729,7 @@
__ sbcs(IP,
left.AsRegisterPairHigh<Register>(),
ShifterOperand(right.AsRegisterPairHigh<Register>()));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
default:
LOG(FATAL) << "Unreachable";
@@ -1719,90 +1739,83 @@
return ret;
}
-static Condition GenerateTest(HInstruction* instruction,
- Location loc,
- bool invert,
- CodeGeneratorARM* codegen) {
- DCHECK(!instruction->IsConstant());
+static std::pair<Condition, Condition> GenerateTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const Primitive::Type type = condition->GetLeft()->GetType();
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ std::pair<Condition, Condition> ret;
+ const Location right = locations->InAt(1);
- Condition ret = invert ? EQ : NE;
+ if (invert) {
+ std::swap(cond, opposite);
+ }
- if (IsBooleanValueOrMaterializedCondition(instruction)) {
- __ CmpConstant(loc.AsRegister<Register>(), 0);
+ if (type == Primitive::kPrimLong) {
+ ret = locations->InAt(1).IsConstant()
+ ? GenerateLongTestConstant(condition, invert, codegen)
+ : GenerateLongTest(condition, invert, codegen);
+ } else if (Primitive::IsFloatingPointType(type)) {
+ GenerateVcmp(condition, codegen);
+ __ vmstat();
+ ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()),
+ ARMFPCondition(opposite, condition->IsGtBias()));
} else {
- HCondition* const condition = instruction->AsCondition();
- const LocationSummary* const locations = condition->GetLocations();
- const Primitive::Type type = condition->GetLeft()->GetType();
- const IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- const Location right = locations->InAt(1);
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- if (type == Primitive::kPrimLong) {
- ret = condition->GetLocations()->InAt(1).IsConstant()
- ? GenerateLongTestConstant(condition, invert, codegen)
- : GenerateLongTest(condition, invert, codegen);
- } else if (Primitive::IsFloatingPointType(type)) {
- GenerateVcmp(condition, codegen);
- __ vmstat();
- ret = ARMFPCondition(cond, condition->IsGtBias());
+ const Register left = locations->InAt(0).AsRegister<Register>();
+
+ if (right.IsRegister()) {
+ __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
- DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
-
- const Register left = locations->InAt(0).AsRegister<Register>();
-
- if (right.IsRegister()) {
- __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
- } else {
- DCHECK(right.IsConstant());
- __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
-
- ret = ARMCondition(cond);
+ DCHECK(right.IsConstant());
+ __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
+
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
}
return ret;
}
-static bool CanGenerateTest(HInstruction* condition, ArmAssembler* assembler) {
- if (!IsBooleanValueOrMaterializedCondition(condition)) {
- const HCondition* const cond = condition->AsCondition();
+static bool CanGenerateTest(HCondition* condition, ArmAssembler* assembler) {
+ if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const IfCondition c = condition->GetCondition();
- if (cond->GetLeft()->GetType() == Primitive::kPrimLong) {
- const LocationSummary* const locations = cond->GetLocations();
- const IfCondition c = cond->GetCondition();
+ if (locations->InAt(1).IsConstant()) {
+ const int64_t value = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
+ ShifterOperand so;
- if (locations->InAt(1).IsConstant()) {
- const int64_t value = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
- ShifterOperand so;
-
- if (c < kCondLT || c > kCondGE) {
- // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
- // we check that the least significant half of the first input to be compared
- // is in a low register (the other half is read outside an IT block), and
- // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
- !IsUint<8>(Low32Bits(value))) {
- return false;
- }
- } else if (c == kCondLE || c == kCondGT) {
- if (value < std::numeric_limits<int64_t>::max() &&
- !assembler->ShifterOperandCanHold(kNoRegister,
- kNoRegister,
- SBC,
- High32Bits(value + 1),
- kCcSet,
- &so)) {
- return false;
- }
- } else if (!assembler->ShifterOperandCanHold(kNoRegister,
- kNoRegister,
- SBC,
- High32Bits(value),
- kCcSet,
- &so)) {
+ if (c < kCondLT || c > kCondGE) {
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the least significant half of the first input to be compared
+ // is in a low register (the other half is read outside an IT block), and
+ // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
+ // encoding can be used.
+ if (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
+ !IsUint<8>(Low32Bits(value))) {
return false;
}
+ } else if (c == kCondLE || c == kCondGT) {
+ if (value < std::numeric_limits<int64_t>::max() &&
+ !assembler->ShifterOperandCanHold(kNoRegister,
+ kNoRegister,
+ SBC,
+ High32Bits(value + 1),
+ kCcSet,
+ &so)) {
+ return false;
+ }
+ } else if (!assembler->ShifterOperandCanHold(kNoRegister,
+ kNoRegister,
+ SBC,
+ High32Bits(value),
+ kCcSet,
+ &so)) {
+ return false;
}
}
}
@@ -1875,6 +1888,7 @@
Label* CodeGeneratorARM::GetFinalLabel(HInstruction* instruction, Label* final_label) {
DCHECK(!instruction->IsControlFlow() && !instruction->IsSuspendCheck());
+ DCHECK(!instruction->IsInvoke() || !instruction->GetLocations()->CanCall());
const HBasicBlock* const block = instruction->GetBlock();
const HLoopInformation* const info = block->GetLoopInformation();
@@ -2414,13 +2428,6 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
- Label* true_label,
- Label* false_label ATTRIBUTE_UNUSED) {
- __ vmstat(); // transfer FP status register to ARM APSR.
- __ b(true_label, ARMFPCondition(cond->GetCondition(), cond->IsGtBias()));
-}
-
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
@@ -2437,7 +2444,6 @@
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
- // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -2508,25 +2514,38 @@
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target_in,
Label* false_target_in) {
+ if (CanGenerateTest(condition, codegen_->GetAssembler())) {
+ Label* non_fallthrough_target;
+ bool invert;
+
+ if (true_target_in == nullptr) {
+ DCHECK(false_target_in != nullptr);
+ non_fallthrough_target = false_target_in;
+ invert = true;
+ } else {
+ non_fallthrough_target = true_target_in;
+ invert = false;
+ }
+
+ const auto cond = GenerateTest(condition, invert, codegen_);
+
+ __ b(non_fallthrough_target, cond.first);
+
+ if (false_target_in != nullptr && false_target_in != non_fallthrough_target) {
+ __ b(false_target_in);
+ }
+
+ return;
+ }
+
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
Label fallthrough_target;
Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
- Primitive::Type type = condition->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(condition, true_target, false_target);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(condition, codegen_);
- GenerateFPJumps(condition, true_target, false_target);
- break;
- default:
- LOG(FATAL) << "Unexpected compare type " << type;
- }
+ DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
if (false_target != &fallthrough_target) {
__ b(false_target);
@@ -2728,7 +2747,8 @@
}
if (!Primitive::IsFloatingPointType(type) &&
- CanGenerateTest(condition, codegen_->GetAssembler())) {
+ (IsBooleanValueOrMaterializedCondition(condition) ||
+ CanGenerateTest(condition->AsCondition(), codegen_->GetAssembler()))) {
bool invert = false;
if (out.Equals(second)) {
@@ -2752,7 +2772,14 @@
codegen_->MoveLocation(out, src.Equals(first) ? second : first, type);
}
- const Condition cond = GenerateTest(condition, locations->InAt(2), invert, codegen_);
+ std::pair<Condition, Condition> cond;
+
+ if (IsBooleanValueOrMaterializedCondition(condition)) {
+ __ CmpConstant(locations->InAt(2).AsRegister<Register>(), 0);
+ cond = invert ? std::make_pair(EQ, NE) : std::make_pair(NE, EQ);
+ } else {
+ cond = GenerateTest(condition->AsCondition(), invert, codegen_);
+ }
if (out.IsRegister()) {
ShifterOperand operand;
@@ -2764,8 +2791,8 @@
operand = ShifterOperand(src.AsRegister<Register>());
}
- __ it(cond);
- __ mov(out.AsRegister<Register>(), operand, cond);
+ __ it(cond.first);
+ __ mov(out.AsRegister<Register>(), operand, cond.first);
} else {
DCHECK(out.IsRegisterPair());
@@ -2783,10 +2810,10 @@
operand_low = ShifterOperand(src.AsRegisterPairLow<Register>());
}
- __ it(cond);
- __ mov(out.AsRegisterPairLow<Register>(), operand_low, cond);
- __ it(cond);
- __ mov(out.AsRegisterPairHigh<Register>(), operand_high, cond);
+ __ it(cond.first);
+ __ mov(out.AsRegisterPairLow<Register>(), operand_low, cond.first);
+ __ it(cond.first);
+ __ mov(out.AsRegisterPairHigh<Register>(), operand_high, cond.first);
}
return;
@@ -2839,7 +2866,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
break;
@@ -2866,51 +2893,48 @@
return;
}
- LocationSummary* locations = cond->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- Register out = locations->Out().AsRegister<Register>();
- Label true_label, false_label;
+ const Register out = cond->GetLocations()->Out().AsRegister<Register>();
- switch (cond->InputAt(0)->GetType()) {
- default: {
- // Integer case.
- if (right.IsRegister()) {
- __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
- } else {
- DCHECK(right.IsConstant());
- __ CmpConstant(left.AsRegister<Register>(),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
- __ it(ARMCondition(cond->GetCondition()), kItElse);
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMCondition(cond->GetCondition()));
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMCondition(cond->GetOppositeCondition()));
- return;
- }
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(cond, codegen_);
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
+ if (ArmAssembler::IsLowRegister(out) && CanGenerateTest(cond, codegen_->GetAssembler())) {
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ it(condition.first);
+ __ mov(out, ShifterOperand(1), condition.first);
+ __ it(condition.second);
+ __ mov(out, ShifterOperand(0), condition.second);
+ return;
}
// Convert the jumps into the result.
Label done_label;
+ Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
- // False case: result = 0.
- __ Bind(&false_label);
- __ LoadImmediate(out, 0);
- __ b(&done_label);
+ if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
+ Label true_label, false_label;
- // True case: result = 1.
- __ Bind(&true_label);
- __ LoadImmediate(out, 1);
- __ Bind(&done_label);
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ LoadImmediate(out, 0);
+ __ b(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ LoadImmediate(out, 1);
+ } else {
+ DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+ __ b(final_label, condition.second);
+ __ LoadImmediate(out, 1);
+ }
+
+ if (done_label.IsLinked()) {
+ __ Bind(&done_label);
+ }
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -4441,7 +4465,8 @@
// rotates by swapping input regs (effectively rotating by the first 32-bits of
// a larger rotation) or flipping direction (thus treating larger right/left
// rotations as sub-word sized rotations in the other direction) as appropriate.
-void InstructionCodeGeneratorARM::HandleLongRotate(LocationSummary* locations) {
+void InstructionCodeGeneratorARM::HandleLongRotate(HRor* ror) {
+ LocationSummary* locations = ror->GetLocations();
Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
Location rhs = locations->InAt(1);
@@ -4474,6 +4499,7 @@
Register shift_left = locations->GetTemp(1).AsRegister<Register>();
Label end;
Label shift_by_32_plus_shift_right;
+ Label* final_label = codegen_->GetFinalLabel(ror, &end);
__ and_(shift_right, rhs.AsRegister<Register>(), ShifterOperand(0x1F));
__ Lsrs(shift_left, rhs.AsRegister<Register>(), 6);
@@ -4488,7 +4514,7 @@
__ Lsl(out_reg_lo, in_reg_lo, shift_left);
__ Lsr(shift_left, in_reg_hi, shift_right);
__ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_left));
- __ b(&end);
+ __ b(final_label);
__ Bind(&shift_by_32_plus_shift_right); // Shift by 32+shift_right.
// out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
@@ -4500,7 +4526,9 @@
__ Lsl(shift_right, in_reg_hi, shift_left);
__ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_right));
- __ Bind(&end);
+ if (end.IsLinked()) {
+ __ Bind(&end);
+ }
}
}
@@ -4540,7 +4568,7 @@
break;
}
case Primitive::kPrimLong: {
- HandleLongRotate(locations);
+ HandleLongRotate(ror);
break;
}
default:
@@ -4919,6 +4947,7 @@
Location right = locations->InAt(1);
Label less, greater, done;
+ Label* final_label = codegen_->GetFinalLabel(compare, &done);
Primitive::Type type = compare->InputAt(0)->GetType();
Condition less_cond;
switch (type) {
@@ -4958,17 +4987,19 @@
UNREACHABLE();
}
- __ b(&done, EQ);
+ __ b(final_label, EQ);
__ b(&less, less_cond);
__ Bind(&greater);
__ LoadImmediate(out, 1);
- __ b(&done);
+ __ b(final_label);
__ Bind(&less);
__ LoadImmediate(out, -1);
- __ Bind(&done);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
}
void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
@@ -5746,6 +5777,7 @@
int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
if (maybe_compressed_char_at) {
Label uncompressed_load, done;
+ Label* final_label = codegen_->GetFinalLabel(instruction, &done);
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
@@ -5754,13 +5786,15 @@
out_loc.AsRegister<Register>(),
obj,
data_offset + const_index);
- __ b(&done);
+ __ b(final_label);
__ Bind(&uncompressed_load);
__ LoadFromOffset(GetLoadOperandType(Primitive::kPrimChar),
out_loc.AsRegister<Register>(),
obj,
data_offset + (const_index << 1));
- __ Bind(&done);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
} else {
uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
@@ -5784,17 +5818,20 @@
}
if (maybe_compressed_char_at) {
Label uncompressed_load, done;
+ Label* final_label = codegen_->GetFinalLabel(instruction, &done);
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
__ b(&uncompressed_load, CS);
__ ldrb(out_loc.AsRegister<Register>(),
Address(temp, index.AsRegister<Register>(), Shift::LSL, 0));
- __ b(&done);
+ __ b(final_label);
__ Bind(&uncompressed_load);
__ ldrh(out_loc.AsRegister<Register>(),
Address(temp, index.AsRegister<Register>(), Shift::LSL, 1));
- __ Bind(&done);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
} else {
codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, index.AsRegister<Register>());
}
@@ -6019,6 +6056,7 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
Label done;
+ Label* final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARM* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
@@ -6040,7 +6078,7 @@
index.AsRegister<Register>());
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ b(&done);
+ __ b(final_label);
__ Bind(&non_zero);
}
@@ -7020,13 +7058,16 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- Label done, zero;
+ Label done;
+ Label* const final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
// avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &zero);
+ DCHECK_NE(out, obj);
+ __ LoadImmediate(out, 0);
+ __ CompareAndBranchIfZero(obj, final_label);
}
switch (type_check_kind) {
@@ -7038,11 +7079,23 @@
class_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ cmp(out, ShifterOperand(cls));
// Classes must be equal for the instanceof to succeed.
- __ b(&zero, NE);
- __ LoadImmediate(out, 1);
- __ b(&done);
+ __ cmp(out, ShifterOperand(cls));
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ __ b(final_label, NE);
+ __ LoadImmediate(out, 1);
+ }
+
break;
}
@@ -7064,14 +7117,11 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ CompareAndBranchIfZero(out, &done);
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ CompareAndBranchIfZero(out, final_label);
__ cmp(out, ShifterOperand(cls));
__ b(&loop, NE);
__ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
- __ b(&done);
- }
break;
}
@@ -7094,14 +7144,32 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ CompareAndBranchIfNonZero(out, &loop);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ b(&done);
- __ Bind(&success);
- __ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
- __ b(&done);
+ // This is essentially a null check, but it sets the condition flags to the
+ // proper value for the code that follows the loop, i.e. not `EQ`.
+ __ cmp(out, ShifterOperand(1));
+ __ b(&loop, HS);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ // If `out` is null, we use it for the result, and the condition flags
+ // have already been set to `NE`, so the IT block that comes afterwards
+ // (and which handles the successful case) turns into a NOP (instead of
+ // overwriting `out`).
+ __ Bind(&success);
+ // There is only one branch to the `success` label (which is bound to this
+ // IT block), and it has the same condition, `EQ`, so in that case the MOV
+ // is executed.
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ b(final_label);
+ __ Bind(&success);
+ __ LoadImmediate(out, 1);
}
+
break;
}
@@ -7124,14 +7192,28 @@
component_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ CompareAndBranchIfZero(out, &done);
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ CompareAndBranchIfZero(out, final_label);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(out, &zero);
- __ Bind(&exact_check);
- __ LoadImmediate(out, 1);
- __ b(&done);
+ __ cmp(out, ShifterOperand(0));
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ __ Bind(&exact_check);
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ __ b(final_label, NE);
+ __ Bind(&exact_check);
+ __ LoadImmediate(out, 1);
+ }
+
break;
}
@@ -7151,9 +7233,6 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
- __ b(&done);
- }
break;
}
@@ -7182,18 +7261,10 @@
/* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel());
- if (zero.IsLinked()) {
- __ b(&done);
- }
break;
}
}
- if (zero.IsLinked()) {
- __ Bind(&zero);
- __ LoadImmediate(out, 0);
- }
-
if (done.IsLinked()) {
__ Bind(&done);
}
@@ -7269,9 +7340,10 @@
codegen_->AddSlowPath(type_check_slow_path);
Label done;
+ Label* final_label = codegen_->GetFinalLabel(instruction, &done);
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &done);
+ __ CompareAndBranchIfZero(obj, final_label);
}
switch (type_check_kind) {
@@ -7335,7 +7407,7 @@
Label loop;
__ Bind(&loop);
__ cmp(temp, ShifterOperand(cls));
- __ b(&done, EQ);
+ __ b(final_label, EQ);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction,
@@ -7363,7 +7435,7 @@
// Do an exact check.
__ cmp(temp, ShifterOperand(cls));
- __ b(&done, EQ);
+ __ b(final_label, EQ);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -7433,7 +7505,10 @@
break;
}
}
- __ Bind(&done);
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
__ Bind(type_check_slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5b15902..86f2f21 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -237,7 +237,7 @@
void HandleBitwiseOperation(HBinaryOperation* operation);
void HandleCondition(HCondition* condition);
void HandleIntegerRotate(LocationSummary* locations);
- void HandleLongRotate(LocationSummary* locations);
+ void HandleLongRotate(HRor* ror);
void HandleShift(HBinaryOperation* operation);
void GenerateWideAtomicStore(Register addr, uint32_t offset,
@@ -299,7 +299,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
- void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 28cc942..b39a0e4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -68,6 +68,7 @@
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
+using helpers::QRegisterFrom;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
@@ -153,7 +154,8 @@
codegen->GetNumberOfFloatingPointRegisters()));
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills);
- CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize, fp_spills);
+ unsigned v_reg_size = codegen->GetGraph()->HasSIMD() ? kQRegSize : kDRegSize;
+ CPURegList fp_list = CPURegList(CPURegister::kVRegister, v_reg_size, fp_spills);
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -464,10 +466,13 @@
: SlowPathCodeARM64(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations); // Only saves live 128-bit regs for SIMD.
arm64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, locations); // Only restores live 128-bit regs for SIMD.
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -1150,7 +1155,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
// The read barrier instrumentation of object ArrayGet
@@ -1455,9 +1460,12 @@
}
Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
- DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
- kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
- kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
+ DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister
+ || kind == Location::kStackSlot || kind == Location::kDoubleStackSlot
+ || kind == Location::kSIMDStackSlot);
+ kind = (kind == Location::kFpuRegister || kind == Location::kSIMDStackSlot)
+ ? Location::kFpuRegister
+ : Location::kRegister;
Location scratch = GetScratchLocation(kind);
if (!scratch.Equals(Location::NoLocation())) {
return scratch;
@@ -1467,7 +1475,9 @@
scratch = LocationFrom(vixl_temps_.AcquireX());
} else {
DCHECK(kind == Location::kFpuRegister);
- scratch = LocationFrom(vixl_temps_.AcquireD());
+ scratch = LocationFrom(codegen_->GetGraph()->HasSIMD()
+ ? vixl_temps_.AcquireVRegisterOfSize(kQRegSize)
+ : vixl_temps_.AcquireD());
}
AddScratchLocation(scratch);
return scratch;
@@ -1478,7 +1488,7 @@
vixl_temps_.Release(XRegisterFrom(loc));
} else {
DCHECK(loc.IsFpuRegister());
- vixl_temps_.Release(DRegisterFrom(loc));
+ vixl_temps_.Release(codegen_->GetGraph()->HasSIMD() ? QRegisterFrom(loc) : DRegisterFrom(loc));
}
RemoveScratchLocation(loc);
}
@@ -1741,6 +1751,8 @@
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
__ Ldr(dst, StackOperandFrom(source));
+ } else if (source.IsSIMDStackSlot()) {
+ __ Ldr(QRegisterFrom(destination), StackOperandFrom(source));
} else if (source.IsConstant()) {
DCHECK(CoherentConstantAndType(source, dst_type));
MoveConstant(dst, source.GetConstant());
@@ -1763,7 +1775,29 @@
__ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
} else {
DCHECK(destination.IsFpuRegister());
- __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
+ if (GetGraph()->HasSIMD()) {
+ __ Mov(QRegisterFrom(destination), QRegisterFrom(source));
+ } else {
+ __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
+ }
+ }
+ }
+ } else if (destination.IsSIMDStackSlot()) {
+ if (source.IsFpuRegister()) {
+ __ Str(QRegisterFrom(source), StackOperandFrom(destination));
+ } else {
+ DCHECK(source.IsSIMDStackSlot());
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ if (GetVIXLAssembler()->GetScratchFPRegisterList()->IsEmpty()) {
+ Register temp = temps.AcquireX();
+ __ Ldr(temp, MemOperand(sp, source.GetStackIndex()));
+ __ Str(temp, MemOperand(sp, destination.GetStackIndex()));
+ __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize));
+ __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize));
+ } else {
+ FPRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -3281,7 +3315,7 @@
void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
DCHECK(instruction->IsDiv() || instruction->IsRem());
Primitive::Type type = instruction->GetResultType();
- DCHECK(type == Primitive::kPrimInt || Primitive::kPrimLong);
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
LocationSummary* locations = instruction->GetLocations();
Register out = OutputRegister(instruction);
@@ -5520,7 +5554,11 @@
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // In suspend check slow path, usually there are no caller-save registers at all.
+ // If SIMD instructions are present, however, we force spilling all live SIMD
+ // registers in full width (since the runtime only saves/restores lower part).
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7471cd5..869aad2 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -318,6 +318,11 @@
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
+ vixl::aarch64::MemOperand CreateVecMemRegisters(
+ HVecMemoryOperation* instruction,
+ Location* reg_loc,
+ bool is_load);
+
Arm64Assembler* const assembler_;
CodeGeneratorARM64* const codegen_;
@@ -407,8 +412,9 @@
}
size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
- // Allocated in D registers, which are word sized.
- return kArm64WordSize;
+ return GetGraph()->HasSIMD()
+ ? 2 * kArm64WordSize // 16 bytes == 2 arm64 words for each spill
+ : 1 * kArm64WordSize; // 8 bytes == 1 arm64 word for each spill
}
uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d75779c..cce412b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1175,7 +1175,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
// The read barrier instrumentation of object ArrayGet
@@ -1687,14 +1687,21 @@
}
}
-static vixl32::Condition GenerateLongTestConstant(HCondition* condition,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTestConstant(
+ HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- vixl32::Condition ret = eq;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
const Location left = locations->InAt(0);
const Location right = locations->InAt(1);
@@ -1713,13 +1720,14 @@
case kCondAE: {
__ Cmp(left_high, High32Bits(value));
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
2 * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
__ it(eq);
__ cmp(eq, left_low, Low32Bits(value));
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
}
case kCondLE:
@@ -1727,15 +1735,19 @@
// Trivially true or false.
if (value == std::numeric_limits<int64_t>::max()) {
__ Cmp(left_low, left_low);
- ret = cond == kCondLE ? eq : ne;
+ ret = cond == kCondLE ? std::make_pair(eq, ne) : std::make_pair(ne, eq);
break;
}
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondLT;
+ opposite = kCondGE;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondGE;
+ opposite = kCondLT;
}
value++;
@@ -1746,7 +1758,7 @@
__ Cmp(left_low, Low32Bits(value));
__ Sbcs(temps.Acquire(), left_high, High32Bits(value));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
}
default:
@@ -1757,14 +1769,21 @@
return ret;
}
-static vixl32::Condition GenerateLongTest(HCondition* condition,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTest(
+ HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- vixl32::Condition ret = eq;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -1779,22 +1798,27 @@
case kCondAE: {
__ Cmp(HighRegisterFrom(left), HighRegisterFrom(right));
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
2 * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
__ it(eq);
__ cmp(eq, LowRegisterFrom(left), LowRegisterFrom(right));
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
}
case kCondLE:
case kCondGT:
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondGE;
+ opposite = kCondLT;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondLT;
+ opposite = kCondGE;
}
std::swap(left, right);
@@ -1805,7 +1829,7 @@
__ Cmp(LowRegisterFrom(left), LowRegisterFrom(right));
__ Sbcs(temps.Acquire(), HighRegisterFrom(left), HighRegisterFrom(right));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
}
default:
@@ -1816,69 +1840,62 @@
return ret;
}
-static vixl32::Condition GenerateTest(HInstruction* instruction,
- Location loc,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
- DCHECK(!instruction->IsConstant());
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
+ const Primitive::Type type = condition->GetLeft()->GetType();
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
- vixl32::Condition ret = invert ? eq : ne;
+ if (invert) {
+ std::swap(cond, opposite);
+ }
- if (IsBooleanValueOrMaterializedCondition(instruction)) {
- __ Cmp(RegisterFrom(loc), 0);
+ if (type == Primitive::kPrimLong) {
+ ret = condition->GetLocations()->InAt(1).IsConstant()
+ ? GenerateLongTestConstant(condition, invert, codegen)
+ : GenerateLongTest(condition, invert, codegen);
+ } else if (Primitive::IsFloatingPointType(type)) {
+ GenerateVcmp(condition, codegen);
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()),
+ ARMFPCondition(opposite, condition->IsGtBias()));
} else {
- HCondition* const condition = instruction->AsCondition();
- const Primitive::Type type = condition->GetLeft()->GetType();
- const IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
-
- if (type == Primitive::kPrimLong) {
- ret = condition->GetLocations()->InAt(1).IsConstant()
- ? GenerateLongTestConstant(condition, invert, codegen)
- : GenerateLongTest(condition, invert, codegen);
- } else if (Primitive::IsFloatingPointType(type)) {
- GenerateVcmp(condition, codegen);
- __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- ret = ARMFPCondition(cond, condition->IsGtBias());
- } else {
- DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1));
- ret = ARMCondition(cond);
- }
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+ __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1));
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
}
return ret;
}
-static bool CanGenerateTest(HInstruction* condition, ArmVIXLAssembler* assembler) {
- if (!IsBooleanValueOrMaterializedCondition(condition)) {
- const HCondition* const cond = condition->AsCondition();
+static bool CanGenerateTest(HCondition* condition, ArmVIXLAssembler* assembler) {
+ if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const IfCondition c = condition->GetCondition();
- if (cond->GetLeft()->GetType() == Primitive::kPrimLong) {
- const LocationSummary* const locations = cond->GetLocations();
- const IfCondition c = cond->GetCondition();
+ if (locations->InAt(1).IsConstant()) {
+ const int64_t value = Int64ConstantFrom(locations->InAt(1));
- if (locations->InAt(1).IsConstant()) {
- const int64_t value = Int64ConstantFrom(locations->InAt(1));
-
- if (c < kCondLT || c > kCondGE) {
- // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
- // we check that the least significant half of the first input to be compared
- // is in a low register (the other half is read outside an IT block), and
- // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value))) {
- return false;
- }
- // TODO(VIXL): The rest of the checks are there to keep the backend in sync with
- // the previous one, but are not strictly necessary.
- } else if (c == kCondLE || c == kCondGT) {
- if (value < std::numeric_limits<int64_t>::max() &&
- !assembler->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
- return false;
- }
- } else if (!assembler->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+ if (c < kCondLT || c > kCondGE) {
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the least significant half of the first input to be compared
+ // is in a low register (the other half is read outside an IT block), and
+ // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
+ // encoding can be used.
+ if (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value))) {
return false;
}
+ // TODO(VIXL): The rest of the checks are there to keep the backend in sync with
+ // the previous one, but are not strictly necessary.
+ } else if (c == kCondLE || c == kCondGT) {
+ if (value < std::numeric_limits<int64_t>::max() &&
+ !assembler->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
+ return false;
+ }
+ } else if (!assembler->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+ return false;
}
}
}
@@ -1950,6 +1967,7 @@
vixl32::Label* CodeGeneratorARMVIXL::GetFinalLabel(HInstruction* instruction,
vixl32::Label* final_label) {
DCHECK(!instruction->IsControlFlow() && !instruction->IsSuspendCheck());
+ DCHECK(!instruction->IsInvoke() || !instruction->GetLocations()->CanCall());
const HBasicBlock* const block = instruction->GetBlock();
const HLoopInformation* const info = block->GetLoopInformation();
@@ -2444,14 +2462,6 @@
void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARMVIXL::GenerateFPJumps(HCondition* cond,
- vixl32::Label* true_label,
- vixl32::Label* false_label ATTRIBUTE_UNUSED) {
- // To branch on the result of the FP compare we transfer FPSCR to APSR (encoded as PC in VMRS).
- __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(ARMFPCondition(cond->GetCondition(), cond->IsGtBias()), true_label);
-}
-
void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* cond,
vixl32::Label* true_label,
vixl32::Label* false_label) {
@@ -2468,7 +2478,6 @@
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
- // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -2539,31 +2548,44 @@
void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
vixl32::Label* true_target_in,
vixl32::Label* false_target_in) {
+ if (CanGenerateTest(condition, codegen_->GetAssembler())) {
+ vixl32::Label* non_fallthrough_target;
+ bool invert;
+
+ if (true_target_in == nullptr) {
+ DCHECK(false_target_in != nullptr);
+ non_fallthrough_target = false_target_in;
+ invert = true;
+ } else {
+ non_fallthrough_target = true_target_in;
+ invert = false;
+ }
+
+ const auto cond = GenerateTest(condition, invert, codegen_);
+
+ __ B(cond.first, non_fallthrough_target);
+
+ if (false_target_in != nullptr && false_target_in != non_fallthrough_target) {
+ __ B(false_target_in);
+ }
+
+ return;
+ }
+
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough) use and bind `fallthrough` instead.
vixl32::Label fallthrough;
vixl32::Label* true_target = (true_target_in == nullptr) ? &fallthrough : true_target_in;
vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
- Primitive::Type type = condition->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(condition, true_target, false_target);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(condition, codegen_);
- GenerateFPJumps(condition, true_target, false_target);
- break;
- default:
- LOG(FATAL) << "Unexpected compare type " << type;
- }
+ DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
if (false_target != &fallthrough) {
__ B(false_target);
}
- if (true_target_in == nullptr || false_target_in == nullptr) {
+ if (fallthrough.IsReferenced()) {
__ Bind(&fallthrough);
}
}
@@ -2758,7 +2780,8 @@
}
if (!Primitive::IsFloatingPointType(type) &&
- CanGenerateTest(condition, codegen_->GetAssembler())) {
+ (IsBooleanValueOrMaterializedCondition(condition) ||
+ CanGenerateTest(condition->AsCondition(), codegen_->GetAssembler()))) {
bool invert = false;
if (out.Equals(second)) {
@@ -2782,15 +2805,24 @@
codegen_->MoveLocation(out, src.Equals(first) ? second : first, type);
}
- const vixl32::Condition cond = GenerateTest(condition, locations->InAt(2), invert, codegen_);
+ std::pair<vixl32::Condition, vixl32::Condition> cond(eq, ne);
+
+ if (IsBooleanValueOrMaterializedCondition(condition)) {
+ __ Cmp(InputRegisterAt(select, 2), 0);
+ cond = invert ? std::make_pair(eq, ne) : std::make_pair(ne, eq);
+ } else {
+ cond = GenerateTest(condition->AsCondition(), invert, codegen_);
+ }
+
const size_t instr_count = out.IsRegisterPair() ? 4 : 2;
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(GetVIXLAssembler(),
instr_count * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
if (out.IsRegister()) {
- __ it(cond);
- __ mov(cond, RegisterFrom(out), OperandFrom(src, type));
+ __ it(cond.first);
+ __ mov(cond.first, RegisterFrom(out), OperandFrom(src, type));
} else {
DCHECK(out.IsRegisterPair());
@@ -2808,10 +2840,10 @@
operand_low = LowRegisterFrom(src);
}
- __ it(cond);
- __ mov(cond, LowRegisterFrom(out), operand_low);
- __ it(cond);
- __ mov(cond, HighRegisterFrom(out), operand_high);
+ __ it(cond.first);
+ __ mov(cond.first, LowRegisterFrom(out), operand_low);
+ __ it(cond.first);
+ __ mov(cond.first, HighRegisterFrom(out), operand_high);
}
return;
@@ -2864,7 +2896,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
break;
@@ -2891,50 +2923,52 @@
return;
}
- Location right = cond->GetLocations()->InAt(1);
- vixl32::Register out = OutputRegister(cond);
- vixl32::Label true_label, false_label;
+ const vixl32::Register out = OutputRegister(cond);
- switch (cond->InputAt(0)->GetType()) {
- default: {
- // Integer case.
- if (right.IsRegister()) {
- __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
- } else {
- DCHECK(right.IsConstant());
- __ Cmp(InputRegisterAt(cond, 0),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
- ExactAssemblyScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
- __ ite(ARMCondition(cond->GetCondition()));
- __ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
- __ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
- return;
- }
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(cond, codegen_);
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
+ if (out.IsLow() && CanGenerateTest(cond, codegen_->GetAssembler())) {
+ const auto condition = GenerateTest(cond, false, codegen_);
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 4 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(condition.first);
+ __ mov(condition.first, out, 1);
+ __ it(condition.second);
+ __ mov(condition.second, out, 0);
+ return;
}
// Convert the jumps into the result.
vixl32::Label done_label;
+ vixl32::Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
- // False case: result = 0.
- __ Bind(&false_label);
- __ Mov(out, 0);
- __ B(&done_label);
+ if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
+ vixl32::Label true_label, false_label;
- // True case: result = 1.
- __ Bind(&true_label);
- __ Mov(out, 1);
- __ Bind(&done_label);
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ Mov(out, 0);
+ __ B(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ Mov(out, 1);
+ } else {
+ DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ Mov(LeaveFlags, out, 0);
+ __ B(condition.second, final_label, /* far_target */ false);
+ __ Mov(out, 1);
+ }
+
+ if (done_label.IsReferenced()) {
+ __ Bind(&done_label);
+ }
}
void LocationsBuilderARMVIXL::VisitEqual(HEqual* comp) {
@@ -4447,6 +4481,7 @@
vixl32::Register shift_left = RegisterFrom(locations->GetTemp(1));
vixl32::Label end;
vixl32::Label shift_by_32_plus_shift_right;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(ror, &end);
__ And(shift_right, RegisterFrom(rhs), 0x1F);
__ Lsrs(shift_left, RegisterFrom(rhs), 6);
@@ -4461,7 +4496,7 @@
__ Lsl(out_reg_lo, in_reg_lo, shift_left);
__ Lsr(shift_left, in_reg_hi, shift_right);
__ Add(out_reg_lo, out_reg_lo, shift_left);
- __ B(&end);
+ __ B(final_label);
__ Bind(&shift_by_32_plus_shift_right); // Shift by 32+shift_right.
// out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
@@ -4473,7 +4508,9 @@
__ Lsl(shift_right, in_reg_hi, shift_left);
__ Add(out_reg_lo, out_reg_lo, shift_right);
- __ Bind(&end);
+ if (end.IsReferenced()) {
+ __ Bind(&end);
+ }
}
}
@@ -4906,6 +4943,7 @@
Location right = locations->InAt(1);
vixl32::Label less, greater, done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
Primitive::Type type = compare->InputAt(0)->GetType();
vixl32::Condition less_cond = vixl32::Condition(kNone);
switch (type) {
@@ -4944,17 +4982,19 @@
UNREACHABLE();
}
- __ B(eq, &done, /* far_target */ false);
+ __ B(eq, final_label, /* far_target */ false);
__ B(less_cond, &less, /* far_target */ false);
__ Bind(&greater);
__ Mov(out, 1);
- __ B(&done);
+ __ B(final_label);
__ Bind(&less);
__ Mov(out, -1);
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
}
void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
@@ -5746,6 +5786,7 @@
int32_t const_index = Int32ConstantFrom(index);
if (maybe_compressed_char_at) {
vixl32::Label uncompressed_load, done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
@@ -5754,13 +5795,15 @@
RegisterFrom(out_loc),
obj,
data_offset + const_index);
- __ B(&done);
+ __ B(final_label);
__ Bind(&uncompressed_load);
GetAssembler()->LoadFromOffset(GetLoadOperandType(Primitive::kPrimChar),
RegisterFrom(out_loc),
obj,
data_offset + (const_index << 1));
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
} else {
uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
@@ -5785,15 +5828,18 @@
}
if (maybe_compressed_char_at) {
vixl32::Label uncompressed_load, done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
__ B(cs, &uncompressed_load, /* far_target */ false);
__ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
- __ B(&done);
+ __ B(final_label);
__ Bind(&uncompressed_load);
__ Ldrh(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 1));
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
} else {
codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
}
@@ -6032,6 +6078,7 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
vixl32::Label done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
@@ -6054,7 +6101,7 @@
// TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
// store instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ B(&done);
+ __ B(final_label);
__ Bind(&non_zero);
}
@@ -7061,13 +7108,16 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- vixl32::Label done, zero;
+ vixl32::Label done;
+ vixl32::Label* const final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARMVIXL* slow_path = nullptr;
// Return 0 if `obj` is null.
// avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &zero, /* far_target */ false);
+ DCHECK(!out.Is(obj));
+ __ Mov(out, 0);
+ __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
}
switch (type_check_kind) {
@@ -7079,11 +7129,28 @@
class_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ Cmp(out, cls);
// Classes must be equal for the instanceof to succeed.
- __ B(ne, &zero, /* far_target */ false);
- __ Mov(out, 1);
- __ B(&done);
+ __ Cmp(out, cls);
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ Mov(LeaveFlags, out, 0);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ __ B(ne, final_label, /* far_target */ false);
+ __ Mov(out, 1);
+ }
+
break;
}
@@ -7105,14 +7172,11 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
__ Cmp(out, cls);
__ B(ne, &loop, /* far_target */ false);
__ Mov(out, 1);
- if (zero.IsReferenced()) {
- __ B(&done);
- }
break;
}
@@ -7135,14 +7199,38 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ CompareAndBranchIfNonZero(out, &loop);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ B(&done);
- __ Bind(&success);
- __ Mov(out, 1);
- if (zero.IsReferenced()) {
- __ B(&done);
+ // This is essentially a null check, but it sets the condition flags to the
+ // proper value for the code that follows the loop, i.e. not `eq`.
+ __ Cmp(out, 1);
+ __ B(hs, &loop, /* far_target */ false);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ // If `out` is null, we use it for the result, and the condition flags
+ // have already been set to `ne`, so the IT block that comes afterwards
+ // (and which handles the successful case) turns into a NOP (instead of
+ // overwriting `out`).
+ __ Bind(&success);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ // There is only one branch to the `success` label (which is bound to this
+ // IT block), and it has the same condition, `eq`, so in that case the MOV
+ // is executed.
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ B(final_label);
+ __ Bind(&success);
+ __ Mov(out, 1);
}
+
break;
}
@@ -7165,14 +7253,34 @@
component_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
+ // If `out` is null, we use it for the result, and jump to the final label.
+ __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(out, &zero, /* far_target */ false);
- __ Bind(&exact_check);
- __ Mov(out, 1);
- __ B(&done);
+ __ Cmp(out, 0);
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ Mov(LeaveFlags, out, 0);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ __ Bind(&exact_check);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ __ B(ne, final_label, /* far_target */ false);
+ __ Bind(&exact_check);
+ __ Mov(out, 1);
+ }
+
break;
}
@@ -7192,9 +7300,6 @@
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
- if (zero.IsReferenced()) {
- __ B(&done);
- }
break;
}
@@ -7223,18 +7328,10 @@
/* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
- if (zero.IsReferenced()) {
- __ B(&done);
- }
break;
}
}
- if (zero.IsReferenced()) {
- __ Bind(&zero);
- __ Mov(out, 0);
- }
-
if (done.IsReferenced()) {
__ Bind(&done);
}
@@ -7310,9 +7407,10 @@
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &done, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
}
switch (type_check_kind) {
@@ -7376,7 +7474,7 @@
vixl32::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
- __ B(eq, &done, /* far_target */ false);
+ __ B(eq, final_label, /* far_target */ false);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction,
@@ -7404,7 +7502,7 @@
// Do an exact check.
__ Cmp(temp, cls);
- __ B(eq, &done, /* far_target */ false);
+ __ B(eq, final_label, /* far_target */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -7472,7 +7570,9 @@
break;
}
}
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
__ Bind(type_check_slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 781027a..1e9669d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -401,9 +401,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
vixl::aarch32::Label* true_target,
vixl::aarch32::Label* false_target);
- void GenerateFPJumps(HCondition* cond,
- vixl::aarch32::Label* true_label,
- vixl::aarch32::Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond,
vixl::aarch32::Label* true_label,
vixl::aarch32::Label* false_label);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 5f02a52..aa030b2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -461,6 +461,536 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};
+class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(
+ locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ mips_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
+};
+
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathMIPS below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
+//
+// If `entrypoint` is a valid location it is assumed to already be
+// holding the entrypoint. The case where the entrypoint is passed in
+// is for the GcRoot read barrier.
+class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ ReadBarrierMarkSlowPathMIPS(HInstruction* instruction,
+ Location ref,
+ Location entrypoint = Location::NoLocation())
+ : SlowPathCodeMIPS(instruction), ref_(ref), entrypoint_(entrypoint) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register ref_reg = ref_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
+ instruction_->IsLoadClass() ||
+ instruction_->IsLoadString() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast() ||
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier marking slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
+ (S2 <= ref_reg && ref_reg <= S7) ||
+ (ref_reg == FP)) << ref_reg;
+ // "Compact" slow path, saving two moves.
+ //
+ // Instead of using the standard runtime calling convention (input
+ // and output in A0 and V0 respectively):
+ //
+ // A0 <- ref
+ // V0 <- ReadBarrierMark(A0)
+ // ref <- V0
+ //
+ // we just use rX (the register containing `ref`) as input and output
+ // of a dedicated entrypoint:
+ //
+ // rX <- ReadBarrierMarkRegX(rX)
+ //
+ if (entrypoint_.IsValid()) {
+ mips_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
+ DCHECK_EQ(entrypoint_.AsRegister<Register>(), T9);
+ __ Jalr(entrypoint_.AsRegister<Register>());
+ __ NopIfNoReordering();
+ } else {
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
+ // This runtime call does not require a stack map.
+ mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
+ instruction_,
+ this,
+ /* direct */ false);
+ }
+ __ B(GetExitLabel());
+ }
+
+ private:
+ // The location (register) of the marked object reference.
+ const Location ref_;
+
+ // The location of the entrypoint if already loaded.
+ const Location entrypoint_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS);
+};
+
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathMIPS above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ ReadBarrierMarkAndUpdateFieldSlowPathMIPS(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ Location field_offset,
+ Register temp1)
+ : SlowPathCodeMIPS(instruction),
+ ref_(ref),
+ obj_(obj),
+ field_offset_(field_offset),
+ temp1_(temp1) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ const char* GetDescription() const OVERRIDE {
+ return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register ref_reg = ref_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+ // This slow path is only used by the UnsafeCASObject intrinsic.
+ DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier marking and field updating slow path: "
+ << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+ DCHECK(field_offset_.IsRegisterPair()) << field_offset_;
+
+ __ Bind(GetEntryLabel());
+
+ // Save the old reference.
+ // Note that we cannot use AT or TMP to save the old reference, as those
+ // are used by the code that follows, but we need the old reference after
+ // the call to the ReadBarrierMarkRegX entry point.
+ DCHECK_NE(temp1_, AT);
+ DCHECK_NE(temp1_, TMP);
+ __ Move(temp1_, ref_reg);
+
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
+ (S2 <= ref_reg && ref_reg <= S7) ||
+ (ref_reg == FP)) << ref_reg;
+ // "Compact" slow path, saving two moves.
+ //
+ // Instead of using the standard runtime calling convention (input
+ // and output in A0 and V0 respectively):
+ //
+ // A0 <- ref
+ // V0 <- ReadBarrierMark(A0)
+ // ref <- V0
+ //
+ // we just use rX (the register containing `ref`) as input and output
+ // of a dedicated entrypoint:
+ //
+ // rX <- ReadBarrierMarkRegX(rX)
+ //
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
+ // This runtime call does not require a stack map.
+ mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
+ instruction_,
+ this,
+ /* direct */ false);
+
+ // If the new reference is different from the old reference,
+ // update the field in the holder (`*(obj_ + field_offset_)`).
+ //
+ // Note that this field could also hold a different object, if
+ // another thread had concurrently changed it. In that case, the
+ // compare-and-set (CAS) loop below would abort, leaving the
+ // field as-is.
+ MipsLabel done;
+ __ Beq(temp1_, ref_reg, &done);
+
+ // Update the holder's field atomically. This may fail if
+ // mutator updates before us, but it's OK. This is achieved
+ // using a strong compare-and-set (CAS) operation with relaxed
+ // memory synchronization ordering, where the expected value is
+ // the old reference and the desired value is the new reference.
+
+ // Convenience aliases.
+ Register base = obj_;
+ // The UnsafeCASObject intrinsic uses a register pair as field
+ // offset ("long offset"), of which only the low part contains
+ // data.
+ Register offset = field_offset_.AsRegisterPairLow<Register>();
+ Register expected = temp1_;
+ Register value = ref_reg;
+ Register tmp_ptr = TMP; // Pointer to actual memory.
+ Register tmp = AT; // Value in memory.
+
+ __ Addu(tmp_ptr, base, offset);
+
+ if (kPoisonHeapReferences) {
+ __ PoisonHeapReference(expected);
+ // Do not poison `value` if it is the same register as
+ // `expected`, which has just been poisoned.
+ if (value != expected) {
+ __ PoisonHeapReference(value);
+ }
+ }
+
+ // do {
+ // tmp = [r_ptr] - expected;
+ // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
+
+ bool is_r6 = mips_codegen->GetInstructionSetFeatures().IsR6();
+ MipsLabel loop_head, exit_loop;
+ __ Bind(&loop_head);
+ if (is_r6) {
+ __ LlR6(tmp, tmp_ptr);
+ } else {
+ __ LlR2(tmp, tmp_ptr);
+ }
+ __ Bne(tmp, expected, &exit_loop);
+ __ Move(tmp, value);
+ if (is_r6) {
+ __ ScR6(tmp, tmp_ptr);
+ } else {
+ __ ScR2(tmp, tmp_ptr);
+ }
+ __ Beqz(tmp, &loop_head);
+ __ Bind(&exit_loop);
+
+ if (kPoisonHeapReferences) {
+ __ UnpoisonHeapReference(expected);
+ // Do not unpoison `value` if it is the same register as
+ // `expected`, which has just been unpoisoned.
+ if (value != expected) {
+ __ UnpoisonHeapReference(value);
+ }
+ }
+
+ __ Bind(&done);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ // The location (register) of the marked object reference.
+ const Location ref_;
+ // The register containing the object holding the marked object reference field.
+ const Register obj_;
+ // The location of the offset of the marked reference field within `obj_`.
+ Location field_offset_;
+
+ const Register temp1_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS);
+};
+
+// Slow path generating a read barrier for a heap reference.
+class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ ReadBarrierForHeapReferenceSlowPathMIPS(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index)
+ : SlowPathCodeMIPS(instruction),
+ out_(out),
+ ref_(ref),
+ obj_(obj),
+ offset_(offset),
+ index_(index) {
+ DCHECK(kEmitCompilerReadBarrier);
+ // If `obj` is equal to `out` or `ref`, it means the initial object
+ // has been overwritten by (or after) the heap object reference load
+ // to be instrumented, e.g.:
+ //
+ // __ LoadFromOffset(kLoadWord, out, out, offset);
+ // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
+ //
+ // In that case, we have lost the information about the original
+ // object, and the emitted read barrier cannot work properly.
+ DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
+ DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast() ||
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier for heap reference slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We may have to change the index's value, but as `index_` is a
+ // constant member (like other "inputs" of this slow path),
+ // introduce a copy of it, `index`.
+ Location index = index_;
+ if (index_.IsValid()) {
+ // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
+ if (instruction_->IsArrayGet()) {
+ // Compute the actual memory offset and store it in `index`.
+ Register index_reg = index_.AsRegister<Register>();
+ DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
+ if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
+ // We are about to change the value of `index_reg` (see the
+ // calls to art::mips::MipsAssembler::Sll and
+ // art::mips::MipsAssembler::Addiu32 below), but it has
+ // not been saved by the previous call to
+ // art::SlowPathCode::SaveLiveRegisters, as it is a
+ // callee-save register --
+ // art::SlowPathCode::SaveLiveRegisters does not consider
+ // callee-save registers, as it has been designed with the
+ // assumption that callee-save registers are supposed to be
+ // handled by the called function. So, as a callee-save
+ // register, `index_reg` _would_ eventually be saved onto
+ // the stack, but it would be too late: we would have
+ // changed its value earlier. Therefore, we manually save
+ // it here into another freely available register,
+ // `free_reg`, chosen of course among the caller-save
+ // registers (as a callee-save `free_reg` register would
+ // exhibit the same problem).
+ //
+ // Note we could have requested a temporary register from
+ // the register allocator instead; but we prefer not to, as
+ // this is a slow path, and we know we can find a
+ // caller-save register that is available.
+ Register free_reg = FindAvailableCallerSaveRegister(codegen);
+ __ Move(free_reg, index_reg);
+ index_reg = free_reg;
+ index = Location::RegisterLocation(index_reg);
+ } else {
+ // The initial register stored in `index_` has already been
+ // saved in the call to art::SlowPathCode::SaveLiveRegisters
+ // (as it is not a callee-save register), so we can freely
+ // use it.
+ }
+ // Shifting the index value contained in `index_reg` by the scale
+ // factor (2) cannot overflow in practice, as the runtime is
+ // unable to allocate object arrays with a size larger than
+ // 2^26 - 1 (that is, 2^28 - 4 bytes).
+ __ Sll(index_reg, index_reg, TIMES_4);
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ __ Addiu32(index_reg, index_reg, offset_);
+ } else {
+ // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
+ // intrinsics, `index_` is not shifted by a scale factor of 2
+ // (as in the case of ArrayGet), as it is actually an offset
+ // to an object field within an object.
+ DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
+ (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
+ << instruction_->AsInvoke()->GetIntrinsic();
+ DCHECK_EQ(offset_, 0U);
+ DCHECK(index_.IsRegisterPair());
+ // UnsafeGet's offset location is a register pair, the low
+ // part contains the correct offset.
+ index = index_.ToLow();
+ }
+ }
+
+ // We're moving two or three locations to locations that could
+ // overlap, so we need a parallel move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(ref_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(obj_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot,
+ nullptr);
+ if (index.IsValid()) {
+ parallel_move.AddMove(index,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimInt,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ } else {
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
+ }
+ mips_codegen->InvokeRuntime(kQuickReadBarrierSlow,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<
+ kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
+ mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
+
+ private:
+ Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
+ size_t ref = static_cast<int>(ref_.AsRegister<Register>());
+ size_t obj = static_cast<int>(obj_.AsRegister<Register>());
+ for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (i != ref &&
+ i != obj &&
+ !codegen->IsCoreCalleeSaveRegister(i) &&
+ !codegen->IsBlockedCoreRegister(i)) {
+ return static_cast<Register>(i);
+ }
+ }
+ // We shall never fail to find a free caller-save register, as
+ // there are more than two core caller-save registers on MIPS
+ // (meaning it is possible to find one which is different from
+ // `ref` and `obj`).
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
+ LOG(FATAL) << "Could not find a free caller-save register";
+ UNREACHABLE();
+ }
+
+ const Location out_;
+ const Location ref_;
+ const Location obj_;
+ const uint32_t offset_;
+ // An additional location containing an index to an array.
+ // Only used for HArrayGet and the UnsafeGetObject &
+ // UnsafeGetObjectVolatile intrinsics.
+ const Location index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS);
+};
+
+// Slow path generating a read barrier for a GC root.
+class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ ReadBarrierForRootSlowPathMIPS(HInstruction* instruction, Location out, Location root)
+ : SlowPathCodeMIPS(instruction), out_(out), root_(root) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
+ << "Unexpected instruction in read barrier for GC root slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ mips_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
+ mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
+ mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }
+
+ private:
+ const Location out_;
+ const Location root_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS);
+};
+
CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
const MipsInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
@@ -1310,10 +1840,26 @@
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
+ GenerateInvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
+ IsDirectEntrypoint(entrypoint));
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
+}
+
+void CodeGeneratorMIPS::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path,
+ bool direct) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset, direct);
+}
+
+void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool direct) {
bool reordering = __ SetReorder(false);
- __ LoadFromOffset(kLoadWord, T9, TR, GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value());
+ __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
__ Jalr(T9);
- if (IsDirectEntrypoint(entrypoint)) {
+ if (direct) {
// Reserve argument space on stack (for $a0-$a3) for
// entrypoints that directly reference native implementations.
// Called function may use this space to store $a0-$a3 regs.
@@ -1323,9 +1869,6 @@
__ Nop(); // In delay slot.
}
__ SetReorder(reordering);
- if (EntrypointRequiresStackMap(entrypoint)) {
- RecordPcInfo(instruction, dex_pc, slow_path);
- }
}
void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
@@ -1885,14 +2428,31 @@
}
void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
+ Primitive::Type type = instruction->GetType();
+ bool object_array_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (type == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ if (Primitive::IsFloatingPointType(type)) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps in the case of an object array get with
+ // read barriers enabled: we do not want the move to overwrite the
+ // array's location, as we need it to emit the read barrier.
+ locations->SetOut(Location::RequiresRegister(),
+ object_array_get_with_read_barrier
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ }
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->AddTemp(Location::RequiresRegister());
}
}
@@ -1905,7 +2465,9 @@
void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
+ Location out_loc = locations->Out();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -1915,7 +2477,7 @@
instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -1928,7 +2490,7 @@
}
case Primitive::kPrimByte: {
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -1941,21 +2503,20 @@
}
case Primitive::kPrimShort: {
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
__ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
__ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
@@ -1995,72 +2556,109 @@
__ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
__ B(&done);
__ Bind(&uncompressed_load);
- __ Sll(TMP, index_reg, TIMES_2);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
__ Bind(&done);
} else {
- __ Sll(TMP, index_reg, TIMES_2);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
}
}
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
+ case Primitive::kPrimInt: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
__ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
}
break;
}
+ case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ // /* HeapReference<Object> */ out =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
+ codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
+ out_loc,
+ obj,
+ data_offset,
+ index,
+ temp,
+ /* needs_null_check */ true);
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+ } else {
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
+ __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction,
+ out_loc,
+ out_loc,
+ obj_loc,
+ data_offset,
+ index);
+ }
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
- Register out = locations->Out().AsRegisterPairLow<Register>();
+ Register out = out_loc.AsRegisterPairLow<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
__ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimFloat: {
- FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ FRegister out = out_loc.AsFpuRegister<FRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadSFromOffset(out, obj, offset, null_checker);
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
__ LoadSFromOffset(out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimDouble: {
- FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ FRegister out = out_loc.AsFpuRegister<FRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadDFromOffset(out, obj, offset, null_checker);
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
__ LoadDFromOffset(out, TMP, data_offset, null_checker);
}
break;
@@ -2070,11 +2668,6 @@
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
-
- if (type == Primitive::kPrimNot) {
- Register out = locations->Out().AsRegister<Register>();
- __ MaybeUnpoisonHeapReference(out);
- }
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
@@ -2116,23 +2709,28 @@
}
void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
- bool needs_runtime_call = instruction->NeedsTypeCheck();
+ Primitive::Type value_type = instruction->GetComponentType();
+
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- needs_runtime_call ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
- if (needs_runtime_call) {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ may_need_runtime_call_for_type_check ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
- locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
- } else {
- locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
- }
+ locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
+ }
+ if (needs_write_barrier) {
+ // Temporary register for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
}
}
@@ -2142,7 +2740,7 @@
Location index = locations->InAt(1);
Location value_location = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
- bool needs_runtime_call = locations->WillCall();
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -2173,8 +2771,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
} else {
- __ Sll(base_reg, index.AsRegister<Register>(), TIMES_2);
- __ Addu(base_reg, obj, base_reg);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
}
if (value_location.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
@@ -2186,58 +2783,134 @@
break;
}
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ } else {
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ }
+ break;
+ }
+
case Primitive::kPrimNot: {
- if (!needs_runtime_call) {
+ if (value_location.IsConstant()) {
+ // Just setting null.
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
- __ Addu(base_reg, obj, base_reg);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
- if (value_location.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
- __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
- DCHECK(!needs_write_barrier);
- } else {
- Register value = value_location.AsRegister<Register>();
- if (kPoisonHeapReferences && needs_write_barrier) {
- // Note that in the case where `value` is a null reference,
- // we do not enter this block, as a null reference does not
- // need poisoning.
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Use Sw() instead of StoreToOffset() in order to be able to
- // hold the poisoned reference in AT and thus avoid allocating
- // yet another temporary register.
- if (index.IsConstant()) {
- if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
- int16_t low = Low16Bits(data_offset);
- uint32_t high = data_offset - low;
- __ Addiu32(TMP, obj, high);
- base_reg = TMP;
- data_offset = low;
- }
- } else {
- DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
- }
- __ PoisonHeapReference(AT, value);
- __ Sw(AT, base_reg, data_offset);
- null_checker();
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ DCHECK_EQ(value, 0);
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ DCHECK(!needs_write_barrier);
+ DCHECK(!may_need_runtime_call_for_type_check);
+ break;
+ }
+
+ DCHECK(needs_write_barrier);
+ Register value = value_location.AsRegister<Register>();
+ Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp2 = TMP; // Doesn't need to survive slow path.
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ MipsLabel done;
+ SlowPathCodeMIPS* slow_path = nullptr;
+
+ if (may_need_runtime_call_for_type_check) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ MipsLabel non_zero;
+ __ Bnez(value, &non_zero);
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
- }
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ __ B(&done);
+ __ Bind(&non_zero);
}
+
+ // Note that when read barriers are enabled, the type checks
+ // are performed without read barriers. This is fine, even in
+ // the case where a class object is in the from-space after
+ // the flip, as a comparison involving such a type would not
+ // produce a false positive; it may of course produce a false
+ // negative, in which case we would take the ArraySet slow
+ // path.
+
+ // /* HeapReference<Class> */ temp1 = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp1, obj, class_offset, null_checker);
+ __ MaybeUnpoisonHeapReference(temp1);
+
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+ // If heap poisoning is enabled, no need to unpoison `temp1`
+ // nor `temp2`, as we are comparing two poisoned references.
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ MipsLabel do_put;
+ __ Beq(temp1, temp2, &do_put);
+ // If heap poisoning is enabled, the `temp1` reference has
+ // not been unpoisoned yet; unpoison it now.
+ __ MaybeUnpoisonHeapReference(temp1);
+
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp1`, as we are comparing against null below.
+ __ Bnez(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ Bne(temp1, temp2, slow_path->GetEntryLabel());
+ }
+ }
+
+ Register source = value;
+ if (kPoisonHeapReferences) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ __ Move(temp1, value);
+ __ PoisonHeapReference(temp1);
+ source = temp1;
+ }
+
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Note: if heap poisoning is enabled, pAputObject takes care
- // of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
+ }
+ __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
+
+ if (!may_need_runtime_call_for_type_check) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
break;
}
@@ -2247,8 +2920,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
- __ Addu(base_reg, obj, base_reg);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
}
if (value_location.IsConstant()) {
int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
@@ -2265,8 +2937,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
- __ Addu(base_reg, obj, base_reg);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
if (value_location.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
@@ -2283,8 +2954,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
- __ Addu(base_reg, obj, base_reg);
+ __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
}
if (value_location.IsConstant()) {
int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
@@ -2327,6 +2997,23 @@
__ Bgeu(index, length, slow_path->GetEntryLabel());
}
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+ if (kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ return 1;
+ }
+ return 0;
+}
+
+// Extra temp is used for read barrier.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+ return 1 + NumberOfInstanceOfTemps(type_check_kind);
+}
+
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
@@ -2337,7 +3024,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = throws_into_catch
+ call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
break;
@@ -2351,15 +3038,20 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
}
void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Location temp_loc = locations->GetTemp(0);
+ Register temp = temp_loc.AsRegister<Register>();
+ const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ DCHECK_LE(num_temps, 2u);
+ Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -2396,8 +3088,12 @@
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Jump to slow path for throwing the exception or doing a
// more involved array check.
__ Bne(temp, cls, slow_path->GetEntryLabel());
@@ -2406,15 +3102,22 @@
case TypeCheckKind::kAbstractClassCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
__ Beqz(temp, slow_path->GetEntryLabel());
@@ -2425,15 +3128,22 @@
case TypeCheckKind::kClassHierarchyCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
MipsLabel loop;
__ Bind(&loop);
__ Beq(temp, cls, &done);
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception. Otherwise, jump to the beginning of the loop.
__ Bnez(temp, &loop);
@@ -2443,14 +3153,21 @@
case TypeCheckKind::kArrayObjectCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Do an exact check.
__ Beq(temp, cls, &done);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
- __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ component_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ Beqz(temp, slow_path->GetEntryLabel());
// Otherwise, the object is indeed an array, further check that this component
@@ -2477,11 +3194,19 @@
// Avoid read barriers to improve performance of the fast path. We can not get false
// positives by doing this.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
- __ LoadFromOffset(kLoadWord, temp, temp, iftable_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ temp_loc,
+ iftable_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Iftable is never null.
__ Lw(TMP, temp, array_length_offset);
// Loop through the iftable and check if any class matches.
@@ -5032,8 +5757,15 @@
Primitive::Type field_type = field_info.GetFieldType();
bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
bool generate_volatile = field_info.IsVolatile() && is_wide;
+ bool object_field_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
+ instruction,
+ generate_volatile
+ ? LocationSummary::kCallOnMainOnly
+ : (object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall));
locations->SetInAt(0, Location::RequiresRegister());
if (generate_volatile) {
@@ -5054,7 +5786,18 @@
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps in the case of an object field get with
+ // read barriers enabled: we do not want the move to overwrite the
+ // object's location, as we need it to emit the read barrier.
+ locations->SetOut(Location::RequiresRegister(),
+ object_field_get_with_read_barrier
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ }
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
}
}
@@ -5064,7 +5807,9 @@
uint32_t dex_pc) {
Primitive::Type type = field_info.GetFieldType();
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
+ Location dst_loc = locations->Out();
LoadOperandType load_type = kLoadUnsignedByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
@@ -5107,40 +5852,61 @@
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == Primitive::kPrimDouble) {
// FP results are returned in core registers. Need to move them.
- Location out = locations->Out();
- if (out.IsFpuRegister()) {
- __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), out.AsFpuRegister<FRegister>());
+ if (dst_loc.IsFpuRegister()) {
+ __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>());
__ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
- out.AsFpuRegister<FRegister>());
+ dst_loc.AsFpuRegister<FRegister>());
} else {
- DCHECK(out.IsDoubleStackSlot());
+ DCHECK(dst_loc.IsDoubleStackSlot());
__ StoreToOffset(kStoreWord,
locations->GetTemp(1).AsRegister<Register>(),
SP,
- out.GetStackIndex());
+ dst_loc.GetStackIndex());
__ StoreToOffset(kStoreWord,
locations->GetTemp(2).AsRegister<Register>(),
SP,
- out.GetStackIndex() + 4);
+ dst_loc.GetStackIndex() + 4);
}
}
} else {
- if (!Primitive::IsFloatingPointType(type)) {
+ if (type == Primitive::kPrimNot) {
+ // /* HeapReference<Object> */ dst = *(obj + offset)
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp_loc = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ dst_loc,
+ obj,
+ offset,
+ temp_loc,
+ /* needs_null_check */ true);
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ } else {
+ __ LoadFromOffset(kLoadWord, dst_loc.AsRegister<Register>(), obj, offset, null_checker);
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
+ }
+ } else if (!Primitive::IsFloatingPointType(type)) {
Register dst;
if (type == Primitive::kPrimLong) {
- DCHECK(locations->Out().IsRegisterPair());
- dst = locations->Out().AsRegisterPairLow<Register>();
+ DCHECK(dst_loc.IsRegisterPair());
+ dst = dst_loc.AsRegisterPairLow<Register>();
} else {
- DCHECK(locations->Out().IsRegister());
- dst = locations->Out().AsRegister<Register>();
+ DCHECK(dst_loc.IsRegister());
+ dst = dst_loc.AsRegister<Register>();
}
__ LoadFromOffset(load_type, dst, obj, offset, null_checker);
- if (type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(dst);
- }
} else {
- DCHECK(locations->Out().IsFpuRegister());
- FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ DCHECK(dst_loc.IsFpuRegister());
+ FRegister dst = dst_loc.AsFpuRegister<FRegister>();
if (type == Primitive::kPrimFloat) {
__ LoadSFromOffset(dst, obj, offset, null_checker);
} else {
@@ -5149,7 +5915,9 @@
}
}
- if (is_volatile) {
+ // Memory barriers, in the case of references, are handled in the
+ // previous switch statement.
+ if (is_volatile && (type != Primitive::kPrimNot)) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
}
@@ -5290,7 +6058,6 @@
}
}
- // TODO: memory barriers?
if (needs_write_barrier) {
Register src = value_location.AsRegister<Register>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
@@ -5320,14 +6087,133 @@
instruction->GetValueCanBeNull());
}
-void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(
- HInstruction* instruction ATTRIBUTE_UNUSED,
- Location root,
- Register obj,
- uint32_t offset) {
+void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
+ Register out_reg = out.AsRegister<Register>();
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
+ DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ out,
+ out_reg,
+ offset,
+ maybe_temp,
+ /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // Save the value of `out` into `maybe_temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ __ Move(maybe_temp.AsRegister<Register>(), out_reg);
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
+ Register out_reg = out.AsRegister<Register>();
+ Register obj_reg = obj.AsRegister<Register>();
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
+ if (kUseBakerReadBarrier) {
+ DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ out,
+ obj_reg,
+ offset,
+ maybe_temp,
+ /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
+ if (read_barrier_option == kWithReadBarrier) {
+ DCHECK(kEmitCompilerReadBarrier);
+ if (kUseBakerReadBarrier) {
+ // Fast path implementation of art::ReadBarrier::BarrierForRoot when
+ // Baker's read barrier are used:
+ //
+ // root = obj.field;
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ // if (temp != null) {
+ // root = temp(root)
+ // }
+
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
+ static_assert(
+ sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+ "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+ "have different sizes.");
+ static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::CompressedReference<mirror::Object> and int32_t "
+ "have different sizes.");
+
+ // Slow path marking the GC root `root`.
+ Location temp = Location::RegisterLocation(T9);
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
+ instruction,
+ root,
+ /*entrypoint*/ temp);
+ codegen_->AddSlowPath(slow_path);
+
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ const int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
+ // Loading the entrypoint does not require a load acquire since it is only changed when
+ // threads are suspended or running a checkpoint.
+ __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
+ // The entrypoint is null when the GC is not marking, this prevents one load compared to
+ // checking GetIsGcMarking.
+ __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ } else {
+ // GC root loaded through a slow path for read barriers other
+ // than Baker's.
+ // /* GcRoot<mirror::Object>* */ root = obj + offset
+ __ Addiu32(root_reg, obj, offset);
+ // /* mirror::Object* */ root = root->Read()
+ codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ }
} else {
// Plain GC root load with no read barrier.
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -5337,6 +6223,225 @@
}
}
+void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ Location no_index = Location::NoLocation();
+ ScaleFactor no_scale_factor = TIMES_1;
+ GenerateReferenceLoadWithBakerReadBarrier(instruction,
+ ref,
+ obj,
+ offset,
+ no_index,
+ no_scale_factor,
+ temp,
+ needs_null_check);
+}
+
+void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ // /* HeapReference<Object> */ ref =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ ScaleFactor scale_factor = TIMES_4;
+ GenerateReferenceLoadWithBakerReadBarrier(instruction,
+ ref,
+ obj,
+ data_offset,
+ index,
+ scale_factor,
+ temp,
+ needs_null_check);
+}
+
+void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location index,
+ ScaleFactor scale_factor,
+ Location temp,
+ bool needs_null_check,
+ bool always_update_field) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // In slow path based read barriers, the read barrier call is
+ // inserted after the original load. However, in fast path based
+ // Baker's read barriers, we need to perform the load of
+ // mirror::Object::monitor_ *before* the original reference load.
+ // This load-load ordering is required by the read barrier.
+ // The fast path/slow path (for Baker's algorithm) should look like:
+ //
+ // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // HeapReference<Object> ref = *src; // Original reference load.
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
+ // if (is_gray) {
+ // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
+ // }
+ //
+ // Note: the original implementation in ReadBarrier::Barrier is
+ // slightly more complex as it performs additional checks that we do
+ // not do here for performance reasons.
+
+ Register ref_reg = ref.AsRegister<Register>();
+ Register temp_reg = temp.AsRegister<Register>();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+ // /* int32_t */ monitor = obj->monitor_
+ __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ __ Sync(0); // Barrier to prevent load-load reordering.
+
+ // The actual reference load.
+ if (index.IsValid()) {
+ // Load types involving an "index": ArrayGet,
+ // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+ // intrinsics.
+ // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
+ if (index.IsConstant()) {
+ size_t computed_offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
+ __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
+ } else {
+ // Handle the special case of the
+ // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+ // intrinsics, which use a register pair as index ("long
+ // offset"), of which only the low part contains data.
+ Register index_reg = index.IsRegisterPair()
+ ? index.AsRegisterPairLow<Register>()
+ : index.AsRegister<Register>();
+ __ ShiftAndAdd(TMP, index_reg, obj, scale_factor, TMP);
+ __ LoadFromOffset(kLoadWord, ref_reg, TMP, offset);
+ }
+ } else {
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
+ }
+
+ // Object* ref = ref_addr->AsMirrorPtr()
+ __ MaybeUnpoisonHeapReference(ref_reg);
+
+ // Slow path marking the object `ref` when it is gray.
+ SlowPathCodeMIPS* slow_path;
+ if (always_update_field) {
+ // ReadBarrierMarkAndUpdateFieldSlowPathMIPS only supports address
+ // of the form `obj + field_offset`, where `obj` is a register and
+ // `field_offset` is a register pair (of which only the lower half
+ // is used). Thus `offset` and `scale_factor` above are expected
+ // to be null in this code path.
+ DCHECK_EQ(offset, 0u);
+ DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
+ slow_path = new (GetGraph()->GetArena())
+ ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
+ ref,
+ obj,
+ /* field_offset */ index,
+ temp_reg);
+ } else {
+ slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ }
+ AddSlowPath(slow_path);
+
+ // if (rb_state == ReadBarrier::GrayState())
+ // ref = ReadBarrier::Mark(ref);
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit into the sign bit (31) and
+ // performing a branch on less than zero.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
+ __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
+ __ Bltz(temp_reg, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Insert a slow path based read barrier *after* the reference load.
+ //
+ // If heap poisoning is enabled, the unpoisoning of the loaded
+ // reference will be carried out by the runtime within the slow
+ // path.
+ //
+ // Note that `ref` currently does not get unpoisoned (when heap
+ // poisoning is enabled), which is alright as the `ref` argument is
+ // not used by the artReadBarrierSlow entry point.
+ //
+ // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
+ AddSlowPath(slow_path);
+
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void CodeGeneratorMIPS::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ if (kEmitCompilerReadBarrier) {
+ // Baker's read barriers shall be handled by the fast path
+ // (CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier).
+ DCHECK(!kUseBakerReadBarrier);
+ // If heap poisoning is enabled, unpoisoning will be taken care of
+ // by the runtime within the slow path.
+ GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
+ } else if (kPoisonHeapReferences) {
+ __ UnpoisonHeapReference(out.AsRegister<Register>());
+ }
+}
+
+void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction,
+ Location out,
+ Location root) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Insert a slow path based read barrier *after* the GC root load.
+ //
+ // Note that GC roots are not affected by heap poisoning, so we do
+ // not need to do anything special for this here.
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ AddSlowPath(slow_path);
+
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
@@ -5345,7 +6450,8 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = LocationSummary::kNoCall;
+ call_kind =
+ kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5360,14 +6466,20 @@
// The output does overlap inputs.
// Note that TypeCheckSlowPathMIPS uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
}
void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
+ const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ DCHECK_LE(num_temps, 1u);
+ Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5385,8 +6497,12 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5395,15 +6511,22 @@
case TypeCheckKind::kAbstractClassCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadWord, out, out, super_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -5413,15 +6536,22 @@
case TypeCheckKind::kClassHierarchyCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
__ Beq(out, cls, &success);
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadWord, out, out, super_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -5432,15 +6562,22 @@
case TypeCheckKind::kArrayObjectCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
- __ LoadFromOffset(kLoadWord, out, out, component_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ component_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -5455,8 +6592,12 @@
case TypeCheckKind::kArrayCheck: {
// No read barrier since the slow path will retry upon failure.
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
/* is_fatal */ false);
@@ -5627,9 +6768,6 @@
HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
- }
// We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
@@ -5665,9 +6803,6 @@
HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
- }
// We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
@@ -5916,12 +7051,13 @@
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(V0));
+ calling_convention.GetReturnLocation(Primitive::kPrimNot));
return;
}
DCHECK(!cls->NeedsAccessCheck());
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
@@ -5976,6 +7112,9 @@
break;
}
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5985,11 +7124,13 @@
GenerateGcRootFieldLoad(cls,
out_loc,
base_or_current_method_reg,
- ArtMethod::DeclaringClassOffset().Int32Value());
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
@@ -5997,6 +7138,7 @@
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
bool reordering = __ SetReorder(false);
@@ -6006,7 +7148,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
uint32_t address = dchecked_integral_cast<uint32_t>(
reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
DCHECK_NE(address, 0u);
@@ -6020,7 +7162,7 @@
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
__ SetReorder(reordering);
generate_null_check = true;
break;
@@ -6032,7 +7174,7 @@
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
__ Lui(out, /* placeholder */ 0x1234);
- GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
__ SetReorder(reordering);
break;
}
@@ -6165,7 +7307,11 @@
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(load,
+ out_loc,
+ out,
+ /* placeholder */ 0x5678,
+ kCompilerReadBarrierOption);
__ SetReorder(reordering);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
@@ -6181,7 +7327,11 @@
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
__ Lui(out, /* placeholder */ 0x1234);
- GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(load,
+ out_loc,
+ out,
+ /* placeholder */ 0x5678,
+ kCompilerReadBarrierOption);
__ SetReorder(reordering);
return;
}
@@ -7279,8 +8429,7 @@
// We are in the range of the table.
// Load the target address from the jump table, indexing by the value.
__ LoadLabelAddress(AT, constant_area, table->GetLabel());
- __ Sll(TMP, TMP, 2);
- __ Addu(TMP, TMP, AT);
+ __ ShiftAndAdd(TMP, TMP, AT, 2, TMP);
__ Lw(TMP, TMP, 0);
// Compute the absolute target address by adding the table start address
// (the table contains offsets to targets relative to its start).
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 98fee24..3875c4b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -241,6 +241,38 @@
uint32_t dex_pc,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+
+ // Generate a heap reference load using one register `out`:
+ //
+ // out <- *(out + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ //
+ // Location `maybe_temp` is used when generating a read barrier and
+ // shall be a register in that case; it may be an invalid location
+ // otherwise.
+ void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
+ // Generate a heap reference load using two different registers
+ // `out` and `obj`:
+ //
+ // out <- *(obj + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ //
+ // Location `maybe_temp` is used when generating a Baker's (fast
+ // path) read barrier and shall be a register in that case; it may
+ // be an invalid location otherwise.
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
+
// Generate a GC root reference load:
//
// root <- *(obj + offset)
@@ -249,7 +281,9 @@
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
Register obj,
- uint32_t offset);
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option);
+
void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
// When the function returns `false` it means that the condition holds if `dst` is non-zero
// and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
@@ -353,6 +387,91 @@
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference array load when Baker's read barriers are used.
+ void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
+ // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+ // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+ //
+ // Load the object reference located at the address
+ // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+ // `ref`, and mark it if needed.
+ //
+ // If `always_update_field` is true, the value of the reference is
+ // atomically updated in the holder (`obj`).
+ void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location index,
+ ScaleFactor scale_factor,
+ Location temp,
+ bool needs_null_check,
+ bool always_update_field = false);
+
+ // Generate a read barrier for a heap reference within `instruction`
+ // using a slow path.
+ //
+ // A read barrier for an object reference read from the heap is
+ // implemented as a call to the artReadBarrierSlow runtime entry
+ // point, which is passed the values in locations `ref`, `obj`, and
+ // `offset`:
+ //
+ // mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+ // mirror::Object* obj,
+ // uint32_t offset);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierSlow.
+ //
+ // When `index` is provided (i.e. for array accesses), the offset
+ // value passed to artReadBarrierSlow is adjusted to take `index`
+ // into account.
+ void GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // If read barriers are enabled, generate a read barrier for a heap
+ // reference using a slow path. If heap poisoning is enabled, also
+ // unpoison the reference in `out`.
+ void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // Generate a read barrier for a GC root within `instruction` using
+ // a slow path.
+ //
+ // A read barrier for an object reference GC root is implemented as
+ // a call to the artReadBarrierForRootSlow runtime entry point,
+ // which is passed the value in location `root`:
+ //
+ // mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierForRootSlow.
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
+
void MarkGCCard(Register object, Register value, bool value_can_be_null);
// Register allocation.
@@ -400,6 +519,15 @@
uint32_t dex_pc,
SlowPathCode* slow_path = nullptr) OVERRIDE;
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path,
+ bool direct);
+
+ void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
+
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5246dbc..19250c6 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -407,6 +407,528 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};
+class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(
+ locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ mips64_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ RestoreLiveRegisters(codegen, locations);
+ __ Bc(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
+};
+
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
+//
+// If `entrypoint` is a valid location it is assumed to already be
+// holding the entrypoint. The case where the entrypoint is passed in
+// is for the GcRoot read barrier.
+class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ ReadBarrierMarkSlowPathMIPS64(HInstruction* instruction,
+ Location ref,
+ Location entrypoint = Location::NoLocation())
+ : SlowPathCodeMIPS64(instruction), ref_(ref), entrypoint_(entrypoint) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS64"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
+ instruction_->IsLoadClass() ||
+ instruction_->IsLoadString() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast() ||
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier marking slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
+ (S2 <= ref_reg && ref_reg <= S7) ||
+ (ref_reg == S8)) << ref_reg;
+ // "Compact" slow path, saving two moves.
+ //
+ // Instead of using the standard runtime calling convention (input
+ // and output in A0 and V0 respectively):
+ //
+ // A0 <- ref
+ // V0 <- ReadBarrierMark(A0)
+ // ref <- V0
+ //
+ // we just use rX (the register containing `ref`) as input and output
+ // of a dedicated entrypoint:
+ //
+ // rX <- ReadBarrierMarkRegX(rX)
+ //
+ if (entrypoint_.IsValid()) {
+ mips64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
+ DCHECK_EQ(entrypoint_.AsRegister<GpuRegister>(), T9);
+ __ Jalr(entrypoint_.AsRegister<GpuRegister>());
+ __ Nop();
+ } else {
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
+ // This runtime call does not require a stack map.
+ mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
+ instruction_,
+ this);
+ }
+ __ Bc(GetExitLabel());
+ }
+
+ private:
+ // The location (register) of the marked object reference.
+ const Location ref_;
+
+ // The location of the entrypoint if already loaded.
+ const Location entrypoint_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS64);
+};
+
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathMIPS64 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(HInstruction* instruction,
+                                              Location ref,
+                                              GpuRegister obj,
+                                              Location field_offset,
+                                              GpuRegister temp1)
+      : SlowPathCodeMIPS64(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_offset_(field_offset),
+        temp1_(temp1) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());  // NOTE(review): redundant with the DCHECK above.
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+    DCHECK(field_offset_.IsRegister()) << field_offset_;
+
+    __ Bind(GetEntryLabel());
+
+    // Save the old reference.
+    // Note that we cannot use AT or TMP to save the old reference, as those
+    // are used by the code that follows, but we need the old reference after
+    // the call to the ReadBarrierMarkRegX entry point.
+    DCHECK_NE(temp1_, AT);
+    DCHECK_NE(temp1_, TMP);
+    __ Move(temp1_, ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+    DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
+           (S2 <= ref_reg && ref_reg <= S7) ||
+           (ref_reg == S8)) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in A0 and V0 respectively):
+    //
+    //   A0 <- ref
+    //   V0 <- ReadBarrierMark(A0)
+    //   ref <- V0
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
+    // This runtime call does not require a stack map.
+    mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
+                                                        instruction_,
+                                                        this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*(obj_ + field_offset_)`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // compare-and-set (CAS) loop below would abort, leaving the
+    // field as-is.
+    Mips64Label done;
+    __ Beqc(temp1_, ref_reg, &done);
+
+    // Update the holder's field atomically. This may fail if the
+    // mutator updates it before us, but that is OK. This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+
+    // Convenience aliases.
+    GpuRegister base = obj_;
+    GpuRegister offset = field_offset_.AsRegister<GpuRegister>();
+    GpuRegister expected = temp1_;
+    GpuRegister value = ref_reg;
+    GpuRegister tmp_ptr = TMP;      // Pointer to actual memory.
+    GpuRegister tmp = AT;           // Value in memory.
+
+    __ Daddu(tmp_ptr, base, offset);
+
+    if (kPoisonHeapReferences) {
+      __ PoisonHeapReference(expected);
+      // Do not poison `value` if it is the same register as
+      // `expected`, which has just been poisoned.
+      if (value != expected) {
+        __ PoisonHeapReference(value);
+      }
+    }
+
+    // do {
+    //   tmp = [r_ptr] - expected;
+    // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
+
+    Mips64Label loop_head, exit_loop;
+    __ Bind(&loop_head);
+    __ Ll(tmp, tmp_ptr);
+    // The LL instruction sign-extends the 32-bit value, but
+    // 32-bit references must be zero-extended. Zero-extend `tmp`.
+    __ Dext(tmp, tmp, 0, 32);
+    __ Bnec(tmp, expected, &exit_loop);
+    __ Move(tmp, value);
+    __ Sc(tmp, tmp_ptr);
+    __ Beqzc(tmp, &loop_head);
+    __ Bind(&exit_loop);
+
+    if (kPoisonHeapReferences) {
+      __ UnpoisonHeapReference(expected);
+      // Do not unpoison `value` if it is the same register as
+      // `expected`, which has just been unpoisoned.
+      if (value != expected) {
+        __ UnpoisonHeapReference(value);
+      }
+    }
+
+    __ Bind(&done);
+    __ Bc(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const GpuRegister obj_;
+  // The location of the offset of the marked reference field within `obj_`.
+  Location field_offset_;
+
+  const GpuRegister temp1_;  // Holds the old reference across the mark-entrypoint call.
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS64);
+};
+
+// Slow path generating a read barrier for a heap reference.
+class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+  ReadBarrierForHeapReferenceSlowPathMIPS64(HInstruction* instruction,
+                                            Location out,
+                                            Location ref,
+                                            Location obj,
+                                            uint32_t offset,
+                                            Location index)
+      : SlowPathCodeMIPS64(instruction),
+        out_(out),
+        ref_(ref),
+        obj_(obj),
+        offset_(offset),
+        index_(index) {
+    DCHECK(kEmitCompilerReadBarrier);
+    // If `obj` is equal to `out` or `ref`, it means the initial object
+    // has been overwritten by (or after) the heap object reference load
+    // to be instrumented, e.g.:
+    //
+    //   __ LoadFromOffset(kLoadWord, out, out, offset);
+    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
+    //
+    // In that case, we have lost the information about the original
+    // object, and the emitted read barrier cannot work properly.
+    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
+    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+    Primitive::Type type = Primitive::kPrimNot;
+    GpuRegister reg_out = out_.AsRegister<GpuRegister>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+    DCHECK(instruction_->IsInstanceFieldGet() ||
+           instruction_->IsStaticFieldGet() ||
+           instruction_->IsArrayGet() ||
+           instruction_->IsInstanceOf() ||
+           instruction_->IsCheckCast() ||
+           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier for heap reference slow path: "
+        << instruction_->DebugName();
+
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    // We may have to change the index's value, but as `index_` is a
+    // constant member (like other "inputs" of this slow path),
+    // introduce a copy of it, `index`.
+    Location index = index_;
+    if (index_.IsValid()) {
+      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
+      if (instruction_->IsArrayGet()) {
+        // Compute the actual memory offset and store it in `index`.
+        GpuRegister index_reg = index_.AsRegister<GpuRegister>();
+        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
+        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
+          // We are about to change the value of `index_reg` (see the
+          // calls to art::mips64::Mips64Assembler::Sll and
+          // art::mips64::Mips64Assembler::Addiu32 below), but it has
+          // not been saved by the previous call to
+          // art::SlowPathCode::SaveLiveRegisters, as it is a
+          // callee-save register --
+          // art::SlowPathCode::SaveLiveRegisters does not consider
+          // callee-save registers, as it has been designed with the
+          // assumption that callee-save registers are supposed to be
+          // handled by the called function. So, as a callee-save
+          // register, `index_reg` _would_ eventually be saved onto
+          // the stack, but it would be too late: we would have
+          // changed its value earlier. Therefore, we manually save
+          // it here into another freely available register,
+          // `free_reg`, chosen of course among the caller-save
+          // registers (as a callee-save `free_reg` register would
+          // exhibit the same problem).
+          //
+          // Note we could have requested a temporary register from
+          // the register allocator instead; but we prefer not to, as
+          // this is a slow path, and we know we can find a
+          // caller-save register that is available.
+          GpuRegister free_reg = FindAvailableCallerSaveRegister(codegen);
+          __ Move(free_reg, index_reg);
+          index_reg = free_reg;
+          index = Location::RegisterLocation(index_reg);
+        } else {
+          // The initial register stored in `index_` has already been
+          // saved in the call to art::SlowPathCode::SaveLiveRegisters
+          // (as it is not a callee-save register), so we can freely
+          // use it.
+        }
+        // Shifting the index value contained in `index_reg` by the scale
+        // factor (2) cannot overflow in practice, as the runtime is
+        // unable to allocate object arrays with a size larger than
+        // 2^26 - 1 (that is, 2^28 - 4 bytes).
+        __ Sll(index_reg, index_reg, TIMES_4);
+        static_assert(
+            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+        __ Addiu32(index_reg, index_reg, offset_);
+      } else {
+        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
+        // intrinsics, `index_` is not shifted by a scale factor of 2
+        // (as in the case of ArrayGet), as it is actually an offset
+        // to an object field within an object.
+        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
+        DCHECK(instruction_->GetLocations()->Intrinsified());
+        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
+               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
+            << instruction_->AsInvoke()->GetIntrinsic();
+        DCHECK_EQ(offset_, 0U);
+        DCHECK(index_.IsRegister());
+      }
+    }
+
+    // We're moving two or three locations to locations that could
+    // overlap, so we need a parallel move resolver.
+    InvokeRuntimeCallingConvention calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(ref_,
+                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                          Primitive::kPrimNot,
+                          nullptr);
+    parallel_move.AddMove(obj_,
+                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                          Primitive::kPrimNot,
+                          nullptr);
+    if (index.IsValid()) {
+      parallel_move.AddMove(index,
+                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+                            Primitive::kPrimInt,
+                            nullptr);
+      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+    } else {
+      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
+    }
+    mips64_codegen->InvokeRuntime(kQuickReadBarrierSlow,
+                                  instruction_,
+                                  instruction_->GetDexPc(),
+                                  this);
+    CheckEntrypointTypes<
+        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
+    mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
+
+    RestoreLiveRegisters(codegen, locations);
+    __ Bc(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierForHeapReferenceSlowPathMIPS64";
+  }
+
+ private:
+  GpuRegister FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
+    size_t ref = static_cast<int>(ref_.AsRegister<GpuRegister>());
+    size_t obj = static_cast<int>(obj_.AsRegister<GpuRegister>());
+    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+      if (i != ref &&
+          i != obj &&
+          !codegen->IsCoreCalleeSaveRegister(i) &&
+          !codegen->IsBlockedCoreRegister(i)) {
+        return static_cast<GpuRegister>(i);
+      }
+    }
+    // We shall never fail to find a free caller-save register, as
+    // there are more than two core caller-save registers on MIPS64
+    // (meaning it is possible to find one which is different from
+    // `ref` and `obj`).
+    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
+    LOG(FATAL) << "Could not find a free caller-save register";
+    UNREACHABLE();
+  }
+
+  const Location out_;     // Location receiving the entrypoint's result.
+  const Location ref_;     // Reference location (first runtime argument).
+  const Location obj_;     // Holder object location (second runtime argument).
+  const uint32_t offset_;  // Offset (third runtime argument when `index_` is invalid).
+  // An additional location containing an index to an array.
+  // Only used for HArrayGet and the UnsafeGetObject &
+  // UnsafeGetObjectVolatile intrinsics.
+  const Location index_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS64);
+};
+
+// Slow path generating a read barrier for a GC root.
+class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+  ReadBarrierForRootSlowPathMIPS64(HInstruction* instruction, Location out, Location root)
+      : SlowPathCodeMIPS64(instruction), out_(out), root_(root) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Primitive::Type type = Primitive::kPrimNot;  // GC roots are always references.
+    GpuRegister reg_out = out_.AsRegister<GpuRegister>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
+        << "Unexpected instruction in read barrier for GC root slow path: "
+        << instruction_->DebugName();
+
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+    mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                                 root_,
+                                 Primitive::kPrimNot);
+    mips64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
+                                  instruction_,
+                                  instruction_->GetDexPc(),
+                                  this);
+    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
+    mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
+
+    RestoreLiveRegisters(codegen, locations);
+    __ Bc(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
+
+ private:
+  const Location out_;   // Location receiving the entrypoint's result.
+  const Location root_;  // Location of the GC root passed to the entrypoint.
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS64);
+};
+
CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
const Mips64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
@@ -558,26 +1080,21 @@
return;
}
- // Make sure the frame size isn't unreasonably large. Per the various APIs
- // it looks like it should always be less than 2GB in size, which allows
- // us using 32-bit signed offsets from the stack pointer.
- if (GetFrameSize() > 0x7FFFFFFF)
- LOG(FATAL) << "Stack frame larger than 2GB";
+ // Make sure the frame size isn't unreasonably large.
+ if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
+ LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+ }
// Spill callee-saved registers.
- // Note that their cumulative size is small and they can be indexed using
- // 16-bit offsets.
- // TODO: increment/decrement SP in one step instead of two or remove this comment.
-
- uint32_t ofs = FrameEntrySpillSize();
+ uint32_t ofs = GetFrameSize();
__ IncreaseFrameSize(ofs);
for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
GpuRegister reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
ofs -= kMips64DoublewordSize;
- __ Sd(reg, SP, ofs);
+ __ StoreToOffset(kStoreDoubleword, reg, SP, ofs);
__ cfi().RelOffset(DWARFReg(reg), ofs);
}
}
@@ -586,23 +1103,16 @@
FpuRegister reg = kFpuCalleeSaves[i];
if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
ofs -= kMips64DoublewordSize;
- __ Sdc1(reg, SP, ofs);
+ __ StoreFpuToOffset(kStoreDoubleword, reg, SP, ofs);
__ cfi().RelOffset(DWARFReg(reg), ofs);
}
}
- // Allocate the rest of the frame and store the current method pointer
- // at its end.
-
- __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
-
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
if (RequiresCurrentMethod()) {
- static_assert(IsInt<16>(kCurrentMethodStackOffset),
- "kCurrentMethodStackOffset must fit into int16_t");
- __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ __ StoreToOffset(kStoreDoubleword, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
if (GetGraph()->HasShouldDeoptimizeFlag()) {
@@ -615,42 +1125,32 @@
__ cfi().RememberState();
if (!HasEmptyFrame()) {
- // Deallocate the rest of the frame.
-
- __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
-
// Restore callee-saved registers.
- // Note that their cumulative size is small and they can be indexed using
- // 16-bit offsets.
- // TODO: increment/decrement SP in one step instead of two or remove this comment.
-
- uint32_t ofs = 0;
-
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- FpuRegister reg = kFpuCalleeSaves[i];
- if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
- __ Ldc1(reg, SP, ofs);
- ofs += kMips64DoublewordSize;
- __ cfi().Restore(DWARFReg(reg));
- }
- }
-
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ // For better instruction scheduling restore RA before other registers.
+ uint32_t ofs = GetFrameSize();
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
GpuRegister reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
- __ Ld(reg, SP, ofs);
- ofs += kMips64DoublewordSize;
+ ofs -= kMips64DoublewordSize;
+ __ LoadFromOffset(kLoadDoubleword, reg, SP, ofs);
__ cfi().Restore(DWARFReg(reg));
}
}
- DCHECK_EQ(ofs, FrameEntrySpillSize());
- __ DecreaseFrameSize(ofs);
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FpuRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMips64DoublewordSize;
+ __ LoadFpuFromOffset(kLoadDoubleword, reg, SP, ofs);
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ __ DecreaseFrameSize(GetFrameSize());
}
- __ Jr(RA);
- __ Nop();
+ __ Jic(RA, 0);
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
@@ -1162,23 +1662,32 @@
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value());
- __ Jalr(T9);
- __ Nop();
+ GenerateInvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
}
}
+void CodeGeneratorMIPS64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+                                                              HInstruction* instruction,
+                                                              SlowPathCode* slow_path) {
+  ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+  GenerateInvokeRuntime(entry_point_offset);  // Note: no RecordPcInfo, unlike InvokeRuntime.
+}
+
+void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
+  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);  // Entrypoint from Thread*.
+  __ Jalr(T9);
+  __ Nop();
+}
+
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
GpuRegister class_reg) {
__ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
__ LoadConst32(AT, mirror::Class::kStatusInitialized);
__ Bltc(TMP, AT, slow_path->GetEntryLabel());
- // TODO: barrier needed?
+ // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+ __ Sync(0);
__ Bind(slow_path->GetExitLabel());
}
@@ -1469,14 +1978,31 @@
}
void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
+ Primitive::Type type = instruction->GetType();
+ bool object_array_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (type == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ if (Primitive::IsFloatingPointType(type)) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps in the case of an object array get with
+ // read barriers enabled: we do not want the move to overwrite the
+ // array's location, as we need it to emit the read barrier.
+ locations->SetOut(Location::RequiresRegister(),
+ object_array_get_with_read_barrier
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ }
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier.
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->AddTemp(Location::RequiresRegister());
}
}
@@ -1489,7 +2015,9 @@
void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
+ Location out_loc = locations->Out();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -1499,7 +2027,7 @@
instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -1512,7 +2040,7 @@
}
case Primitive::kPrimByte: {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -1525,21 +2053,20 @@
}
case Primitive::kPrimShort: {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_2);
__ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimChar: {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
__ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
@@ -1579,73 +2106,110 @@
__ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
__ Bc(&done);
__ Bind(&uncompressed_load);
- __ Dsll(TMP, index_reg, TIMES_2);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index_reg, obj, TIMES_2);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
__ Bind(&done);
} else {
- __ Dsll(TMP, index_reg, TIMES_2);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index_reg, obj, TIMES_2);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
}
}
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
+ case Primitive::kPrimInt: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(load_type, out, obj, offset, null_checker);
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
__ LoadFromOffset(load_type, out, TMP, data_offset, null_checker);
}
break;
}
+ case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ // /* HeapReference<Object> */ out =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier call.
+ codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
+ out_loc,
+ obj,
+ data_offset,
+ index,
+ temp,
+ /* needs_null_check */ true);
+ } else {
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, offset, null_checker);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+ } else {
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
+ __ LoadFromOffset(kLoadUnsignedWord, out, TMP, data_offset, null_checker);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction,
+ out_loc,
+ out_loc,
+ obj_loc,
+ data_offset,
+ index);
+ }
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
__ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimFloat: {
- FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFpuFromOffset(kLoadWord, out, obj, offset, null_checker);
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
__ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
}
break;
}
case Primitive::kPrimDouble: {
- FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
- __ Daddu(TMP, obj, TMP);
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
__ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
@@ -1655,11 +2219,6 @@
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
-
- if (type == Primitive::kPrimNot) {
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
- __ MaybeUnpoisonHeapReference(out);
- }
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
@@ -1701,23 +2260,28 @@
}
void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
- bool needs_runtime_call = instruction->NeedsTypeCheck();
+ Primitive::Type value_type = instruction->GetComponentType();
+
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- needs_runtime_call ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
- if (needs_runtime_call) {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ may_need_runtime_call_for_type_check ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
- locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
- } else {
- locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
- }
+ locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
+ }
+ if (needs_write_barrier) {
+ // Temporary register for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
}
}
@@ -1727,7 +2291,7 @@
Location index = locations->InAt(1);
Location value_location = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
- bool needs_runtime_call = locations->WillCall();
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -1758,8 +2322,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
} else {
- __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_2);
- __ Daddu(base_reg, obj, base_reg);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_2);
}
if (value_location.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
@@ -1771,68 +2334,134 @@
break;
}
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ } else {
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ GpuRegister value = value_location.AsRegister<GpuRegister>();
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ }
+ break;
+ }
+
case Primitive::kPrimNot: {
- if (!needs_runtime_call) {
+ if (value_location.IsConstant()) {
+ // Just setting null.
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- DCHECK(index.IsRegister()) << index;
- __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_4);
- __ Daddu(base_reg, obj, base_reg);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
}
- if (value_location.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
- __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
- DCHECK(!needs_write_barrier);
- } else {
- GpuRegister value = value_location.AsRegister<GpuRegister>();
- if (kPoisonHeapReferences && needs_write_barrier) {
- // Note that in the case where `value` is a null reference,
- // we do not enter this block, as a null reference does not
- // need poisoning.
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Use Sw() instead of StoreToOffset() in order to be able to
- // hold the poisoned reference in AT and thus avoid allocating
- // yet another temporary register.
- if (index.IsConstant()) {
- if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
- int16_t low16 = Low16Bits(data_offset);
- // For consistency with StoreToOffset() and such treat data_offset as int32_t.
- uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16;
- int16_t upper16 = High16Bits(high48);
- // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a
- // compensatory 64KB added, which may push `high48` above 2GB and require
- // the dahi instruction.
- int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 1 : 0);
- __ Daui(TMP, obj, upper16);
- if (higher16 != 0) {
- __ Dahi(TMP, higher16);
- }
- base_reg = TMP;
- data_offset = low16;
- }
- } else {
- DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
- }
- __ PoisonHeapReference(AT, value);
- __ Sw(AT, base_reg, data_offset);
- null_checker();
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ DCHECK_EQ(value, 0);
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ DCHECK(!needs_write_barrier);
+ DCHECK(!may_need_runtime_call_for_type_check);
+ break;
+ }
+
+ DCHECK(needs_write_barrier);
+ GpuRegister value = value_location.AsRegister<GpuRegister>();
+ GpuRegister temp1 = locations->GetTemp(0).AsRegister<GpuRegister>();
+ GpuRegister temp2 = TMP; // Doesn't need to survive slow path.
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ Mips64Label done;
+ SlowPathCodeMIPS64* slow_path = nullptr;
+
+ if (may_need_runtime_call_for_type_check) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ Mips64Label non_zero;
+ __ Bnezc(value, &non_zero);
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
}
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
- }
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ __ Bc(&done);
+ __ Bind(&non_zero);
}
+
+ // Note that when read barriers are enabled, the type checks
+ // are performed without read barriers. This is fine, even in
+ // the case where a class object is in the from-space after
+ // the flip, as a comparison involving such a type would not
+ // produce a false positive; it may of course produce a false
+ // negative, in which case we would take the ArraySet slow
+ // path.
+
+ // /* HeapReference<Class> */ temp1 = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp1, obj, class_offset, null_checker);
+ __ MaybeUnpoisonHeapReference(temp1);
+
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, component_offset);
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp2, value, class_offset);
+ // If heap poisoning is enabled, no need to unpoison `temp1`
+ // nor `temp2`, as we are comparing two poisoned references.
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ Mips64Label do_put;
+ __ Beqc(temp1, temp2, &do_put);
+ // If heap poisoning is enabled, the `temp1` reference has
+ // not been unpoisoned yet; unpoison it now.
+ __ MaybeUnpoisonHeapReference(temp1);
+
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, super_offset);
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp1`, as we are comparing against null below.
+ __ Bnezc(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ Bnec(temp1, temp2, slow_path->GetEntryLabel());
+ }
+ }
+
+ GpuRegister source = value;
+ if (kPoisonHeapReferences) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ __ Move(temp1, value);
+ __ PoisonHeapReference(temp1);
+ source = temp1;
+ }
+
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Note: if heap poisoning is enabled, pAputObject takes care
- // of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
+ }
+ __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
+
+ if (!may_need_runtime_call_for_type_check) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
break;
}
@@ -1842,8 +2471,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_8);
- __ Daddu(base_reg, obj, base_reg);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
}
if (value_location.IsConstant()) {
int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
@@ -1860,8 +2488,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_4);
- __ Daddu(base_reg, obj, base_reg);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
}
if (value_location.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
@@ -1878,8 +2505,7 @@
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_8);
- __ Daddu(base_reg, obj, base_reg);
+ __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
}
if (value_location.IsConstant()) {
int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
@@ -1922,6 +2548,23 @@
__ Bgeuc(index, length, slow_path->GetEntryLabel());
}
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+ if (kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ return 1;
+ }
+ return 0;
+}
+
+// Extra temp is used for read barrier.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+ return 1 + NumberOfInstanceOfTemps(type_check_kind);
+}
+
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
@@ -1932,7 +2575,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = throws_into_catch
+ call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
break;
@@ -1946,15 +2589,20 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
}
void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
- GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
+ Location temp_loc = locations->GetTemp(0);
+ GpuRegister temp = temp_loc.AsRegister<GpuRegister>();
+ const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ DCHECK_LE(num_temps, 2u);
+ Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -1991,8 +2639,12 @@
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Jump to slow path for throwing the exception or doing a
// more involved array check.
__ Bnec(temp, cls, slow_path->GetEntryLabel());
@@ -2001,15 +2653,22 @@
case TypeCheckKind::kAbstractClassCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadUnsignedWord, temp, temp, super_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
__ Beqzc(temp, slow_path->GetEntryLabel());
@@ -2020,15 +2679,22 @@
case TypeCheckKind::kClassHierarchyCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
Mips64Label loop;
__ Bind(&loop);
__ Beqc(temp, cls, &done);
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadUnsignedWord, temp, temp, super_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception. Otherwise, jump to the beginning of the loop.
__ Bnezc(temp, &loop);
@@ -2038,14 +2704,21 @@
case TypeCheckKind::kArrayObjectCheck: {
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Do an exact check.
__ Beqc(temp, cls, &done);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
- __ LoadFromOffset(kLoadUnsignedWord, temp, temp, component_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ component_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ Beqzc(temp, slow_path->GetEntryLabel());
// Otherwise, the object is indeed an array, further check that this component
@@ -2072,11 +2745,19 @@
// Avoid read barriers to improve performance of the fast path. We can not get false
// positives by doing this.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
- __ LoadFromOffset(kLoadUnsignedWord, temp, temp, iftable_offset);
- __ MaybeUnpoisonHeapReference(temp);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ temp_loc,
+ iftable_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// Iftable is never null.
__ Lw(TMP, temp, array_length_offset);
// Loop through the iftable and check if any class matches.
@@ -3292,14 +3973,31 @@
}
void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
- const FieldInfo& field_info ATTRIBUTE_UNUSED) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool object_field_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps in the case of an object field get with
+ // read barriers enabled: we do not want the move to overwrite the
+ // object's location, as we need it to emit the read barrier.
+ locations->SetOut(Location::RequiresRegister(),
+ object_field_get_with_read_barrier
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ }
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
}
@@ -3307,8 +4005,11 @@
const FieldInfo& field_info) {
Primitive::Type type = field_info.GetFieldType();
LocationSummary* locations = instruction->GetLocations();
- GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
+ Location dst_loc = locations->Out();
LoadOperandType load_type = kLoadUnsignedByte;
+ bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -3341,19 +4042,46 @@
UNREACHABLE();
}
if (!Primitive::IsFloatingPointType(type)) {
- DCHECK(locations->Out().IsRegister());
- GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
+ DCHECK(dst_loc.IsRegister());
+ GpuRegister dst = dst_loc.AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimNot) {
+ // /* HeapReference<Object> */ dst = *(obj + offset)
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp_loc = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier call.
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ dst_loc,
+ obj,
+ offset,
+ temp_loc,
+ /* needs_null_check */ true);
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ } else {
+ __ LoadFromOffset(kLoadUnsignedWord, dst, obj, offset, null_checker);
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
+ }
+ } else {
+ __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
+ }
} else {
- DCHECK(locations->Out().IsFpuRegister());
- FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ DCHECK(dst_loc.IsFpuRegister());
+ FpuRegister dst = dst_loc.AsFpuRegister<FpuRegister>();
__ LoadFpuFromOffset(load_type, dst, obj, offset, null_checker);
}
- // TODO: memory barrier?
- if (type == Primitive::kPrimNot) {
- GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- __ MaybeUnpoisonHeapReference(dst);
+ // Memory barriers, in the case of references, are handled in the
+ // previous switch statement.
+ if (is_volatile && (type != Primitive::kPrimNot)) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
}
@@ -3377,6 +4105,7 @@
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
Location value_location = locations->InAt(1);
StoreOperandType store_type = kStoreByte;
+ bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
auto null_checker = GetImplicitNullChecker(instruction, codegen_);
@@ -3404,6 +4133,10 @@
UNREACHABLE();
}
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ }
+
if (value_location.IsConstant()) {
int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
__ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
@@ -3427,12 +4160,16 @@
__ StoreFpuToOffset(store_type, src, obj, offset, null_checker);
}
}
- // TODO: memory barriers?
+
if (needs_write_barrier) {
DCHECK(value_location.IsRegister());
GpuRegister src = value_location.AsRegister<GpuRegister>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
}
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
}
void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -3451,14 +4188,134 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
+ GpuRegister out_reg = out.AsRegister<GpuRegister>();
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
+ DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ out,
+ out_reg,
+ offset,
+ maybe_temp,
+ /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // Save the value of `out` into `maybe_temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ __ Move(maybe_temp.AsRegister<GpuRegister>(), out_reg);
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
+ GpuRegister out_reg = out.AsRegister<GpuRegister>();
+ GpuRegister obj_reg = obj.AsRegister<GpuRegister>();
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
+ if (kUseBakerReadBarrier) {
+ DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ out,
+ obj_reg,
+ offset,
+ maybe_temp,
+ /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
- HInstruction* instruction ATTRIBUTE_UNUSED,
+ HInstruction* instruction,
Location root,
GpuRegister obj,
- uint32_t offset) {
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
GpuRegister root_reg = root.AsRegister<GpuRegister>();
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
+ if (read_barrier_option == kWithReadBarrier) {
+ DCHECK(kEmitCompilerReadBarrier);
+ if (kUseBakerReadBarrier) {
+ // Fast path implementation of art::ReadBarrier::BarrierForRoot when
+      // Baker's read barriers are used:
+ //
+ // root = obj.field;
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ // if (temp != null) {
+ // root = temp(root)
+ // }
+
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
+ static_assert(
+ sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+ "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+ "have different sizes.");
+ static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::CompressedReference<mirror::Object> and int32_t "
+ "have different sizes.");
+
+ // Slow path marking the GC root `root`.
+ Location temp = Location::RegisterLocation(T9);
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
+ instruction,
+ root,
+ /*entrypoint*/ temp);
+ codegen_->AddSlowPath(slow_path);
+
+ // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+ const int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(root.reg() - 1);
+ // Loading the entrypoint does not require a load acquire since it is only changed when
+ // threads are suspended or running a checkpoint.
+ __ LoadFromOffset(kLoadDoubleword, temp.AsRegister<GpuRegister>(), TR, entry_point_offset);
+      // The entrypoint is null when the GC is not marking; this prevents one load compared to
+ // checking GetIsGcMarking.
+ __ Bnezc(temp.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ } else {
+ // GC root loaded through a slow path for read barriers other
+ // than Baker's.
+ // /* GcRoot<mirror::Object>* */ root = obj + offset
+ __ Daddiu64(root_reg, obj, static_cast<int32_t>(offset));
+ // /* mirror::Object* */ root = root->Read()
+ codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ }
} else {
// Plain GC root load with no read barrier.
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -3468,6 +4325,222 @@
}
}
+void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ Location no_index = Location::NoLocation();
+ ScaleFactor no_scale_factor = TIMES_1;
+ GenerateReferenceLoadWithBakerReadBarrier(instruction,
+ ref,
+ obj,
+ offset,
+ no_index,
+ no_scale_factor,
+ temp,
+ needs_null_check);
+}
+
+void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ // /* HeapReference<Object> */ ref =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ ScaleFactor scale_factor = TIMES_4;
+ GenerateReferenceLoadWithBakerReadBarrier(instruction,
+ ref,
+ obj,
+ data_offset,
+ index,
+ scale_factor,
+ temp,
+ needs_null_check);
+}
+
+void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t offset,
+ Location index,
+ ScaleFactor scale_factor,
+ Location temp,
+ bool needs_null_check,
+ bool always_update_field) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // In slow path based read barriers, the read barrier call is
+ // inserted after the original load. However, in fast path based
+ // Baker's read barriers, we need to perform the load of
+ // mirror::Object::monitor_ *before* the original reference load.
+ // This load-load ordering is required by the read barrier.
+ // The fast path/slow path (for Baker's algorithm) should look like:
+ //
+ // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // HeapReference<Object> ref = *src; // Original reference load.
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
+ // if (is_gray) {
+ // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
+ // }
+ //
+ // Note: the original implementation in ReadBarrier::Barrier is
+ // slightly more complex as it performs additional checks that we do
+ // not do here for performance reasons.
+
+ GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
+ GpuRegister temp_reg = temp.AsRegister<GpuRegister>();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+ // /* int32_t */ monitor = obj->monitor_
+ __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ __ Sync(0); // Barrier to prevent load-load reordering.
+
+ // The actual reference load.
+ if (index.IsValid()) {
+ // Load types involving an "index": ArrayGet,
+ // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+ // intrinsics.
+ // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
+ if (index.IsConstant()) {
+ size_t computed_offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
+ __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, computed_offset);
+ } else {
+ GpuRegister index_reg = index.AsRegister<GpuRegister>();
+ if (scale_factor == TIMES_1) {
+ __ Daddu(TMP, index_reg, obj);
+ } else {
+ __ Dlsa(TMP, index_reg, obj, scale_factor);
+ }
+ __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, offset);
+ }
+ } else {
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, offset);
+ }
+
+ // Object* ref = ref_addr->AsMirrorPtr()
+ __ MaybeUnpoisonHeapReference(ref_reg);
+
+ // Slow path marking the object `ref` when it is gray.
+ SlowPathCodeMIPS64* slow_path;
+ if (always_update_field) {
+ // ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 only supports address
+ // of the form `obj + field_offset`, where `obj` is a register and
+ // `field_offset` is a register. Thus `offset` and `scale_factor`
+ // above are expected to be null in this code path.
+ DCHECK_EQ(offset, 0u);
+ DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
+ slow_path = new (GetGraph()->GetArena())
+ ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
+ ref,
+ obj,
+ /* field_offset */ index,
+ temp_reg);
+ } else {
+ slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ }
+ AddSlowPath(slow_path);
+
+ // if (rb_state == ReadBarrier::GrayState())
+ // ref = ReadBarrier::Mark(ref);
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit into the sign bit (31) and
+ // performing a branch on less than zero.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
+ __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
+ __ Bltzc(temp_reg, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Insert a slow path based read barrier *after* the reference load.
+ //
+ // If heap poisoning is enabled, the unpoisoning of the loaded
+ // reference will be carried out by the runtime within the slow
+ // path.
+ //
+ // Note that `ref` currently does not get unpoisoned (when heap
+ // poisoning is enabled), which is alright as the `ref` argument is
+ // not used by the artReadBarrierSlow entry point.
+ //
+ // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
+ ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
+ AddSlowPath(slow_path);
+
+ __ Bc(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void CodeGeneratorMIPS64::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ if (kEmitCompilerReadBarrier) {
+ // Baker's read barriers shall be handled by the fast path
+ // (CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier).
+ DCHECK(!kUseBakerReadBarrier);
+ // If heap poisoning is enabled, unpoisoning will be taken care of
+ // by the runtime within the slow path.
+ GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
+ } else if (kPoisonHeapReferences) {
+ __ UnpoisonHeapReference(out.AsRegister<GpuRegister>());
+ }
+}
+
+void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
+ Location out,
+ Location root) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Insert a slow path based read barrier *after* the GC root load.
+ //
+ // Note that GC roots are not affected by heap poisoning, so we do
+ // not need to do anything special for this here.
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ AddSlowPath(slow_path);
+
+ __ Bc(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
@@ -3476,7 +4549,8 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = LocationSummary::kNoCall;
+ call_kind =
+ kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -3491,14 +4565,20 @@
// The output does overlap inputs.
// Note that TypeCheckSlowPathMIPS64 uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
}
void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
- GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ Location out_loc = locations->Out();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
+ const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ DCHECK_LE(num_temps, 1u);
+ Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -3516,8 +4596,12 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -3526,15 +4610,22 @@
case TypeCheckKind::kAbstractClassCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadUnsignedWord, out, out, super_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -3544,15 +4635,22 @@
case TypeCheckKind::kClassHierarchyCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
__ Beqc(out, cls, &success);
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadUnsignedWord, out, out, super_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -3563,15 +4661,22 @@
case TypeCheckKind::kArrayObjectCheck: {
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
- __ LoadFromOffset(kLoadUnsignedWord, out, out, component_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ component_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -3586,8 +4691,12 @@
case TypeCheckKind::kArrayCheck: {
// No read barrier since the slow path will retry upon failure.
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
/* is_fatal */ false);
@@ -3757,9 +4866,6 @@
HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
- }
bool fallback_load = false;
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
@@ -3787,9 +4893,6 @@
HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- UNIMPLEMENTED(FATAL) << "for read barrier";
- }
bool fallback_load = false;
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kInvalid:
@@ -3982,7 +5085,8 @@
}
DCHECK(!cls->NeedsAccessCheck());
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
@@ -4011,6 +5115,9 @@
current_method_reg = locations->InAt(0).AsRegister<GpuRegister>();
}
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
@@ -4020,10 +5127,12 @@
GenerateGcRootFieldLoad(cls,
out_loc,
current_method_reg,
- ArtMethod::DeclaringClassOffset().Int32Value());
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ read_barrier_option);
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
@@ -4031,6 +5140,7 @@
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
@@ -4038,7 +5148,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
uint32_t address = dchecked_integral_cast<uint32_t>(
reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
DCHECK_NE(address, 0u);
@@ -4051,7 +5161,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
- GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
generate_null_check = true;
break;
}
@@ -4061,7 +5171,7 @@
codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
cls->GetClass()));
- GenerateGcRootFieldLoad(cls, out_loc, out, 0);
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, read_barrier_option);
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
case HLoadClass::LoadKind::kInvalid:
@@ -4158,7 +5268,11 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
- GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(load,
+ out_loc,
+ out,
+ /* placeholder */ 0x5678,
+ kCompilerReadBarrierOption);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -4171,7 +5285,7 @@
codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
load->GetStringIndex(),
load->GetString()));
- GenerateGcRootFieldLoad(load, out_loc, out, 0);
+ GenerateGcRootFieldLoad(load, out_loc, out, 0, kCompilerReadBarrierOption);
return;
default:
break;
@@ -5076,8 +6190,7 @@
// We are in the range of the table.
// Load the target address from the jump table, indexing by the value.
__ LoadLabelAddress(AT, table->GetLabel());
- __ Sll(TMP, TMP, 2);
- __ Daddu(TMP, TMP, AT);
+ __ Dlsa(TMP, TMP, AT, 2);
__ Lw(TMP, TMP, 0);
// Compute the absolute target address by adding the table start address
// (the table contains offsets to targets relative to its start).
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 6040dc9..fd1a174 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -237,6 +237,38 @@
const FieldInfo& field_info,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ // Generate a heap reference load using one register `out`:
+ //
+ // out <- *(out + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ //
+ // Location `maybe_temp` is used when generating a read barrier and
+ // shall be a register in that case; it may be an invalid location
+ // otherwise.
+ void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
+ // Generate a heap reference load using two different registers
+ // `out` and `obj`:
+ //
+ // out <- *(obj + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ //
+ // Location `maybe_temp` is used when generating a Baker's (fast
+ // path) read barrier and shall be a register in that case; it may
+ // be an invalid location otherwise.
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
+
// Generate a GC root reference load:
//
// root <- *(obj + offset)
@@ -245,7 +277,9 @@
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
GpuRegister obj,
- uint32_t offset);
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option);
+
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Mips64Label* true_target,
@@ -316,6 +350,91 @@
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference array load when Baker's read barriers are used.
+ void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
+ // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+ // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+ //
+ // Load the object reference located at the address
+ // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+ // `ref`, and mark it if needed.
+ //
+ // If `always_update_field` is true, the value of the reference is
+ // atomically updated in the holder (`obj`).
+ void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ GpuRegister obj,
+ uint32_t offset,
+ Location index,
+ ScaleFactor scale_factor,
+ Location temp,
+ bool needs_null_check,
+ bool always_update_field = false);
+
+ // Generate a read barrier for a heap reference within `instruction`
+ // using a slow path.
+ //
+ // A read barrier for an object reference read from the heap is
+ // implemented as a call to the artReadBarrierSlow runtime entry
+ // point, which is passed the values in locations `ref`, `obj`, and
+ // `offset`:
+ //
+ // mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+ // mirror::Object* obj,
+ // uint32_t offset);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierSlow.
+ //
+ // When `index` is provided (i.e. for array accesses), the offset
+ // value passed to artReadBarrierSlow is adjusted to take `index`
+ // into account.
+ void GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // If read barriers are enabled, generate a read barrier for a heap
+ // reference using a slow path. If heap poisoning is enabled, also
+ // unpoison the reference in `out`.
+ void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // Generate a read barrier for a GC root within `instruction` using
+ // a slow path.
+ //
+ // A read barrier for an object reference GC root is implemented as
+ // a call to the artReadBarrierForRootSlow runtime entry point,
+ // which is passed the value in location `root`:
+ //
+ // mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierForRootSlow.
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
+
void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
// Register allocation.
@@ -366,6 +485,14 @@
uint32_t dex_pc,
SlowPathCode* slow_path = nullptr) OVERRIDE;
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
diff --git a/compiler/optimizing/code_generator_vector_arm.cc b/compiler/optimizing/code_generator_vector_arm.cc
new file mode 100644
index 0000000..e7f7b30
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_arm.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm.h"
+
+namespace art {
+namespace arm {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<ArmAssembler*>(GetAssembler())-> // NOLINT
+
+void LocationsBuilderARM::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(locations);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM::VisitVecCnv(HVecCnv* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecCnv(HVecCnv* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecNeg(HVecNeg* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecNeg(HVecNeg* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecNot(HVecNot* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecNot(HVecNot* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector binary operations.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(locations);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM::VisitVecAdd(HVecAdd* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAdd(HVecAdd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecSub(HVecSub* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecSub(HVecSub* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecMul(HVecMul* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecMul(HVecMul* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecDiv(HVecDiv* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecDiv(HVecDiv* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecAnd(HVecAnd* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAnd(HVecAnd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecAndNot(HVecAndNot* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAndNot(HVecAndNot* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecOr(HVecOr* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecOr(HVecOr* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecXor(HVecXor* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecXor(HVecXor* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector shift operations.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK(locations);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM::VisitVecShl(HVecShl* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecShl(HVecShl* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecShr(HVecShr* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecShr(HVecShr* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecUShr(HVecUShr* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecUShr(HVecUShr* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecLoad(HVecLoad* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM::VisitVecLoad(HVecLoad* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM::VisitVecStore(HVecStore* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM::VisitVecStore(HVecStore* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+#undef __
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
new file mode 100644
index 0000000..0923920
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -0,0 +1,758 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm64.h"
+#include "mirror/array-inl.h"
+
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm64 {
+
+using helpers::VRegisterFrom;
+using helpers::HeapOperand;
+using helpers::InputRegisterAt;
+using helpers::Int64ConstantFrom;
+using helpers::XRegisterFrom;
+
+#define __ GetVIXLAssembler()->
+
+void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ VRegister dst = VRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Dup(dst.V16B(), InputRegisterAt(instruction, 0));
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Dup(dst.V8H(), InputRegisterAt(instruction, 0));
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Dup(dst.V4S(), InputRegisterAt(instruction, 0));
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Dup(dst.V2D(), XRegisterFrom(locations->InAt(0)));
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Dup(dst.V4S(), VRegisterFrom(locations->InAt(0)).V4S(), 0);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Dup(dst.V2D(), VRegisterFrom(locations->InAt(0)).V2D(), 0);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM64::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARM64::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM64::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(),
+ instruction->IsVecNot() ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ break;
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ VRegister src = VRegisterFrom(locations->InAt(0));
+ VRegister dst = VRegisterFrom(locations->Out());
+ Primitive::Type from = instruction->GetInputType();
+ Primitive::Type to = instruction->GetResultType();
+ if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Scvtf(dst.V4S(), src.V4S());
+ } else {
+ LOG(FATAL) << "Unsupported SIMD type";
+ }
+}
+
+void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ VRegister src = VRegisterFrom(locations->InAt(0));
+ VRegister dst = VRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Neg(dst.V16B(), src.V16B());
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Neg(dst.V8H(), src.V8H());
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Neg(dst.V4S(), src.V4S());
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Neg(dst.V2D(), src.V2D());
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Fneg(dst.V4S(), src.V4S());
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Fneg(dst.V2D(), src.V2D());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed absolute value: integer Abs for byte/short/int/long
+// lanes, floating-point Fabs for float/double lanes.
+void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister src = VRegisterFrom(locations->InAt(0));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Abs(dst.V16B(), src.V16B());
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Abs(dst.V8H(), src.V8H());
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Abs(dst.V4S(), src.V4S());
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Abs(dst.V2D(), src.V2D());
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Fabs(dst.V4S(), src.V4S());
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Fabs(dst.V2D(), src.V2D());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();  // added for consistency with every other SIMD switch
+  }
+}
+
+void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed bitwise complement. Booleans get a logical not
+// (x ^ 1); all other integral types take a full 128-bit Not, where lane
+// width is irrelevant to a pure bit flip.
+void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister src = VRegisterFrom(locations->InAt(0));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:  // special case boolean-not
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      // NOTE(review): Movi writes dst before src is read, so this sequence
+      // is only correct if dst cannot alias src -- confirm the locations
+      // helper forces output overlap for the boolean case.
+      __ Movi(dst.V16B(), 1);
+      __ Eor(dst.V16B(), dst.V16B(), src.V16B());
+      break;
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      __ Not(dst.V16B(), src.V16B());  // lanes do not matter
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector binary operations: both inputs and
+// the output live in FPU/SIMD registers; no output overlap is required since
+// the generated three-operand NEON instructions read both inputs first.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed addition: integer Add or floating-point Fadd,
+// selecting the NEON arrangement from the packed type / vector length.
+void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Add(dst.V16B(), lhs.V16B(), rhs.V16B());
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Add(dst.V8H(), lhs.V8H(), rhs.V8H());
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Add(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Add(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Fadd(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Fadd(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed subtraction (lhs - rhs): integer Sub or
+// floating-point Fsub, per packed type.
+void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Sub(dst.V16B(), lhs.V16B(), rhs.V16B());
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Sub(dst.V8H(), lhs.V8H(), rhs.V8H());
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Sub(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Sub(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Fsub(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Fsub(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed multiplication: integer Mul or floating-point
+// Fmul. There is deliberately no kPrimLong case here -- presumably because
+// NEON has no 2x64-bit integer multiply (confirm against the vectorizer's
+// supported-operation checks).
+void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Mul(dst.V16B(), lhs.V16B(), rhs.V16B());
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Mul(dst.V8H(), lhs.V8H(), rhs.V8H());
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Mul(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Fmul(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Fmul(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed division. Only floating-point lanes are handled
+// (Fdiv); integer packed types are rejected as unsupported.
+void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Fdiv(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Fdiv(dst.V2D(), lhs.V2D(), rhs.V2D());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed bitwise AND. A single full-width And covers all
+// packed types, since lane boundaries are irrelevant to bitwise operations.
+void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      __ And(dst.V16B(), lhs.V16B(), rhs.V16B());  // lanes do not matter
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// HVecAndNot is not generated for ARM64 yet; reaching these visitors is a
+// compiler bug, hence the unconditional fatal logs.
+void LocationsBuilderARM64::VisitVecAndNot(HVecAndNot* instruction) {
+  LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARM64::VisitVecAndNot(HVecAndNot* instruction) {
+  LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId();
+}
+
+void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed bitwise OR via a single full-width Orr.
+void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      __ Orr(dst.V16B(), lhs.V16B(), rhs.V16B());  // lanes do not matter
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed bitwise XOR via a single full-width Eor.
+void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister rhs = VRegisterFrom(locations->InAt(1));
+  VRegister dst = VRegisterFrom(locations->Out());
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      __ Eor(dst.V16B(), lhs.V16B(), rhs.V16B());  // lanes do not matter
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector shift operations. The shift amount
+// (input 1) must be a constant: it is encoded directly as the immediate of
+// the NEON shift instruction, so no register is allocated for it.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed shift-left by an immediate distance.
+void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister dst = VRegisterFrom(locations->Out());
+  // Shift distance is guaranteed constant by CreateVecShiftLocations.
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Shl(dst.V16B(), lhs.V16B(), value);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Shl(dst.V8H(), lhs.V8H(), value);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Shl(dst.V4S(), lhs.V4S(), value);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Shl(dst.V2D(), lhs.V2D(), value);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed arithmetic (signed) shift-right by an immediate.
+void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister dst = VRegisterFrom(locations->Out());
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Sshr(dst.V16B(), lhs.V16B(), value);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Sshr(dst.V8H(), lhs.V8H(), value);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Sshr(dst.V4S(), lhs.V4S(), value);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Sshr(dst.V2D(), lhs.V2D(), value);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Generates code for packed logical (unsigned) shift-right by an immediate.
+void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  VRegister lhs = VRegisterFrom(locations->InAt(0));
+  VRegister dst = VRegisterFrom(locations->Out());
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Ushr(dst.V16B(), lhs.V16B(), value);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Ushr(dst.V8H(), lhs.V8H(), value);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Ushr(dst.V4S(), lhs.V4S(), value);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Ushr(dst.V2D(), lhs.V2D(), value);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector memory operations. Input 0 is the
+// array base (core register), input 1 the index (register or constant); a
+// load produces a SIMD register output, a store instead consumes the value
+// to write as input 2.
+static void CreateVecMemLocations(ArenaAllocator* arena,
+                                  HVecMemoryOperation* instruction,
+                                  bool is_load) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+      if (is_load) {
+        locations->SetOut(Location::RequiresFpuRegister());
+      } else {
+        locations->SetInAt(2, Location::RequiresFpuRegister());
+      }
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up registers and address for vector memory operations.
+// Computes base + data-offset + (index << element-size-shift) into a scratch
+// register and returns it as a heap operand; also reports (via reg_loc) the
+// location of the SIMD register being loaded into or stored from.
+MemOperand InstructionCodeGeneratorARM64::CreateVecMemRegisters(
+    HVecMemoryOperation* instruction,
+    Location* reg_loc,
+    bool is_load) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register base = InputRegisterAt(instruction, 0);
+  Location index = locations->InAt(1);
+  *reg_loc = is_load ? locations->Out() : locations->InAt(2);
+
+  Primitive::Type packed_type = instruction->GetPackedType();
+  uint32_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(packed_type)).Uint32Value();
+  size_t shift = Primitive::ComponentSizeShift(packed_type);
+
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  Register temp = temps.AcquireSameSizeAs(base);
+  if (index.IsConstant()) {
+    offset += Int64ConstantFrom(index) << shift;
+    __ Add(temp, base, offset);
+  } else {
+    if (instruction->InputAt(0)->IsIntermediateAddress()) {
+      // Base already includes the data offset.
+      // NOTE(review): temp aliases base here, so the Add below writes into
+      // the base register -- confirm base is not live after this instruction.
+      temp = base;
+    } else {
+      __ Add(temp, base, offset);
+    }
+    __ Add(temp.X(), temp.X(), Operand(XRegisterFrom(index), LSL, shift));
+  }
+  return HeapOperand(temp);
+}
+
+void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) {
+  CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+}
+
+// Generates a 128-bit vector load (Ld1) from the computed array address,
+// choosing the lane arrangement from the packed type.
+void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
+  Location reg_loc = Location::NoLocation();
+  // Fixed mojibake: "&reg_loc" had been corrupted to an HTML entity.
+  MemOperand mem = CreateVecMemRegisters(instruction, &reg_loc, /*is_load*/ true);
+  VRegister reg = VRegisterFrom(reg_loc);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ Ld1(reg.V16B(), mem);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ Ld1(reg.V8H(), mem);
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Ld1(reg.V4S(), mem);
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Ld1(reg.V2D(), mem);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) {
+  CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+}
+
+// Generates a 128-bit vector store (St1) to the computed array address,
+// choosing the lane arrangement from the packed type.
+void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) {
+  Location reg_loc = Location::NoLocation();
+  // Fixed mojibake: "&reg_loc" had been corrupted to an HTML entity.
+  MemOperand mem = CreateVecMemRegisters(instruction, &reg_loc, /*is_load*/ false);
+  VRegister reg = VRegisterFrom(reg_loc);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ St1(reg.V16B(), mem);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ St1(reg.V8H(), mem);
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ St1(reg.V4S(), mem);
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ St1(reg.V2D(), mem);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+#undef __
+
+} // namespace arm64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
new file mode 100644
index 0000000..74fa584
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm_vixl.h"
+
+namespace art {
+namespace arm {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()-> // NOLINT
+
+// SIMD is not implemented for ARM (VIXL32) yet: every codegen visitor below
+// is a fatal stub, and the Create*Locations helpers only validate the packed
+// type (via the locations DCHECK) without allocating any registers.
+void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector binary operations.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector shift operations.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+#undef __
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
new file mode 100644
index 0000000..6969abd
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips.h"
+
+namespace art {
+namespace mips {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
+
+// SIMD is not implemented for MIPS32 yet: every codegen visitor below is a
+// fatal stub, and the Create*Locations helpers only validate the packed
+// type (via the locations DCHECK) without allocating any registers.
+void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector binary operations.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector shift operations.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+#undef __
+
+} // namespace mips
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
new file mode 100644
index 0000000..87118ce
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
+
+// SIMD is not implemented on MIPS64 yet; these visitors abort if reached.
+void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+// Only the packed type is validated; no operand locations are set because the
+// corresponding code generator visitors LOG(FATAL) (SIMD not implemented yet).
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      // Accepted packed types; DCHECK keeps 'locations' used until real
+      // register constraints are assigned.
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Unary vector operations: locations only validate the packed type; code
+// generation is unimplemented and aborts.
+void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector binary operations.
+// Only the packed type is validated; no operand locations are set because the
+// corresponding code generator visitors LOG(FATAL) (SIMD not implemented yet).
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      // Accepted packed types; DCHECK keeps 'locations' used until real
+      // register constraints are assigned.
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Binary vector operations: locations only validate the packed type; code
+// generation is unimplemented and aborts.
+void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector shift operations.
+// Only the packed type is validated; no operand locations are set because the
+// corresponding code generator visitors LOG(FATAL) (SIMD not implemented yet).
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      // Accepted packed types; DCHECK keeps 'locations' used until real
+      // register constraints are assigned.
+      DCHECK(locations);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Shift operations: locations only validate the packed type; code generation
+// is unimplemented and aborts.
+void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Vector memory operations are fully unimplemented: even the location
+// builders abort.
+void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+#undef __
+
+} // namespace mips64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
new file mode 100644
index 0000000..8dabb4d
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_x86.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+namespace x86 {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
+
+// Sets up operand locations for broadcasting a scalar into all lanes of a
+// 128-bit XMM register. Integral types come in via a core register (pair for
+// long); floating-point types are broadcast in place (see codegen below,
+// which shuffles the input register onto itself), hence SameAsFirstInput.
+void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimLong:
+      // Long needs extra temporary to load the register pair.
+      locations->AddTemp(Location::RequiresFpuRegister());
+      FALLTHROUGH_INTENDED;
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresFpuRegister());
+      break;
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Broadcasts the scalar input across all lanes of the output XMM register.
+// Integral values are moved into the low lane with movd, then widened with
+// punpck* unpacks until pshufd can replicate a full 32-bit pattern.
+void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ movd(reg, locations->InAt(0).AsRegister<Register>());
+      __ punpcklbw(reg, reg);  // byte -> 16-bit pair
+      __ punpcklwd(reg, reg);  // 16-bit -> 32-bit pattern
+      __ pshufd(reg, reg, Immediate(0));  // replicate low dword to all lanes
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ movd(reg, locations->InAt(0).AsRegister<Register>());
+      __ punpcklwd(reg, reg);
+      __ pshufd(reg, reg, Immediate(0));
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ movd(reg, locations->InAt(0).AsRegister<Register>());
+      __ pshufd(reg, reg, Immediate(0));
+      break;
+    case Primitive::kPrimLong: {
+      // On x86-32 a long lives in a register pair: assemble low/high halves in
+      // two XMM registers, interleave, then duplicate the 64-bit value.
+      XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ movd(reg, locations->InAt(0).AsRegisterPairLow<Register>());
+      __ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>());
+      __ punpckldq(reg, tmp);
+      __ punpcklqdq(reg, reg);
+      break;
+    }
+    case Primitive::kPrimFloat:
+      // In-place broadcast: shuffle lane 0 onto all four lanes.
+      DCHECK(locations->InAt(0).Equals(locations->Out()));
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ shufps(reg, reg, Immediate(0));
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK(locations->InAt(0).Equals(locations->Out()));
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ shufpd(reg, reg, Immediate(0));
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// SetScalars and SumReduce are not implemented on x86 yet; abort if reached.
+void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderX86::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecSumReduce(HVecSumReduce* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+// All unary ops read one XMM register and write a (possibly different) XMM
+// register; visitors that need extra scratch registers add temps themselves.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Vector conversion: only int -> float is supported, via cvtdq2ps (packed
+// signed dword to packed single-precision). Any other combination aborts.
+void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  Primitive::Type from = instruction->GetInputType();
+  Primitive::Type to = instruction->GetResultType();
+  if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) {
+    DCHECK_EQ(4u, instruction->GetVectorLength());
+    __ cvtdq2ps(dst, src);
+  } else {
+    LOG(FATAL) << "Unsupported SIMD type";
+  }
+}
+
+void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Vector negation computed as (0 - src): zero the destination, then subtract
+// the source lane-wise with the width-appropriate instruction.
+void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ pxor(dst, dst);
+      __ psubb(dst, src);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ pxor(dst, dst);
+      __ psubw(dst, src);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pxor(dst, dst);
+      __ psubd(dst, src);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ pxor(dst, dst);
+      __ psubq(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ xorps(dst, dst);
+      __ subps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ xorpd(dst, dst);
+      __ subpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+  // Integer abs needs a scratch register for the sign mask (see codegen).
+  if (instruction->GetPackedType() == Primitive::kPrimInt) {
+    instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  }
+}
+
+// Vector absolute value.
+// Int: branchless abs via sign mask: tmp = (x < 0) ? ~0 : 0, then
+//      (x ^ tmp) - tmp flips and corrects negative lanes.
+// Float/double: clear the sign bit by AND-ing with an all-ones mask shifted
+//      right by one (0x7FFFFFFF / 0x7FFF...F per lane).
+// NOTE(review): for the float/double paths dst is clobbered before src is
+// read; this assumes the register allocator does not assign dst == src here —
+// confirm output-overlap constraints.
+void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimInt: {
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+      __ movaps(dst, src);
+      __ pxor(tmp, tmp);
+      __ pcmpgtd(tmp, dst);  // tmp lane = all-ones iff x < 0
+      __ pxor(dst, tmp);
+      __ psubd(dst, tmp);
+      break;
+    }
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pcmpeqb(dst, dst);  // all ones
+      __ psrld(dst, Immediate(1));  // 0x7FFFFFFF per lane
+      __ andps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ pcmpeqb(dst, dst);  // all ones
+      __ psrlq(dst, Immediate(1));
+      __ andpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
+  CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+  // Boolean-not requires a temporary to construct the 16 x one.
+  if (instruction->GetPackedType() == Primitive::kPrimBoolean) {
+    instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  }
+}
+
+// Vector bitwise-not. Booleans are logical-not: build a vector of 0x01 bytes
+// and XOR (flips only the low bit). All other types XOR with all-ones.
+void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean: {  // special case boolean-not
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+      __ pxor(dst, dst);
+      __ pcmpeqb(tmp, tmp);  // all ones
+      __ psubb(dst, tmp);  // 16 x one
+      __ pxor(dst, src);
+      break;
+    }
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      __ pcmpeqb(dst, dst);  // all ones
+      __ pxor(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pcmpeqb(dst, dst);  // all ones
+      __ xorps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ pcmpeqb(dst, dst);  // all ones
+      __ xorpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector binary operations.
+// SSE binary ops are destructive (dst op= src), so the output is constrained
+// to the first input register.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise addition; dst (== first input) += second input, using the
+// instruction matching the packed lane width.
+void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ paddb(dst, src);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ paddw(dst, src);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ paddd(dst, src);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ paddq(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ addps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ addpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise subtraction; dst (== first input) -= second input.
+void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimByte:
+      DCHECK_EQ(16u, instruction->GetVectorLength());
+      __ psubb(dst, src);
+      break;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ psubw(dst, src);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ psubd(dst, src);
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ psubq(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ subps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ subpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise multiplication. Note: no byte or long cases — SSE has no packed
+// 8-bit or 64-bit multiply; those packed types abort here.
+void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ pmullw(dst, src);
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pmulld(dst, src);  // requires SSE4.1
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ mulps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ mulpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise division; only floating-point is supported (SSE has no packed
+// integer divide).
+void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ divps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ divpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise bitwise AND; integer/FP variants are bit-identical, chosen only
+// for execution-domain reasons.
+void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      __ pand(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ andps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ andpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// NOTE(review): pandn/andnps/andnpd compute (~dst) & src, i.e.
+// ~InAt(0) & InAt(1). Confirm this matches HVecAndNot's defined operand
+// order (if the node means InAt(0) & ~InAt(1), the operands are flipped).
+void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      __ pandn(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ andnps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ andnpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise bitwise OR; integer/FP variants are bit-identical.
+void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      __ por(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ orps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ orpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) {
+  CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise bitwise XOR; integer/FP variants are bit-identical.
+void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      __ pxor(dst, src);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ xorps(dst, src);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ xorpd(dst, src);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector shift operations.
+// The shift amount must be a constant (encoded as an immediate); the shifted
+// vector is updated in place, hence SameAsFirstInput.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise logical shift left by a constant immediate.
+void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ psllw(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pslld(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ psllq(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise arithmetic shift right by a constant immediate.
+// No kPrimLong case: SSE2 has no packed 64-bit arithmetic right shift.
+void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ psraw(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ psrad(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) {
+  CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+// Lane-wise logical shift right by a constant immediate.
+void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+  XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      DCHECK_EQ(8u, instruction->GetVectorLength());
+      __ psrlw(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    case Primitive::kPrimInt:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ psrld(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    case Primitive::kPrimLong:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ psrlq(dst, Immediate(static_cast<uint8_t>(value)));
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up locations for vector memory operations.
+// InAt(0) is the array base, InAt(1) the index (register or constant). For a
+// load the vector is the output; for a store it is a third input.
+static void CreateVecMemLocations(ArenaAllocator* arena,
+                                  HVecMemoryOperation* instruction,
+                                  bool is_load) {
+  LocationSummary* locations = new (arena) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+      if (is_load) {
+        locations->SetOut(Location::RequiresFpuRegister());
+      } else {
+        locations->SetInAt(2, Location::RequiresFpuRegister());
+      }
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+// Helper to set up registers and address for vector memory operations.
+// Returns the array-element address (base + index * element-size + data
+// offset) and writes the vector register's location (output for loads, third
+// input for stores) through *reg_loc.
+static Address CreateVecMemRegisters(HVecMemoryOperation* instruction,
+                                     Location* reg_loc,
+                                     bool is_load) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location base = locations->InAt(0);
+  Location index = locations->InAt(1);
+  *reg_loc = is_load ? locations->Out() : locations->InAt(2);
+  size_t size = Primitive::ComponentSize(instruction->GetPackedType());
+  uint32_t offset = mirror::Array::DataOffset(size).Uint32Value();
+  // Map element size to an addressing-mode scale factor (1-byte elements use
+  // the default TIMES_1).
+  ScaleFactor scale = TIMES_1;
+  switch (size) {
+    case 2: scale = TIMES_2; break;
+    case 4: scale = TIMES_4; break;
+    case 8: scale = TIMES_8; break;
+    default: break;
+  }
+  return CodeGeneratorX86::ArrayAddress(base.AsRegister<Register>(), index, scale, offset);
+}
+
+void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) {
+  CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+}
+
+// Loads a full 128-bit vector from an array, using aligned moves when the
+// access is known 16-byte aligned and unaligned moves otherwise.
+void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) {
+  Location reg_loc = Location::NoLocation();
+  // Fixed mis-encoded '®_loc' (HTML-entity corruption of '&reg_loc').
+  Address address = CreateVecMemRegisters(instruction, &reg_loc, /*is_load*/ true);
+  XmmRegister reg = reg_loc.AsFpuRegister<XmmRegister>();
+  bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) {
+  CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+}
+
+// Stores a full 128-bit vector to an array, using aligned moves when the
+// access is known 16-byte aligned and unaligned moves otherwise.
+void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) {
+  Location reg_loc = Location::NoLocation();
+  // Fixed mis-encoded '®_loc' (HTML-entity corruption of '&reg_loc').
+  Address address = CreateVecMemRegisters(instruction, &reg_loc, /*is_load*/ false);
+  XmmRegister reg = reg_loc.AsFpuRegister<XmmRegister>();
+  bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
+  switch (instruction->GetPackedType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      DCHECK_LE(2u, instruction->GetVectorLength());
+      DCHECK_LE(instruction->GetVectorLength(), 16u);
+      is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg);
+      break;
+    case Primitive::kPrimFloat:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg);
+      break;
+    case Primitive::kPrimDouble:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      is_aligned16 ? __ movapd(address, reg) : __ movupd(address, reg);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
+#undef __
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
new file mode 100644
index 0000000..e956088
--- /dev/null
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -0,0 +1,800 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_x86_64.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+namespace x86_64 {
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
+
+void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ movd(reg, locations->InAt(0).AsRegister<CpuRegister>());
+ __ punpcklbw(reg, reg);
+ __ punpcklwd(reg, reg);
+ __ pshufd(reg, reg, Immediate(0));
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ movd(reg, locations->InAt(0).AsRegister<CpuRegister>());
+ __ punpcklwd(reg, reg);
+ __ pshufd(reg, reg, Immediate(0));
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ movd(reg, locations->InAt(0).AsRegister<CpuRegister>());
+ __ pshufd(reg, reg, Immediate(0));
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ movd(reg, locations->InAt(0).AsRegister<CpuRegister>()); // is 64-bit
+ __ punpcklqdq(reg, reg);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ shufps(reg, reg, Immediate(0));
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ shufpd(reg, reg, Immediate(0));
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecSetScalars(HVecSetScalars* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void LocationsBuilderX86_64::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecSumReduce(HVecSumReduce* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+// Helper to set up locations for vector unary operations.
+static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ Primitive::Type from = instruction->GetInputType();
+ Primitive::Type to = instruction->GetResultType();
+ if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ cvtdq2ps(dst, src);
+ } else {
+ LOG(FATAL) << "Unsupported SIMD type";
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ pxor(dst, dst);
+ __ psubb(dst, src);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pxor(dst, dst);
+ __ psubw(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pxor(dst, dst);
+ __ psubd(dst, src);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pxor(dst, dst);
+ __ psubq(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ xorps(dst, dst);
+ __ subps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ xorpd(dst, dst);
+ __ subpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ if (instruction->GetPackedType() == Primitive::kPrimInt) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movaps(dst, src);
+ __ pxor(tmp, tmp);
+ __ pcmpgtd(tmp, dst);
+ __ pxor(dst, tmp);
+ __ psubd(dst, tmp);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrld(dst, Immediate(1));
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrlq(dst, Immediate(1));
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ // Boolean-not requires a temporary to construct the 16 x one.
+ if (instruction->GetPackedType() == Primitive::kPrimBoolean) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean: { // special case boolean-not
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ pxor(dst, dst);
+ __ pcmpeqb(tmp, tmp); // all ones
+ __ psubb(dst, tmp); // 16 x one
+ __ pxor(dst, src);
+ break;
+ }
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ pcmpeqb(dst, dst); // all ones
+ __ pxor(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ xorps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ xorpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+// Helper to set up locations for vector binary operations.
+static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ paddb(dst, src);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ paddw(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ paddd(dst, src);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ paddq(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ addps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ addpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ psubb(dst, src);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ psubw(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ psubd(dst, src);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ psubq(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ subps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ subpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pmullw(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmulld(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ mulps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ mulpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ divps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ divpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ pand(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ pandn(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ andnps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ andnpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ por(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ orps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ orpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) {
+ CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ pxor(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ xorps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ xorpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+// Helper to set up locations for vector shift operations.
+static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ psllw(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pslld(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ psllq(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ psraw(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ psrad(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) {
+ CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ psrlw(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ psrld(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ psrlq(dst, Immediate(static_cast<int8_t>(value)));
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+// Helper to set up locations for vector memory operations.
+static void CreateVecMemLocations(ArenaAllocator* arena,
+ HVecMemoryOperation* instruction,
+ bool is_load) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (is_load) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+// Helper to set up registers and address for vector memory operations.
+static Address CreateVecMemRegisters(HVecMemoryOperation* instruction,
+ Location* reg_loc,
+ bool is_load) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location base = locations->InAt(0);
+ Location index = locations->InAt(1);
+ *reg_loc = is_load ? locations->Out() : locations->InAt(2);
+ size_t size = Primitive::ComponentSize(instruction->GetPackedType());
+ uint32_t offset = mirror::Array::DataOffset(size).Uint32Value();
+ ScaleFactor scale = TIMES_1;
+ switch (size) {
+ case 2: scale = TIMES_2; break;
+ case 4: scale = TIMES_4; break;
+ case 8: scale = TIMES_8; break;
+ default: break;
+ }
+ return CodeGeneratorX86_64::ArrayAddress(base.AsRegister<CpuRegister>(), index, scale, offset);
+}
+
+void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) {
+ CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) {
+ Location reg_loc = Location::NoLocation();
+ Address address = CreateVecMemRegisters(instruction, ®_loc, /*is_load*/ true);
+ XmmRegister reg = reg_loc.AsFpuRegister<XmmRegister>();
+ bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) {
+ CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) {
+ Location reg_loc = Location::NoLocation();
+ Address address = CreateVecMemRegisters(instruction, ®_loc, /*is_load*/ false);
+ XmmRegister reg = reg_loc.AsFpuRegister<XmmRegister>();
+ bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ is_aligned16 ? __ movapd(address, reg) : __ movupd(address, reg);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+#undef __
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0b50619..08a752f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -183,10 +183,13 @@
: SlowPathCode(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -720,7 +723,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
@@ -963,12 +966,20 @@
}
size_t CodeGeneratorX86::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- __ movsd(Address(ESP, stack_index), XmmRegister(reg_id));
+ if (GetGraph()->HasSIMD()) {
+ __ movups(Address(ESP, stack_index), XmmRegister(reg_id));
+ } else {
+ __ movsd(Address(ESP, stack_index), XmmRegister(reg_id));
+ }
return GetFloatingPointSpillSlotSize();
}
size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- __ movsd(XmmRegister(reg_id), Address(ESP, stack_index));
+ if (GetGraph()->HasSIMD()) {
+ __ movups(XmmRegister(reg_id), Address(ESP, stack_index));
+ } else {
+ __ movsd(XmmRegister(reg_id), Address(ESP, stack_index));
+ }
return GetFloatingPointSpillSlotSize();
}
@@ -5699,7 +5710,11 @@
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // In suspend check slow path, usually there are no caller-save registers at all.
+ // If SIMD instructions are present, however, we force spilling all live SIMD
+ // registers in full width (since the runtime only saves/restores lower part).
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5802,9 +5817,11 @@
__ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
} else if (destination.IsStackSlot()) {
__ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(destination.IsDoubleStackSlot());
+ } else if (destination.IsDoubleStackSlot()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
+ } else {
+ DCHECK(destination.IsSIMDStackSlot());
+ __ movups(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
@@ -5826,6 +5843,9 @@
DCHECK(destination.IsDoubleStackSlot()) << destination;
MoveMemoryToMemory64(destination.GetStackIndex(), source.GetStackIndex());
}
+ } else if (source.IsSIMDStackSlot()) {
+ DCHECK(destination.IsFpuRegister());
+ __ movups(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
if (constant->IsIntConstant() || constant->IsNullConstant()) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 65ee383..ca3a9ea 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -348,8 +348,9 @@
}
size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
- // 8 bytes == 2 words for each spill.
- return 2 * kX86WordSize;
+ return GetGraph()->HasSIMD()
+ ? 4 * kX86WordSize // 16 bytes == 4 words for each spill
+ : 2 * kX86WordSize; // 8 bytes == 2 words for each spill
}
HGraphVisitor* GetLocationBuilder() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 08f1adf..ff6e099 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -140,10 +140,13 @@
: SlowPathCode(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -741,7 +744,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
@@ -1158,13 +1161,21 @@
}
size_t CodeGeneratorX86_64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- __ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
- return kX86_64WordSize;
+ if (GetGraph()->HasSIMD()) {
+ __ movups(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
+ } else {
+ __ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
+ }
+ return GetFloatingPointSpillSlotSize();
}
size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- __ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
- return kX86_64WordSize;
+ if (GetGraph()->HasSIMD()) {
+ __ movups(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
+ } else {
+ __ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
+ }
+ return GetFloatingPointSpillSlotSize();
}
void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -3649,7 +3660,7 @@
void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
DCHECK(instruction->IsDiv() || instruction->IsRem());
Primitive::Type type = instruction->GetResultType();
- DCHECK(type == Primitive::kPrimInt || Primitive::kPrimLong);
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
bool is_div = instruction->IsDiv();
LocationSummary* locations = instruction->GetLocations();
@@ -5152,7 +5163,11 @@
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // In suspend check slow path, usually there are no caller-save registers at all.
+ // If SIMD instructions are present, however, we force spilling all live SIMD
+ // registers in full width (since the runtime only saves/restores lower part).
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5241,6 +5256,10 @@
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
+ } else if (source.IsSIMDStackSlot()) {
+ DCHECK(destination.IsFpuRegister());
+ __ movups(destination.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), source.GetStackIndex()));
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
if (constant->IsIntConstant() || constant->IsNullConstant()) {
@@ -5291,10 +5310,13 @@
} else if (destination.IsStackSlot()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
source.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(destination.IsDoubleStackSlot()) << destination;
+ } else if (destination.IsDoubleStackSlot()) {
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
source.AsFpuRegister<XmmRegister>());
+ } else {
+ DCHECK(destination.IsSIMDStackSlot());
+ __ movups(Address(CpuRegister(RSP), destination.GetStackIndex()),
+ source.AsFpuRegister<XmmRegister>());
}
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 376c3ce..c8336da 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -326,7 +326,9 @@
}
size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
- return kX86_64WordSize;
+ return GetGraph()->HasSIMD()
+ ? 2 * kX86_64WordSize // 16 bytes == 2 x86_64 words for each spill
+ : 1 * kX86_64WordSize; // 8 bytes == 1 x86_64 word for each spill
}
HGraphVisitor* GetLocationBuilder() OVERRIDE {
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f8bbf68..4ba5c55 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -769,6 +769,45 @@
InternalCodeAllocator code_allocator;
codegen.Finalize(&code_allocator);
}
+
+// Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
+TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
+ std::unique_ptr<const Arm64InstructionSetFeatures> features(
+ Arm64InstructionSetFeatures::FromCppDefines());
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+ arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
+
+ codegen.Initialize();
+
+ graph->SetHasSIMD(true);
+ for (int i = 0; i < 2; i++) {
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ move->AddMove(Location::SIMDStackSlot(0),
+ Location::SIMDStackSlot(257),
+ Primitive::kPrimDouble,
+ nullptr);
+ move->AddMove(Location::SIMDStackSlot(257),
+ Location::SIMDStackSlot(0),
+ Primitive::kPrimDouble,
+ nullptr);
+ move->AddMove(Location::FpuRegisterLocation(0),
+ Location::FpuRegisterLocation(1),
+ Primitive::kPrimDouble,
+ nullptr);
+ move->AddMove(Location::FpuRegisterLocation(1),
+ Location::FpuRegisterLocation(0),
+ Primitive::kPrimDouble,
+ nullptr);
+ codegen.GetMoveResolver()->EmitNativeCode(move);
+ graph->SetHasSIMD(false);
+ }
+
+ InternalCodeAllocator code_allocator;
+ codegen.Finalize(&code_allocator);
+}
+
#endif
#ifdef ART_ENABLE_CODEGEN_mips
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index cd95404..31cd204 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -74,7 +74,6 @@
}
private:
- CodegenTargetConfig() {}
InstructionSet isa_;
CreateCodegenFn create_codegen_;
};
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index e184745..01304ac 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -66,6 +66,11 @@
return vixl::aarch32::SRegister(location.AsFpuRegisterPairLow<vixl::aarch32::SRegister>());
}
+inline vixl::aarch32::SRegister HighSRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegisterPair()) << location;
+ return vixl::aarch32::SRegister(location.AsFpuRegisterPairHigh<vixl::aarch32::SRegister>());
+}
+
inline vixl::aarch32::Register RegisterFrom(Location location) {
DCHECK(location.IsRegister()) << location;
return vixl::aarch32::Register(location.reg());
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index d3f431e..721f74e 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -92,6 +92,16 @@
return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
}
+inline vixl::aarch64::FPRegister QRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch64::FPRegister::GetQRegFromCode(location.reg());
+}
+
+inline vixl::aarch64::FPRegister VRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch64::FPRegister::GetVRegFromCode(location.reg());
+}
+
inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
DCHECK(location.IsFpuRegister()) << location;
return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2bf5c53..cc3c143 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -322,9 +322,11 @@
codegen_.DumpCoreRegister(stream, location.high());
} else if (location.IsUnallocated()) {
stream << "unallocated";
- } else {
- DCHECK(location.IsDoubleStackSlot());
+ } else if (location.IsDoubleStackSlot()) {
stream << "2x" << location.GetStackIndex() << "(sp)";
+ } else {
+ DCHECK(location.IsSIMDStackSlot());
+ stream << "4x" << location.GetStackIndex() << "(sp)";
}
}
@@ -503,6 +505,10 @@
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
+ void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
+ StartAttributeStream("kind") << deoptimize->GetKind();
+ }
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 82ee93d..9516ccb 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -29,7 +29,21 @@
*/
class InductionVarAnalysisTest : public CommonCompilerTest {
public:
- InductionVarAnalysisTest() : pool_(), allocator_(&pool_) {
+ InductionVarAnalysisTest()
+ : pool_(),
+ allocator_(&pool_),
+ iva_(nullptr),
+ entry_(nullptr),
+ return_(nullptr),
+ exit_(nullptr),
+ parameter_(nullptr),
+ constant0_(nullptr),
+ constant1_(nullptr),
+ constant2_(nullptr),
+ constant7_(nullptr),
+ constant100_(nullptr),
+ constantm1_(nullptr),
+ float_constant0_(nullptr) {
graph_ = CreateGraph(&allocator_);
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d6513c8..1c8674d 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -383,12 +383,13 @@
return false;
}
-bool InductionVarRange::IsUnitStride(HInstruction* instruction,
+bool InductionVarRange::IsUnitStride(HInstruction* context,
+ HInstruction* instruction,
/*out*/ HInstruction** offset) const {
HLoopInformation* loop = nullptr;
HInductionVarAnalysis::InductionInfo* info = nullptr;
HInductionVarAnalysis::InductionInfo* trip = nullptr;
- if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) {
+ if (HasInductionInfo(context, instruction, &loop, &info, &trip)) {
if (info->induction_class == HInductionVarAnalysis::kLinear &&
info->op_b->operation == HInductionVarAnalysis::kFetch &&
!HInductionVarAnalysis::IsNarrowingLinear(info)) {
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 0858d73..a8ee829 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -156,10 +156,14 @@
bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
/**
- * Checks if instruction is a unit stride induction inside the closest enveloping loop.
- * Returns invariant offset on success.
+ * Checks if the given instruction is a unit stride induction inside the closest enveloping
+ * loop of the context that is defined by the first parameter (e.g. pass an array reference
+ * as context and the index as instruction to make sure the stride is tested against the
+ * loop that envelops the reference the closest). Returns invariant offset on success.
*/
- bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const;
+ bool IsUnitStride(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/ HInstruction** offset) const;
/**
* Generates the trip count expression for the given loop. Code is generated in given block
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index fcdf8eb..d01d314 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -770,7 +770,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(1000, tc);
HInstruction* offset = nullptr;
- EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(range_.IsUnitStride(phi, phi, &offset));
EXPECT_TRUE(offset == nullptr);
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
@@ -826,7 +826,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(1000, tc);
HInstruction* offset = nullptr;
- EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ EXPECT_FALSE(range_.IsUnitStride(phi, phi, &offset));
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
ASSERT_TRUE(tce != nullptr);
@@ -908,7 +908,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(0, tc); // unknown
HInstruction* offset = nullptr;
- EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(range_.IsUnitStride(phi, phi, &offset));
EXPECT_TRUE(offset == nullptr);
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
@@ -994,7 +994,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(0, tc); // unknown
HInstruction* offset = nullptr;
- EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ EXPECT_FALSE(range_.IsUnitStride(phi, phi, &offset));
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
ASSERT_TRUE(tce != nullptr);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8a813bd..66948eb 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -46,29 +46,100 @@
namespace art {
-static constexpr size_t kMaximumNumberOfHInstructions = 32;
+// Instruction limit to control memory.
+static constexpr size_t kMaximumNumberOfTotalInstructions = 1024;
+
+// Maximum number of instructions for considering a method small,
+// which we will always try to inline if the other non-instruction limits
+// are not reached.
+static constexpr size_t kMaximumNumberOfInstructionsForSmallMethod = 3;
// Limit the number of dex registers that we accumulate while inlining
// to avoid creating large amount of nested environments.
static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64;
-// Avoid inlining within a huge method due to memory pressure.
-static constexpr size_t kMaximumCodeUnitSize = 4096;
+// Limit recursive call inlining, which does not benefit from too
+// much inlining compared to code locality.
+static constexpr size_t kMaximumNumberOfRecursiveCalls = 4;
+
+// Controls the use of inline caches in AOT mode.
+static constexpr bool kUseAOTInlineCaches = true;
+
+// We check for line numbers to make sure the DepthString implementation
+// aligns the output nicely.
+#define LOG_INTERNAL(msg) \
+ static_assert(__LINE__ > 10, "Unhandled line number"); \
+ static_assert(__LINE__ < 10000, "Unhandled line number"); \
+ VLOG(compiler) << DepthString(__LINE__) << msg
+
+#define LOG_TRY() LOG_INTERNAL("Try inlining call: ")
+#define LOG_NOTE() LOG_INTERNAL("Note: ")
+#define LOG_SUCCESS() LOG_INTERNAL("Success: ")
+#define LOG_FAIL(stat) MaybeRecordStat(stat); LOG_INTERNAL("Fail: ")
+#define LOG_FAIL_NO_STAT() LOG_INTERNAL("Fail: ")
+
+std::string HInliner::DepthString(int line) const {
+ std::string value;
+ // Indent according to the inlining depth.
+ size_t count = depth_;
+ // Line numbers get printed in the log, so add a space if the log's line is less
+ // than 1000, and two if less than 100. 10 cannot be reached as it's the copyright.
+ if (!kIsTargetBuild) {
+ if (line < 100) {
+ value += " ";
+ }
+ if (line < 1000) {
+ value += " ";
+ }
+ // Safeguard if this file reaches more than 10000 lines.
+ DCHECK_LT(line, 10000);
+ }
+ for (size_t i = 0; i < count; ++i) {
+ value += " ";
+ }
+ return value;
+}
+
+static size_t CountNumberOfInstructions(HGraph* graph) {
+ size_t number_of_instructions = 0;
+ for (HBasicBlock* block : graph->GetReversePostOrderSkipEntryBlock()) {
+ for (HInstructionIterator instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ ++number_of_instructions;
+ }
+ }
+ return number_of_instructions;
+}
+
+void HInliner::UpdateInliningBudget() {
+ if (total_number_of_instructions_ >= kMaximumNumberOfTotalInstructions) {
+ // Always try to inline small methods.
+ inlining_budget_ = kMaximumNumberOfInstructionsForSmallMethod;
+ } else {
+ inlining_budget_ = std::max(
+ kMaximumNumberOfInstructionsForSmallMethod,
+ kMaximumNumberOfTotalInstructions - total_number_of_instructions_);
+ }
+}
void HInliner::Run() {
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
- if ((compiler_options.GetInlineDepthLimit() == 0)
- || (compiler_options.GetInlineMaxCodeUnits() == 0)) {
- return;
- }
- if (caller_compilation_unit_.GetCodeItem()->insns_size_in_code_units_ > kMaximumCodeUnitSize) {
- return;
- }
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
// doing some logic in the runtime to discover if a method could have been inlined.
return;
}
+
+ // Initialize the number of instructions for the method being compiled. Recursive calls
+ // to HInliner::Run have already updated the instruction count.
+ if (outermost_graph_ == graph_) {
+ total_number_of_instructions_ = CountNumberOfInstructions(graph_);
+ }
+
+ UpdateInliningBudget();
+ DCHECK_NE(total_number_of_instructions_, 0u);
+ DCHECK_NE(inlining_budget_, 0u);
+
// Keep a copy of all blocks when starting the visit.
ArenaVector<HBasicBlock*> blocks = graph_->GetReversePostOrder();
DCHECK(!blocks.empty());
@@ -289,7 +360,24 @@
return nullptr;
}
PointerSize pointer_size = caller_compilation_unit_.GetClassLinker()->GetImagePointerSize();
- return resolved_method->GetSingleImplementation(pointer_size);
+ ArtMethod* single_impl = resolved_method->GetSingleImplementation(pointer_size);
+ if (single_impl == nullptr) {
+ return nullptr;
+ }
+ if (single_impl->IsProxyMethod()) {
+ // Proxy method is a generic invoker that's not worth
+ // devirtualizing/inlining. It also causes issues when the proxy
+ // method is in another dex file if we try to rewrite invoke-interface to
+ // invoke-virtual because a proxy method doesn't have a real dex file.
+ return nullptr;
+ }
+ if (!single_impl->GetDeclaringClass()->IsResolved()) {
+ // There's a race with the class loading, which updates the CHA info
+ // before setting the class to resolved. So we just bail for this
+ // rare occurence.
+ return nullptr;
+ }
+ return single_impl;
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
@@ -302,17 +390,18 @@
ScopedObjectAccess soa(Thread::Current());
uint32_t method_index = invoke_instruction->GetDexMethodIndex();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- VLOG(compiler) << "Try inlining " << caller_dex_file.PrettyMethod(method_index);
+ LOG_TRY() << caller_dex_file.PrettyMethod(method_index);
- // We can query the dex cache directly. The verifier has populated it already.
ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
- ArtMethod* actual_method = nullptr;
if (resolved_method == nullptr) {
DCHECK(invoke_instruction->IsInvokeStaticOrDirect());
DCHECK(invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit());
- VLOG(compiler) << "Not inlining a String.<init> method";
+ LOG_FAIL_NO_STAT() << "Not inlining a String.<init> method";
return false;
- } else if (invoke_instruction->IsInvokeStaticOrDirect()) {
+ }
+ ArtMethod* actual_method = nullptr;
+
+ if (invoke_instruction->IsInvokeStaticOrDirect()) {
actual_method = resolved_method;
} else {
// Check if we can statically find the method.
@@ -325,6 +414,7 @@
if (method != nullptr) {
cha_devirtualize = true;
actual_method = method;
+ LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
}
}
@@ -376,6 +466,10 @@
HInvoke* invoke_instruction,
ArtMethod* resolved_method)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (Runtime::Current()->IsAotCompiler() && !kUseAOTInlineCaches) {
+ return false;
+ }
+
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::ObjectArray<mirror::Class>> inline_cache;
InlineCacheType inline_cache_type = Runtime::Current()->IsAotCompiler()
@@ -383,16 +477,23 @@
: GetInlineCacheJIT(invoke_instruction, &hs, &inline_cache);
switch (inline_cache_type) {
- case kInlineCacheNoData:
- break;
-
- case kInlineCacheUninitialized:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is not hit and not inlined";
+ case kInlineCacheNoData: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " could not be statically determined";
return false;
+ }
- case kInlineCacheMonomorphic:
+ case kInlineCacheUninitialized: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is not hit and not inlined";
+ return false;
+ }
+
+ case kInlineCacheMonomorphic: {
MaybeRecordStat(kMonomorphicCall);
if (outermost_graph_->IsCompilingOsr()) {
// If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
@@ -401,23 +502,29 @@
} else {
return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
}
+ }
- case kInlineCachePolymorphic:
+ case kInlineCachePolymorphic: {
MaybeRecordStat(kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ }
- case kInlineCacheMegamorphic:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is megamorphic and not inlined";
+ case kInlineCacheMegamorphic: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is megamorphic and not inlined";
MaybeRecordStat(kMegamorphicCall);
return false;
+ }
- case kInlineCacheMissingTypes:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is missing types and not inlined";
+ case kInlineCacheMissingTypes: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is missing types and not inlined";
return false;
+ }
}
UNREACHABLE();
}
@@ -571,6 +678,32 @@
return result;
}
+static ArtMethod* ResolveMethodFromInlineCache(Handle<mirror::Class> klass,
+ ArtMethod* resolved_method,
+ HInstruction* invoke_instruction,
+ PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (Runtime::Current()->IsAotCompiler()) {
+ // We can get unrelated types when working with profiles (corruption,
+ // systme updates, or anyone can write to it). So first check if the class
+ // actually implements the declaring class of the method that is being
+ // called in bytecode.
+ // Note: the lookup methods used below require to have assignable types.
+ if (!resolved_method->GetDeclaringClass()->IsAssignableFrom(klass.Get())) {
+ return nullptr;
+ }
+ }
+
+ if (invoke_instruction->IsInvokeInterface()) {
+ resolved_method = klass->FindVirtualMethodForInterface(resolved_method, pointer_size);
+ } else {
+ DCHECK(invoke_instruction->IsInvokeVirtual());
+ resolved_method = klass->FindVirtualMethodForVirtual(resolved_method, pointer_size);
+ }
+ DCHECK(resolved_method != nullptr);
+ return resolved_method;
+}
+
bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
@@ -580,27 +713,29 @@
dex::TypeIndex class_index = FindClassIndexIn(
GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because its class is not"
- << " accessible to the caller";
+ LOG_FAIL(kNotInlinedDexCache)
+ << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because its class is not"
+ << " accessible to the caller";
return false;
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
- if (invoke_instruction->IsInvokeInterface()) {
- resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForInterface(
- resolved_method, pointer_size);
- } else {
- DCHECK(invoke_instruction->IsInvokeVirtual());
- resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForVirtual(
- resolved_method, pointer_size);
+ Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
+ resolved_method = ResolveMethodFromInlineCache(
+ monomorphic_type, resolved_method, invoke_instruction, pointer_size);
+
+ LOG_NOTE() << "Try inline monomorphic call to " << resolved_method->PrettyMethod();
+ if (resolved_method == nullptr) {
+ // Bogus AOT profile, bail.
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ return false;
}
- DCHECK(resolved_method != nullptr);
+
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
@@ -639,7 +774,8 @@
HShouldDeoptimizeFlag(graph_->GetArena(), dex_pc);
HInstruction* compare = new (graph_->GetArena()) HNotEqual(
deopt_flag, graph_->GetIntConstant(0, dex_pc));
- HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(compare, dex_pc);
+ HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(
+ graph_->GetArena(), compare, HDeoptimize::Kind::kInline, dex_pc);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
@@ -703,9 +839,16 @@
bb_cursor->InsertInstructionAfter(compare, load_class);
if (with_deoptimization) {
HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- compare, invoke_instruction->GetDexPc());
+ graph_->GetArena(),
+ compare,
+ receiver,
+ HDeoptimize::Kind::kInline,
+ invoke_instruction->GetDexPc());
bb_cursor->InsertInstructionAfter(deoptimize, compare);
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ DCHECK_EQ(invoke_instruction->InputAt(0), receiver);
+ receiver->ReplaceUsesDominatedBy(deoptimize, deoptimize);
+ deoptimize->SetReferenceTypeInfo(receiver->GetReferenceTypeInfo());
}
return compare;
}
@@ -732,11 +875,14 @@
ArtMethod* method = nullptr;
Handle<mirror::Class> handle = handles_->NewHandle(classes->Get(i));
- if (invoke_instruction->IsInvokeInterface()) {
- method = handle->FindVirtualMethodForInterface(resolved_method, pointer_size);
- } else {
- DCHECK(invoke_instruction->IsInvokeVirtual());
- method = handle->FindVirtualMethodForVirtual(resolved_method, pointer_size);
+ method = ResolveMethodFromInlineCache(
+ handle, resolved_method, invoke_instruction, pointer_size);
+ if (method == nullptr) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ // AOT profile is bogus. This loop expects to iterate over all entries,
+ // so just continue.
+ all_targets_inlined = false;
+ continue;
}
HInstruction* receiver = invoke_instruction->InputAt(0);
@@ -745,6 +891,7 @@
dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
HInstruction* return_replacement = nullptr;
+ LOG_NOTE() << "Try inline polymorphic call to " << method->PrettyMethod();
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
method,
@@ -754,8 +901,8 @@
} else {
one_target_inlined = true;
- VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
- << " has inlined " << ArtMethod::PrettyMethod(method);
+ LOG_SUCCESS() << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " has inlined " << ArtMethod::PrettyMethod(method);
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
@@ -780,7 +927,7 @@
}
invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
// Because the inline cache data can be populated concurrently, we force the end of the
- // iteration. Otherhwise, we could see a new receiver type.
+ // iteration. Otherwise, we could see a new receiver type.
break;
} else {
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
@@ -789,9 +936,10 @@
}
if (!one_target_inlined) {
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because none"
- << " of its targets could be inlined";
+ LOG_FAIL_NO_STAT()
+ << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because none"
+ << " of its targets could be inlined";
return false;
}
@@ -925,9 +1073,6 @@
actual_method = new_method;
} else if (actual_method != new_method) {
// Different methods, bailout.
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because it resolves"
- << " to different methods";
return false;
}
}
@@ -981,13 +1126,19 @@
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
} else {
HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- compare, invoke_instruction->GetDexPc());
+ graph_->GetArena(),
+ compare,
+ receiver,
+ HDeoptimize::Kind::kInline,
+ invoke_instruction->GetDexPc());
bb_cursor->InsertInstructionAfter(deoptimize, compare);
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
if (return_replacement != nullptr) {
invoke_instruction->ReplaceWith(return_replacement);
}
+ receiver->ReplaceUsesDominatedBy(deoptimize, deoptimize);
invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ deoptimize->SetReferenceTypeInfo(receiver->GetReferenceTypeInfo());
}
// Run type propagation to get the guard typed.
@@ -1000,6 +1151,7 @@
MaybeRecordStat(kInlinedPolymorphicCall);
+ LOG_SUCCESS() << "Inlined same polymorphic target " << actual_method->PrettyMethod();
return true;
}
@@ -1014,11 +1166,23 @@
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
+ DCHECK(!method->IsProxyMethod());
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
// better than an invoke-interface because:
// 1) In the best case, the interface call has one more indirection (to fetch the IMT).
// 2) We will not go to the conflict trampoline with an invoke-virtual.
// TODO: Consider sharpening once it is not dependent on the compiler driver.
+
+ if (method->IsDefault() && !method->IsCopied()) {
+ // Changing to invoke-virtual cannot be done on an original default method
+ // since it's not in any vtable. Devirtualization by exact type/inline-cache
+ // always uses a method in the iftable which is never an original default
+ // method.
+ // On the other hand, inlining an original default method by CHA is fine.
+ DCHECK(cha_devirtualize);
+ return false;
+ }
+
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
uint32_t dex_method_index = FindMethodIndexIn(
method, caller_dex_file, invoke_instruction->GetDexMethodIndex());
@@ -1069,13 +1233,34 @@
return true;
}
+size_t HInliner::CountRecursiveCallsOf(ArtMethod* method) const {
+ const HInliner* current = this;
+ size_t count = 0;
+ do {
+ if (current->graph_->GetArtMethod() == method) {
+ ++count;
+ }
+ current = current->parent_;
+ } while (current != nullptr);
+ return count;
+}
+
bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* method,
ReferenceTypeInfo receiver_type,
HInstruction** return_replacement) {
if (method->IsProxyMethod()) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because of unimplemented inline support for proxy methods.";
+ LOG_FAIL(kNotInlinedProxy)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because of unimplemented inline support for proxy methods.";
+ return false;
+ }
+
+ if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
+ LOG_FAIL(kNotInlinedRecursiveBudget)
+ << "Method "
+ << method->PrettyMethod()
+ << " is not inlined because it has reached its recursive call budget.";
return false;
}
@@ -1084,15 +1269,16 @@
if (!compiler_driver_->MayInline(method->GetDexFile(),
outer_compilation_unit_.GetDexFile())) {
if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
- VLOG(compiler) << "Successfully replaced pattern of invoke "
- << method->PrettyMethod();
+ LOG_SUCCESS() << "Successfully replaced pattern of invoke "
+ << method->PrettyMethod();
MaybeRecordStat(kReplacedInvokeWithSimplePattern);
return true;
}
- VLOG(compiler) << "Won't inline " << method->PrettyMethod() << " in "
- << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
- << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
- << method->GetDexFile()->GetLocation();
+ LOG_FAIL(kNotInlinedWont)
+ << "Won't inline " << method->PrettyMethod() << " in "
+ << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
+ << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
+ << method->GetDexFile()->GetLocation();
return false;
}
@@ -1101,30 +1287,32 @@
const DexFile::CodeItem* code_item = method->GetCodeItem();
if (code_item == nullptr) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because it is native";
+ LOG_FAIL_NO_STAT()
+ << "Method " << method->PrettyMethod() << " is not inlined because it is native";
return false;
}
size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is too big to inline: "
- << code_item->insns_size_in_code_units_
- << " > "
- << inline_max_code_units;
+ LOG_FAIL(kNotInlinedCodeItem)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because its code item is too big: "
+ << code_item->insns_size_in_code_units_
+ << " > "
+ << inline_max_code_units;
return false;
}
if (code_item->tries_size_ != 0) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because of try block";
+ LOG_FAIL(kNotInlinedTryCatch)
+ << "Method " << method->PrettyMethod() << " is not inlined because of try block";
return false;
}
if (!method->IsCompilable()) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " has soft failures un-handled by the compiler, so it cannot be inlined";
+ LOG_FAIL(kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " has soft failures un-handled by the compiler, so it cannot be inlined";
}
if (!method->GetDeclaringClass()->IsVerified()) {
@@ -1132,8 +1320,9 @@
if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " couldn't be verified, so it cannot be inlined";
+ LOG_FAIL(kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " couldn't be verified, so it cannot be inlined";
return false;
}
}
@@ -1142,9 +1331,9 @@
invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
// Case of a static method that cannot be inlined because it implicitly
// requires an initialization check of its declaring class.
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because it is static and requires a clinit"
- << " check that cannot be emitted due to Dex cache limitations";
+ LOG_FAIL(kNotInlinedDexCache) << "Method " << method->PrettyMethod()
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
return false;
}
@@ -1153,7 +1342,7 @@
return false;
}
- VLOG(compiler) << "Successfully inlined " << method->PrettyMethod();
+ LOG_SUCCESS() << method->PrettyMethod();
MaybeRecordStat(kInlinedInvoke);
return true;
}
@@ -1376,25 +1565,6 @@
/* verified_method */ nullptr,
dex_cache);
- bool requires_ctor_barrier = false;
-
- if (dex_compilation_unit.IsConstructor()) {
- // If it's a super invocation and we already generate a barrier there's no need
- // to generate another one.
- // We identify super calls by looking at the "this" pointer. If its value is the
- // same as the local "this" pointer then we must have a super invocation.
- bool is_super_invocation = invoke_instruction->InputAt(0)->IsParameterValue()
- && invoke_instruction->InputAt(0)->AsParameterValue()->IsThis();
- if (is_super_invocation && graph_->ShouldGenerateConstructorBarrier()) {
- requires_ctor_barrier = false;
- } else {
- Thread* self = Thread::Current();
- requires_ctor_barrier = compiler_driver_->RequiresConstructorBarrier(self,
- dex_compilation_unit.GetDexFile(),
- dex_compilation_unit.GetClassDefIndex());
- }
- }
-
InvokeType invoke_type = invoke_instruction->GetInvokeType();
if (invoke_type == kInterface) {
// We have statically resolved the dispatch. To please the class linker
@@ -1407,7 +1577,6 @@
graph_->GetArena(),
callee_dex_file,
method_index,
- requires_ctor_barrier,
compiler_driver_->GetInstructionSet(),
invoke_type,
graph_->IsDebuggable(),
@@ -1441,15 +1610,17 @@
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be built, so cannot be inlined";
+ LOG_FAIL(kNotInlinedCannotBuild)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be built, so cannot be inlined";
return false;
}
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " cannot be inlined because of the register allocator";
+ LOG_FAIL(kNotInlinedRegisterAllocator)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " cannot be inlined because of the register allocator";
return false;
}
@@ -1496,15 +1667,13 @@
/* is_first_run */ false).Run();
}
- size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
- size_t number_of_inlined_instructions =
- RunOptimizations(callee_graph, code_item, dex_compilation_unit);
- number_of_instructions_budget += number_of_inlined_instructions;
+ RunOptimizations(callee_graph, code_item, dex_compilation_unit);
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it has an infinite loop";
+ LOG_FAIL(kNotInlinedInfiniteLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it has an infinite loop";
return false;
}
@@ -1513,15 +1682,17 @@
if (predecessor->GetLastInstruction()->IsThrow()) {
if (invoke_instruction->GetBlock()->IsTryBlock()) {
// TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because one branch always throws and"
- << " caller is in a try/catch block";
+ LOG_FAIL(kNotInlinedTryCatch)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller is in a try/catch block";
return false;
} else if (graph_->GetExitBlock() == nullptr) {
// TODO(ngeoffray): Support adding HExit in the caller graph.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because one branch always throws and"
- << " caller does not have an exit block";
+ LOG_FAIL(kNotInlinedInfiniteLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller does not have an exit block";
return false;
} else if (graph_->HasIrreducibleLoops()) {
// TODO(ngeoffray): Support re-computing loop information to graphs with
@@ -1537,32 +1708,31 @@
}
if (!has_one_return) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it always throws";
+ LOG_FAIL(kNotInlinedAlwaysThrows)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it always throws";
return false;
}
size_t number_of_instructions = 0;
-
- bool can_inline_environment =
- total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;
-
// Skip the entry block, it does not contain instructions that prevent inlining.
for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
if (block->IsLoopHeader()) {
if (block->GetLoopInformation()->IsIrreducible()) {
// Don't inline methods with irreducible loops, they could prevent some
// optimizations to run.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it contains an irreducible loop";
+ LOG_FAIL(kNotInlinedIrreducibleLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains an irreducible loop";
return false;
}
if (!block->GetLoopInformation()->HasExitEdge()) {
// Don't inline methods with loops without exit, since they cause the
// loop information to be computed incorrectly when updating after
// inlining.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it contains a loop with no exit";
+ LOG_FAIL(kNotInlinedLoopWithoutExit)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains a loop with no exit";
return false;
}
}
@@ -1570,34 +1740,39 @@
for (HInstructionIterator instr_it(block->GetInstructions());
!instr_it.Done();
instr_it.Advance()) {
- if (number_of_instructions++ == number_of_instructions_budget) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " is not inlined because its caller has reached"
- << " its instruction budget limit.";
+ if (++number_of_instructions >= inlining_budget_) {
+ LOG_FAIL(kNotInlinedInstructionBudget)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " is not inlined because the outer method has reached"
+ << " its instruction budget limit.";
return false;
}
HInstruction* current = instr_it.Current();
- if (!can_inline_environment && current->NeedsEnvironment()) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " is not inlined because its caller has reached"
- << " its environment budget limit.";
+ if (current->NeedsEnvironment() &&
+ (total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters)) {
+ LOG_FAIL(kNotInlinedEnvironmentBudget)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " is not inlined because its caller has reached"
+ << " its environment budget limit.";
return false;
}
if (current->NeedsEnvironment() &&
!CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
resolved_method)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because " << current->DebugName()
- << " needs an environment, is in a different dex file"
- << ", and cannot be encoded in the stack maps.";
+ LOG_FAIL(kNotInlinedStackMaps)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because " << current->DebugName()
+ << " needs an environment, is in a different dex file"
+ << ", and cannot be encoded in the stack maps.";
return false;
}
if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because " << current->DebugName()
- << " it is in a different dex file and requires access to the dex cache";
+ LOG_FAIL(kNotInlinedDexCache)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because " << current->DebugName()
+ << " it is in a different dex file and requires access to the dex cache";
return false;
}
@@ -1606,21 +1781,24 @@
current->IsUnresolvedStaticFieldSet() ||
current->IsUnresolvedInstanceFieldSet()) {
// Entrypoint for unresolved fields does not handle inlined frames.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an unresolved"
- << " entrypoint";
+ LOG_FAIL(kNotInlinedUnresolvedEntrypoint)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it is using an unresolved"
+ << " entrypoint";
return false;
}
}
}
- number_of_inlined_instructions_ += number_of_instructions;
-
DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
<< "No instructions can be added to the outer graph while inner graph is being built";
+ // Inline the callee graph inside the caller graph.
const int32_t callee_instruction_counter = callee_graph->GetCurrentInstructionId();
graph_->SetCurrentInstructionId(callee_instruction_counter);
*return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+ // Update our budget for other inlining attempts in `caller_graph`.
+ total_number_of_instructions_ += number_of_instructions;
+ UpdateInliningBudget();
DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
<< "No instructions can be added to the inner graph during inlining into the outer graph";
@@ -1633,9 +1811,9 @@
return true;
}
-size_t HInliner::RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
- const DexCompilationUnit& dex_compilation_unit) {
+void HInliner::RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
@@ -1657,23 +1835,37 @@
optimization->Run();
}
- size_t number_of_inlined_instructions = 0u;
- if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
- HInliner inliner(callee_graph,
- outermost_graph_,
- codegen_,
- outer_compilation_unit_,
- dex_compilation_unit,
- compiler_driver_,
- handles_,
- inline_stats_,
- total_number_of_dex_registers_ + code_item->registers_size_,
- depth_ + 1);
- inliner.Run();
- number_of_inlined_instructions += inliner.number_of_inlined_instructions_;
+ // Bail early for pathological cases on the environment (for example recursive calls,
+ // or too large environment).
+ if (total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters) {
+ LOG_NOTE() << "Calls in " << callee_graph->GetArtMethod()->PrettyMethod()
+ << " will not be inlined because the outer method has reached"
+ << " its environment budget limit.";
+ return;
}
- return number_of_inlined_instructions;
+ // Bail early if we know we already are over the limit.
+ size_t number_of_instructions = CountNumberOfInstructions(callee_graph);
+ if (number_of_instructions > inlining_budget_) {
+ LOG_NOTE() << "Calls in " << callee_graph->GetArtMethod()->PrettyMethod()
+ << " will not be inlined because the outer method has reached"
+ << " its instruction budget limit. " << number_of_instructions;
+ return;
+ }
+
+ HInliner inliner(callee_graph,
+ outermost_graph_,
+ codegen_,
+ outer_compilation_unit_,
+ dex_compilation_unit,
+ compiler_driver_,
+ handles_,
+ inline_stats_,
+ total_number_of_dex_registers_ + code_item->registers_size_,
+ total_number_of_instructions_ + number_of_instructions,
+ this,
+ depth_ + 1);
+ inliner.Run();
}
static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a032042..9e4685c 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -42,7 +42,9 @@
VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
- size_t depth)
+ size_t total_number_of_instructions,
+ HInliner* parent,
+ size_t depth = 0)
: HOptimization(outer_graph, kInlinerPassName, stats),
outermost_graph_(outermost_graph),
outer_compilation_unit_(outer_compilation_unit),
@@ -50,8 +52,10 @@
codegen_(codegen),
compiler_driver_(compiler_driver),
total_number_of_dex_registers_(total_number_of_dex_registers),
+ total_number_of_instructions_(total_number_of_instructions),
+ parent_(parent),
depth_(depth),
- number_of_inlined_instructions_(0),
+ inlining_budget_(0),
handles_(handles),
inline_stats_(nullptr) {}
@@ -95,10 +99,10 @@
HInstruction** return_replacement);
// Run simple optimizations on `callee_graph`.
- // Returns the number of inlined instructions.
- size_t RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
- const DexCompilationUnit& dex_compilation_unit);
+ void RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to recognize known simple patterns and replace invoke call with appropriate instructions.
bool TryPatternSubstitution(HInvoke* invoke_instruction,
@@ -259,14 +263,30 @@
HInstruction* return_replacement,
HInstruction* invoke_instruction);
+ // Update the inlining budget based on `total_number_of_instructions_`.
+ void UpdateInliningBudget();
+
+ // Count the number of calls of `method` being inlined recursively.
+ size_t CountRecursiveCallsOf(ArtMethod* method) const;
+
+ // Pretty-print for spaces during logging.
+ std::string DepthString(int line) const;
+
HGraph* const outermost_graph_;
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
CodeGenerator* const codegen_;
CompilerDriver* const compiler_driver_;
const size_t total_number_of_dex_registers_;
+ size_t total_number_of_instructions_;
+
+ // The 'parent' inliner, i.e. the inlining optimization that requested
+ // `graph_` to be inlined.
+ const HInliner* const parent_;
const size_t depth_;
- size_t number_of_inlined_instructions_;
+
+ // The budget left for inlining, in number of instructions.
+ size_t inlining_budget_;
VariableSizedHandleScope* const handles_;
// Used to record stats about optimizations on the inlined graph.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 88f67fa..978c6a2 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -589,6 +589,11 @@
}
static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
+ // Can be null in unit tests only.
+ if (UNLIKELY(cu == nullptr)) {
+ return false;
+ }
+
Thread* self = Thread::Current();
return cu->IsConstructor()
&& driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
@@ -634,12 +639,9 @@
Primitive::Type type,
uint32_t dex_pc) {
if (type == Primitive::kPrimVoid) {
- if (graph_->ShouldGenerateConstructorBarrier()) {
- // The compilation unit is null during testing.
- if (dex_compilation_unit_ != nullptr) {
- DCHECK(RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_))
- << "Inconsistent use of ShouldGenerateConstructorBarrier. Should not generate a barrier.";
- }
+ // This may insert additional redundant constructor fences from the super constructors.
+ // TODO: remove redundant constructor fences (b/36656456).
+ if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
AppendInstruction(new (arena_) HMemoryBarrier(kStoreStore, dex_pc));
}
AppendInstruction(new (arena_) HReturnVoid(dex_pc));
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 17421fc..60790e5 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -2132,6 +2132,9 @@
if (cond->IsConstant()) {
if (cond->AsIntConstant()->IsFalse()) {
// Never deopt: instruction can be removed.
+ if (deoptimize->GuardsAnInput()) {
+ deoptimize->ReplaceWith(deoptimize->GuardedInput());
+ }
deoptimize->GetBlock()->RemoveInstruction(deoptimize);
} else {
// Always deopt.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 28095c4..1006a77 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -41,6 +41,54 @@
using IntrinsicSlowPathARM = IntrinsicSlowPath<InvokeDexCallingConventionVisitorARM>;
+#define __ assembler->
+
+// Compute base address for the System.arraycopy intrinsic in `base`.
+static void GenSystemArrayCopyBaseAddress(ArmAssembler* assembler,
+ Primitive::Type type,
+ const Register& array,
+ const Location& pos,
+ const Register& base) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimNot as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const uint32_t element_size_shift = Primitive::ComponentSizeShift(type);
+ const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t constant = pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(base, array, element_size * constant + data_offset);
+ } else {
+ __ add(base, array, ShifterOperand(pos.AsRegister<Register>(), LSL, element_size_shift));
+ __ AddConstant(base, data_offset);
+ }
+}
+
+// Compute end address for the System.arraycopy intrinsic in `end`.
+static void GenSystemArrayCopyEndAddress(ArmAssembler* assembler,
+ Primitive::Type type,
+ const Location& copy_length,
+ const Register& base,
+ const Register& end) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimNot as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const uint32_t element_size_shift = Primitive::ComponentSizeShift(type);
+
+ if (copy_length.IsConstant()) {
+ int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(end, base, element_size * constant);
+ } else {
+ __ add(end, base, ShifterOperand(copy_length.AsRegister<Register>(), LSL, element_size_shift));
+ }
+}
+
+#undef __
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
@@ -55,6 +103,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ ArmAssembler* assembler = arm_codegen->GetAssembler();
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
DCHECK(instruction_->IsInvokeStaticOrDirect())
@@ -63,9 +112,8 @@
DCHECK(instruction_->GetLocations()->Intrinsified());
DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
Register dest = locations->InAt(2).AsRegister<Register>();
Location dest_pos = locations->InAt(3);
@@ -76,15 +124,7 @@
__ Bind(GetEntryLabel());
// Compute the base destination address in `dst_curr_addr`.
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(dst_curr_addr, dest, element_size * constant + offset);
- } else {
- __ add(dst_curr_addr,
- dest,
- ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
- __ AddConstant(dst_curr_addr, offset);
- }
+ GenSystemArrayCopyBaseAddress(assembler, type, dest, dest_pos, dst_curr_addr);
Label loop;
__ Bind(&loop);
@@ -108,6 +148,8 @@
DCHECK_NE(src_stop_addr, IP);
DCHECK_NE(tmp, IP);
DCHECK(0 <= tmp && tmp < kNumberOfCoreRegisters) << tmp;
+ // TODO: Load the entrypoint once before the loop, instead of
+ // loading it at every iteration.
int32_t entry_point_offset =
CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp);
// This runtime call does not require a stack map.
@@ -228,9 +270,11 @@
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
-static void GenNumberOfLeadingZeros(LocationSummary* locations,
+static void GenNumberOfLeadingZeros(HInvoke* invoke,
Primitive::Type type,
- ArmAssembler* assembler) {
+ CodeGeneratorARM* codegen) {
+ ArmAssembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
Location in = locations->InAt(0);
Register out = locations->Out().AsRegister<Register>();
@@ -240,11 +284,14 @@
Register in_reg_lo = in.AsRegisterPairLow<Register>();
Register in_reg_hi = in.AsRegisterPairHigh<Register>();
Label end;
+ Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ clz(out, in_reg_hi);
- __ CompareAndBranchIfNonZero(in_reg_hi, &end);
+ __ CompareAndBranchIfNonZero(in_reg_hi, final_label);
__ clz(out, in_reg_lo);
__ AddConstant(out, 32);
- __ Bind(&end);
+ if (end.IsLinked()) {
+ __ Bind(&end);
+ }
} else {
__ clz(out, in.AsRegister<Register>());
}
@@ -255,7 +302,7 @@
}
void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+ GenNumberOfLeadingZeros(invoke, Primitive::kPrimInt, codegen_);
}
void IntrinsicLocationsBuilderARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -267,27 +314,32 @@
}
void IntrinsicCodeGeneratorARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+ GenNumberOfLeadingZeros(invoke, Primitive::kPrimLong, codegen_);
}
-static void GenNumberOfTrailingZeros(LocationSummary* locations,
+static void GenNumberOfTrailingZeros(HInvoke* invoke,
Primitive::Type type,
- ArmAssembler* assembler) {
+ CodeGeneratorARM* codegen) {
DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+ ArmAssembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
Register out = locations->Out().AsRegister<Register>();
if (type == Primitive::kPrimLong) {
Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
Label end;
+ Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ rbit(out, in_reg_lo);
__ clz(out, out);
- __ CompareAndBranchIfNonZero(in_reg_lo, &end);
+ __ CompareAndBranchIfNonZero(in_reg_lo, final_label);
__ rbit(out, in_reg_hi);
__ clz(out, out);
__ AddConstant(out, 32);
- __ Bind(&end);
+ if (end.IsLinked()) {
+ __ Bind(&end);
+ }
} else {
Register in = locations->InAt(0).AsRegister<Register>();
__ rbit(out, in);
@@ -304,7 +356,7 @@
}
void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+ GenNumberOfTrailingZeros(invoke, Primitive::kPrimInt, codegen_);
}
void IntrinsicLocationsBuilderARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -316,7 +368,7 @@
}
void IntrinsicCodeGeneratorARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+ GenNumberOfTrailingZeros(invoke, Primitive::kPrimLong, codegen_);
}
static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) {
@@ -1313,6 +1365,7 @@
Label end;
Label return_true;
Label return_false;
+ Label* final_label = codegen_->GetFinalLabel(invoke, &end);
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -1386,12 +1439,15 @@
// If loop does not result in returning false, we return true.
__ Bind(&return_true);
__ LoadImmediate(out, 1);
- __ b(&end);
+ __ b(final_label);
// Return false and exit the function.
__ Bind(&return_false);
__ LoadImmediate(out, 0);
- __ Bind(&end);
+
+ if (end.IsLinked()) {
+ __ Bind(&end);
+ }
}
static void GenerateVisitStringIndexOf(HInvoke* invoke,
@@ -1925,138 +1981,113 @@
__ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
}
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
-
- // Compute the base source address in `temp1`.
- if (src_pos.IsConstant()) {
- int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(temp1, src, element_size * constant + offset);
+ if (length.IsConstant() && length.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ // Zero constant length: no need to emit the loop code at all.
} else {
- __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, element_size_shift));
- __ AddConstant(temp1, offset);
- }
+ Label done;
+ const Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
- // Compute the end source address in `temp3`.
- if (length.IsConstant()) {
- int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(temp3, temp1, element_size * constant);
- } else {
- __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, element_size_shift));
- }
-
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // TODO: Also convert this intrinsic to the IsGcMarking strategy?
-
- // The base destination address is computed later, as `temp2` is
- // used for intermediate computations.
-
- // SystemArrayCopy implementation for Baker read barriers (see
- // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
- //
- // if (src_ptr != end_ptr) {
- // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // // Slow-path copy.
- // do {
- // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
- // } while (src_ptr != end_ptr)
- // } else {
- // // Fast-path copy.
- // do {
- // *dest_ptr++ = *src_ptr++;
- // } while (src_ptr != end_ptr)
- // }
- // }
-
- Label loop, done;
-
- // Don't enter copy loop if `length == 0`.
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&done, EQ);
-
- // /* int32_t */ monitor = src->monitor_
- __ LoadFromOffset(kLoadWord, temp2, src, monitor_offset);
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including the rb_state,
- // which shall prevent load-load reordering without using
- // a memory barrier (which would be more expensive).
- // `src` is unchanged by this operation, but its value now depends
- // on `temp2`.
- __ add(src, src, ShifterOperand(temp2, LSR, 32));
-
- // Slow path used to copy array when `src` is gray.
- SlowPathCode* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM(invoke);
- codegen_->AddSlowPath(read_barrier_slow_path);
-
- // Given the numeric representation, it's enough to check the low bit of the
- // rb_state. We do that by shifting the bit out of the lock word with LSRS
- // which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
- // Carry flag is the last bit shifted out by LSRS.
- __ b(read_barrier_slow_path->GetEntryLabel(), CS);
-
- // Fast-path copy.
-
- // Compute the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(temp2, dest, element_size * constant + offset);
- } else {
- __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
- __ AddConstant(temp2, offset);
+ if (length.IsRegister()) {
+ // Don't enter the copy loop if the length is zero.
+ __ CompareAndBranchIfZero(length.AsRegister<Register>(), &done);
}
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- __ Bind(&loop);
- __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
- __ str(IP, Address(temp2, element_size, Address::PostIndex));
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&loop, NE);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // TODO: Also convert this intrinsic to the IsGcMarking strategy?
- __ Bind(read_barrier_slow_path->GetExitLabel());
- __ Bind(&done);
- } else {
- // Non read barrier code.
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
- // Compute the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(temp2, dest, element_size * constant + offset);
+ // /* int32_t */ monitor = src->monitor_
+ __ LoadFromOffset(kLoadWord, temp2, src, monitor_offset);
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Introduce a dependency on the lock_word including the rb_state,
+ // which shall prevent load-load reordering without using
+ // a memory barrier (which would be more expensive).
+ // `src` is unchanged by this operation, but its value now depends
+ // on `temp2`.
+ __ add(src, src, ShifterOperand(temp2, LSR, 32));
+
+ // Compute the base source address in `temp1`.
+ // Note that `temp1` (the base source address) is computed from
+ // `src` (and `src_pos`) here, and thus honors the artificial
+ // dependency of `src` on `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
+ // Compute the end source address in `temp3`.
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
+ // The base destination address is computed later, as `temp2` is
+ // used for intermediate computations.
+
+ // Slow path used to copy array when `src` is gray.
+ // Note that the base destination address is computed in `temp2`
+ // by the slow path code.
+ SlowPathCode* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM(invoke);
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with LSRS
+ // which can be a 16-bit instruction unlike the TST immediate.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
+ // Carry flag is the last bit shifted out by LSRS.
+ __ b(read_barrier_slow_path->GetEntryLabel(), CS);
+
+ // Fast-path copy.
+ // Compute the base destination address in `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ Label loop;
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
} else {
- __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
- __ AddConstant(temp2, offset);
+ // Non read barrier code.
+ // Compute the base source address in `temp1`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
+ // Compute the base destination address in `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
+ // Compute the end source address in `temp3`.
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ Label loop;
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
}
-
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- Label loop, done;
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&done, EQ);
- __ Bind(&loop);
- __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
- __ str(IP, Address(temp2, element_size, Address::PostIndex));
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&loop, NE);
__ Bind(&done);
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1,
- temp2,
- dest,
- Register(kNoRegister),
- /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2474,13 +2505,14 @@
Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
Label done, compressed_string_loop;
+ Label* final_label = codegen_->GetFinalLabel(invoke, &done);
// dst to be copied.
__ add(dst_ptr, dstObj, ShifterOperand(data_offset));
__ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
__ subs(num_chr, srcEnd, ShifterOperand(srcBegin));
// Early out for valid zero-length retrievals.
- __ b(&done, EQ);
+ __ b(final_label, EQ);
// src range to copy.
__ add(src_ptr, srcObj, ShifterOperand(value_offset));
@@ -2517,7 +2549,7 @@
__ b(&loop, GE);
__ adds(num_chr, num_chr, ShifterOperand(4));
- __ b(&done, EQ);
+ __ b(final_label, EQ);
// Main loop for < 4 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
@@ -2528,7 +2560,7 @@
__ b(&remainder, GT);
if (mirror::kUseStringCompression) {
- __ b(&done);
+ __ b(final_label);
const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
DCHECK_EQ(c_char_size, 1u);
@@ -2542,7 +2574,9 @@
__ b(&compressed_string_loop, GT);
}
- __ Bind(&done);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
}
void IntrinsicLocationsBuilderARM::VisitFloatIsInfinite(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 807d6cf..423fd3c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -198,6 +198,8 @@
DCHECK_NE(LocationFrom(src_stop_addr).reg(), IP0);
DCHECK_NE(tmp_.reg(), IP0);
DCHECK(0 <= tmp_.reg() && tmp_.reg() < kNumberOfWRegisters) << tmp_.reg();
+ // TODO: Load the entrypoint once before the loop, instead of
+ // loading it at every iteration.
int32_t entry_point_offset =
CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(tmp_.reg());
// This runtime call does not require a stack map.
@@ -2191,8 +2193,9 @@
}
}
-// Compute base source address, base destination address, and end source address
-// for System.arraycopy* intrinsics.
+// Compute base source address, base destination address, and end
+// source address for System.arraycopy* intrinsics in `src_base`,
+// `dst_base` and `src_end` respectively.
static void GenSystemArrayCopyAddresses(MacroAssembler* masm,
Primitive::Type type,
const Register& src,
@@ -2203,12 +2206,13 @@
const Register& src_base,
const Register& dst_base,
const Register& src_end) {
+ // This routine is used by the SystemArrayCopy and the SystemArrayCopyChar intrinsics.
DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar)
<< "Unexpected element type: " << type;
const int32_t element_size = Primitive::ComponentSize(type);
const int32_t element_size_shift = Primitive::ComponentSizeShift(type);
+ const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
- uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
if (src_pos.IsConstant()) {
int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
__ Add(src_base, src, element_size * constant + data_offset);
@@ -2712,111 +2716,131 @@
__ Cbnz(temp2, intrinsic_slow_path->GetEntryLabel());
}
- Register src_curr_addr = temp1.X();
- Register dst_curr_addr = temp2.X();
- Register src_stop_addr = temp3.X();
-
- GenSystemArrayCopyAddresses(masm,
- Primitive::kPrimNot,
- src,
- src_pos,
- dest,
- dest_pos,
- length,
- src_curr_addr,
- dst_curr_addr,
- src_stop_addr);
-
- const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
-
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // TODO: Also convert this intrinsic to the IsGcMarking strategy?
-
- // SystemArrayCopy implementation for Baker read barriers (see
- // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
- //
- // if (src_ptr != end_ptr) {
- // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // // Slow-path copy.
- // do {
- // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
- // } while (src_ptr != end_ptr)
- // } else {
- // // Fast-path copy.
- // do {
- // *dest_ptr++ = *src_ptr++;
- // } while (src_ptr != end_ptr)
- // }
- // }
-
- vixl::aarch64::Label loop, done;
-
- // Don't enter copy loop if `length == 0`.
- __ Cmp(src_curr_addr, src_stop_addr);
- __ B(&done, eq);
-
- // Make sure `tmp` is not IP0, as it is clobbered by
- // ReadBarrierMarkRegX entry points in
- // ReadBarrierSystemArrayCopySlowPathARM64.
- temps.Exclude(ip0);
- Register tmp = temps.AcquireW();
- DCHECK_NE(LocationFrom(tmp).reg(), IP0);
-
- // /* int32_t */ monitor = src->monitor_
- __ Ldr(tmp, HeapOperand(src.W(), monitor_offset));
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- // `src` is unchanged by this operation, but its value now depends
- // on `tmp`.
- __ Add(src.X(), src.X(), Operand(tmp.X(), LSR, 32));
-
- // Slow path used to copy array when `src` is gray.
- SlowPathCodeARM64* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
- codegen_->AddSlowPath(read_barrier_slow_path);
-
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
-
- // Fast-path copy.
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- __ Bind(&loop);
- __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
- __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
- __ Cmp(src_curr_addr, src_stop_addr);
- __ B(&loop, ne);
-
- __ Bind(read_barrier_slow_path->GetExitLabel());
- __ Bind(&done);
+ if (length.IsConstant() && length.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ // Zero constant length: no need to emit the loop code at all.
} else {
- // Non read barrier code.
+ Register src_curr_addr = temp1.X();
+ Register dst_curr_addr = temp2.X();
+ Register src_stop_addr = temp3.X();
+ vixl::aarch64::Label done;
+ const Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- vixl::aarch64::Label loop, done;
- __ Bind(&loop);
- __ Cmp(src_curr_addr, src_stop_addr);
- __ B(&done, eq);
- {
+ if (length.IsRegister()) {
+ // Don't enter the copy loop if the length is zero.
+ __ Cbz(WRegisterFrom(length), &done);
+ }
+
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // TODO: Also convert this intrinsic to the IsGcMarking strategy?
+
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
+
+ // Make sure `tmp` is not IP0, as it is clobbered by
+ // ReadBarrierMarkRegX entry points in
+ // ReadBarrierSystemArrayCopySlowPathARM64.
+ temps.Exclude(ip0);
Register tmp = temps.AcquireW();
+ DCHECK_NE(LocationFrom(tmp).reg(), IP0);
+
+ // /* int32_t */ monitor = src->monitor_
+ __ Ldr(tmp, HeapOperand(src.W(), monitor_offset));
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Introduce a dependency on the lock_word including rb_state,
+ // to prevent load-load reordering, and without using
+ // a memory barrier (which would be more expensive).
+ // `src` is unchanged by this operation, but its value now depends
+ // on `tmp`.
+ __ Add(src.X(), src.X(), Operand(tmp.X(), LSR, 32));
+
+ // Compute base source address, base destination address, and end
+ // source address for System.arraycopy* intrinsics in `src_base`,
+ // `dst_base` and `src_end` respectively.
+ // Note that `src_curr_addr` is computed from `src` (and
+ // `src_pos`) here, and thus honors the artificial dependency
+ // of `src` on `tmp`.
+ GenSystemArrayCopyAddresses(masm,
+ type,
+ src,
+ src_pos,
+ dest,
+ dest_pos,
+ length,
+ src_curr_addr,
+ dst_curr_addr,
+ src_stop_addr);
+
+ // Slow path used to copy array when `src` is gray.
+ SlowPathCodeARM64* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the rb_state.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
+
+ // Fast-path copy.
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ vixl::aarch64::Label loop;
+ __ Bind(&loop);
__ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
__ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&loop, ne);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
+ } else {
+ // Non read barrier code.
+ // Compute base source address, base destination address, and end
+ // source address for System.arraycopy* intrinsics in `src_base`,
+ // `dst_base` and `src_end` respectively.
+ GenSystemArrayCopyAddresses(masm,
+ type,
+ src,
+ src_pos,
+ dest,
+ dest_pos,
+ length,
+ src_curr_addr,
+ dst_curr_addr,
+ src_stop_addr);
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ vixl::aarch64::Label loop;
+ __ Bind(&loop);
+ {
+ Register tmp = temps.AcquireW();
+ __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
+ __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
+ }
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&loop, ne);
}
- __ B(&loop);
__ Bind(&done);
}
}
+
// We only need one card marking on the destination array.
codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 60bcf2c..0d933ea 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -39,6 +39,7 @@
using helpers::LocationFrom;
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
+using helpers::HighSRegisterFrom;
using helpers::OutputDRegister;
using helpers::OutputSRegister;
using helpers::OutputRegister;
@@ -117,6 +118,50 @@
DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathARMVIXL);
};
+// Compute base address for the System.arraycopy intrinsic in `base`.
+static void GenSystemArrayCopyBaseAddress(ArmVIXLAssembler* assembler,
+ Primitive::Type type,
+ const vixl32::Register& array,
+ const Location& pos,
+ const vixl32::Register& base) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimChar as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const uint32_t element_size_shift = Primitive::ComponentSizeShift(type);
+ const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t constant = Int32ConstantFrom(pos);
+ __ Add(base, array, element_size * constant + data_offset);
+ } else {
+ __ Add(base, array, Operand(RegisterFrom(pos), vixl32::LSL, element_size_shift));
+ __ Add(base, base, data_offset);
+ }
+}
+
+// Compute end address for the System.arraycopy intrinsic in `end`.
+static void GenSystemArrayCopyEndAddress(ArmVIXLAssembler* assembler,
+ Primitive::Type type,
+ const Location& copy_length,
+ const vixl32::Register& base,
+ const vixl32::Register& end) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimChar as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const uint32_t element_size_shift = Primitive::ComponentSizeShift(type);
+
+ if (copy_length.IsConstant()) {
+ int32_t constant = Int32ConstantFrom(copy_length);
+ __ Add(end, base, element_size * constant);
+ } else {
+ __ Add(end, base, Operand(RegisterFrom(copy_length), vixl32::LSL, element_size_shift));
+ }
+}
+
// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
@@ -137,9 +182,8 @@
DCHECK(instruction_->GetLocations()->Intrinsified());
DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
vixl32::Register dest = InputRegisterAt(instruction_, 2);
Location dest_pos = locations->InAt(3);
@@ -150,15 +194,7 @@
__ Bind(GetEntryLabel());
// Compute the base destination address in `dst_curr_addr`.
- if (dest_pos.IsConstant()) {
- int32_t constant = Int32ConstantFrom(dest_pos);
- __ Add(dst_curr_addr, dest, element_size * constant + offset);
- } else {
- __ Add(dst_curr_addr,
- dest,
- Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
- __ Add(dst_curr_addr, dst_curr_addr, offset);
- }
+ GenSystemArrayCopyBaseAddress(assembler, type, dest, dest_pos, dst_curr_addr);
vixl32::Label loop;
__ Bind(&loop);
@@ -182,6 +218,8 @@
DCHECK(!src_stop_addr.Is(ip));
DCHECK(!tmp.Is(ip));
DCHECK(tmp.IsRegister()) << tmp;
+ // TODO: Load the entrypoint once before the loop, instead of
+ // loading it at every iteration.
int32_t entry_point_offset =
CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp.GetCode());
// This runtime call does not require a stack map.
@@ -296,9 +334,11 @@
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
-static void GenNumberOfLeadingZeros(LocationSummary* locations,
+static void GenNumberOfLeadingZeros(HInvoke* invoke,
Primitive::Type type,
- ArmVIXLAssembler* assembler) {
+ CodeGeneratorARMVIXL* codegen) {
+ ArmVIXLAssembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
Location in = locations->InAt(0);
vixl32::Register out = RegisterFrom(locations->Out());
@@ -308,11 +348,14 @@
vixl32::Register in_reg_lo = LowRegisterFrom(in);
vixl32::Register in_reg_hi = HighRegisterFrom(in);
vixl32::Label end;
+ vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Clz(out, in_reg_hi);
- __ CompareAndBranchIfNonZero(in_reg_hi, &end, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
__ Clz(out, in_reg_lo);
__ Add(out, out, 32);
- __ Bind(&end);
+ if (end.IsReferenced()) {
+ __ Bind(&end);
+ }
} else {
__ Clz(out, RegisterFrom(in));
}
@@ -323,7 +366,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+ GenNumberOfLeadingZeros(invoke, Primitive::kPrimInt, codegen_);
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -335,27 +378,32 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+ GenNumberOfLeadingZeros(invoke, Primitive::kPrimLong, codegen_);
}
-static void GenNumberOfTrailingZeros(LocationSummary* locations,
+static void GenNumberOfTrailingZeros(HInvoke* invoke,
Primitive::Type type,
- ArmVIXLAssembler* assembler) {
+ CodeGeneratorARMVIXL* codegen) {
DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+ ArmVIXLAssembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
vixl32::Register out = RegisterFrom(locations->Out());
if (type == Primitive::kPrimLong) {
vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
vixl32::Label end;
+ vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Rbit(out, in_reg_lo);
__ Clz(out, out);
- __ CompareAndBranchIfNonZero(in_reg_lo, &end, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
__ Rbit(out, in_reg_hi);
__ Clz(out, out);
__ Add(out, out, 32);
- __ Bind(&end);
+ if (end.IsReferenced()) {
+ __ Bind(&end);
+ }
} else {
vixl32::Register in = RegisterFrom(locations->InAt(0));
__ Rbit(out, in);
@@ -372,7 +420,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+ GenNumberOfTrailingZeros(invoke, Primitive::kPrimInt, codegen_);
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -384,7 +432,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+ GenNumberOfTrailingZeros(invoke, Primitive::kPrimLong, codegen_);
}
static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) {
@@ -465,7 +513,8 @@
GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
-static void GenMinMaxFloat(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+static void GenMinMaxFloat(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
+ ArmVIXLAssembler* assembler = codegen->GetAssembler();
Location op1_loc = invoke->GetLocations()->InAt(0);
Location op2_loc = invoke->GetLocations()->InAt(1);
Location out_loc = invoke->GetLocations()->Out();
@@ -483,6 +532,7 @@
const vixl32::Register temp1 = temps.Acquire();
vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0));
vixl32::Label nan, done;
+ vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);
DCHECK(op1.Is(out));
@@ -499,7 +549,8 @@
__ it(cond);
__ vmov(cond, F32, out, op2);
}
- __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation.
+ // for <>(not equal), we've done min/max calculation.
+ __ B(ne, final_label, /* far_target */ false);
// handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
__ Vmov(temp1, op1);
@@ -510,14 +561,16 @@
__ And(temp1, temp1, temp2);
}
__ Vmov(out, temp1);
- __ B(&done);
+ __ B(final_label);
// handle NaN input.
__ Bind(&nan);
__ Movt(temp1, High16Bits(kNanFloat)); // 0x7FC0xxxx is a NaN.
__ Vmov(out, temp1);
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
}
static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -535,7 +588,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
- GenMinMaxFloat(invoke, /* is_min */ true, GetAssembler());
+ GenMinMaxFloat(invoke, /* is_min */ true, codegen_);
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -544,10 +597,11 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
- GenMinMaxFloat(invoke, /* is_min */ false, GetAssembler());
+ GenMinMaxFloat(invoke, /* is_min */ false, codegen_);
}
-static void GenMinMaxDouble(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+static void GenMinMaxDouble(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
+ ArmVIXLAssembler* assembler = codegen->GetAssembler();
Location op1_loc = invoke->GetLocations()->InAt(0);
Location op2_loc = invoke->GetLocations()->InAt(1);
Location out_loc = invoke->GetLocations()->Out();
@@ -562,6 +616,7 @@
vixl32::DRegister op2 = DRegisterFrom(op2_loc);
vixl32::DRegister out = OutputDRegister(invoke);
vixl32::Label handle_nan_eq, done;
+ vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);
DCHECK(op1.Is(out));
@@ -578,19 +633,22 @@
__ it(cond);
__ vmov(cond, F64, out, op2);
}
- __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation.
+ // for <>(not equal), we've done min/max calculation.
+ __ B(ne, final_label, /* far_target */ false);
// handle op1 == op2, max(+0.0,-0.0).
if (!is_min) {
__ Vand(F64, out, op1, op2);
- __ B(&done);
+ __ B(final_label);
}
// handle op1 == op2, min(+0.0,-0.0), NaN input.
__ Bind(&handle_nan_eq);
__ Vorr(F64, out, op1, op2); // assemble op1/-0.0/NaN.
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -598,7 +656,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
- GenMinMaxDouble(invoke, /* is_min */ true , GetAssembler());
+ GenMinMaxDouble(invoke, /* is_min */ true , codegen_);
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -606,7 +664,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- GenMinMaxDouble(invoke, /* is_min */ false, GetAssembler());
+ GenMinMaxDouble(invoke, /* is_min */ false, codegen_);
}
static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
@@ -737,6 +795,58 @@
__ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+
+ ArmVIXLAssembler* assembler = GetAssembler();
+ vixl32::SRegister in_reg = InputSRegisterAt(invoke, 0);
+ vixl32::Register out_reg = OutputRegister(invoke);
+ vixl32::SRegister temp1 = LowSRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ vixl32::SRegister temp2 = HighSRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ vixl32::Label done;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &done);
+
+ // Round to nearest integer, ties away from zero.
+ __ Vcvta(S32, F32, temp1, in_reg);
+ __ Vmov(out_reg, temp1);
+
+ // For positive, zero or NaN inputs, rounding is done.
+ __ Cmp(out_reg, 0);
+ __ B(ge, final_label, /* far_target */ false);
+
+ // Handle input < 0 cases.
+ // If input is negative but not a tie, previous result (round to nearest) is valid.
+ // If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
+ __ Vrinta(F32, F32, temp1, in_reg);
+ __ Vmov(temp2, 0.5);
+ __ Vsub(F32, temp1, in_reg, temp1);
+ __ Vcmp(F32, temp1, temp2);
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ {
+ // Use ExactAssemblyScope here because we are using IT.
+ ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ it(eq);
+ __ add(eq, out_reg, out_reg, 1);
+ }
+
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
+}
+
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
CreateIntToIntLocations(arena_, invoke);
}
@@ -1633,6 +1743,7 @@
vixl32::Label end;
vixl32::Label return_true;
vixl32::Label return_false;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &end);
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -1709,12 +1820,15 @@
// If loop does not result in returning false, we return true.
__ Bind(&return_true);
__ Mov(out, 1);
- __ B(&end);
+ __ B(final_label);
// Return false and exit the function.
__ Bind(&return_false);
__ Mov(out, 0);
- __ Bind(&end);
+
+ if (end.IsReferenced()) {
+ __ Bind(&end);
+ }
}
static void GenerateVisitStringIndexOf(HInvoke* invoke,
@@ -2243,143 +2357,116 @@
__ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
}
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
-
- // Compute the base source address in `temp1`.
- if (src_pos.IsConstant()) {
- int32_t constant = Int32ConstantFrom(src_pos);
- __ Add(temp1, src, element_size * constant + offset);
+ if (length.IsConstant() && Int32ConstantFrom(length) == 0) {
+ // Null constant length: no need to emit the loop code at all.
} else {
- __ Add(temp1, src, Operand(RegisterFrom(src_pos), vixl32::LSL, element_size_shift));
- __ Add(temp1, temp1, offset);
- }
+ vixl32::Label done;
+ const Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
- // Compute the end source address in `temp3`.
- if (length.IsConstant()) {
- int32_t constant = Int32ConstantFrom(length);
- __ Add(temp3, temp1, element_size * constant);
- } else {
- __ Add(temp3, temp1, Operand(RegisterFrom(length), vixl32::LSL, element_size_shift));
- }
+ if (length.IsRegister()) {
+ // Don't enter the copy loop if the length is null.
+ __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+ }
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // TODO: Also convert this intrinsic to the IsGcMarking strategy?
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // TODO: Also convert this intrinsic to the IsGcMarking strategy?
- // The base destination address is computed later, as `temp2` is
- // used for intermediate computations.
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
- // SystemArrayCopy implementation for Baker read barriers (see
- // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
- //
- // if (src_ptr != end_ptr) {
- // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // // Slow-path copy.
- // do {
- // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
- // } while (src_ptr != end_ptr)
- // } else {
- // // Fast-path copy.
- // do {
- // *dest_ptr++ = *src_ptr++;
- // } while (src_ptr != end_ptr)
- // }
- // }
+ // /* int32_t */ monitor = src->monitor_
+ __ Ldr(temp2, MemOperand(src, monitor_offset));
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
- vixl32::Label loop, done;
+ // Introduce a dependency on the lock_word including the rb_state,
+ // which shall prevent load-load reordering without using
+ // a memory barrier (which would be more expensive).
+ // `src` is unchanged by this operation, but its value now depends
+ // on `temp2`.
+ __ Add(src, src, Operand(temp2, vixl32::LSR, 32));
- // Don't enter copy loop if `length == 0`.
- __ Cmp(temp1, temp3);
- __ B(eq, &done, /* far_target */ false);
+ // Compute the base source address in `temp1`.
+ // Note that `temp1` (the base source address) is computed from
+ // `src` (and `src_pos`) here, and thus honors the artificial
+ // dependency of `src` on `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
+ // Compute the end source address in `temp3`.
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
+ // The base destination address is computed later, as `temp2` is
+ // used for intermediate computations.
- // /* int32_t */ monitor = src->monitor_
- __ Ldr(temp2, MemOperand(src, monitor_offset));
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
+ // Slow path used to copy array when `src` is gray.
+ // Note that the base destination address is computed in `temp2`
+ // by the slow path code.
+ SlowPathCodeARMVIXL* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
+ codegen_->AddSlowPath(read_barrier_slow_path);
- // Introduce a dependency on the lock_word including the rb_state,
- // which shall prevent load-load reordering without using
- // a memory barrier (which would be more expensive).
- // `src` is unchanged by this operation, but its value now depends
- // on `temp2`.
- __ Add(src, src, Operand(temp2, vixl32::LSR, 32));
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with LSRS
+ // which can be a 16-bit instruction unlike the TST immediate.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
+ // Carry flag is the last bit shifted out by LSRS.
+ __ B(cs, read_barrier_slow_path->GetEntryLabel());
- // Slow path used to copy array when `src` is gray.
- SlowPathCodeARMVIXL* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
- codegen_->AddSlowPath(read_barrier_slow_path);
+ // Fast-path copy.
+ // Compute the base destination address in `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ vixl32::Label loop;
+ __ Bind(&loop);
+ {
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ const vixl32::Register temp_reg = temps.Acquire();
+ __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
+ __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
+ }
+ __ Cmp(temp1, temp3);
+ __ B(ne, &loop, /* far_target */ false);
- // Given the numeric representation, it's enough to check the low bit of the
- // rb_state. We do that by shifting the bit out of the lock word with LSRS
- // which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
- // Carry flag is the last bit shifted out by LSRS.
- __ B(cs, read_barrier_slow_path->GetEntryLabel());
-
- // Fast-path copy.
-
- // Compute the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = Int32ConstantFrom(dest_pos);
- __ Add(temp2, dest, element_size * constant + offset);
+ __ Bind(read_barrier_slow_path->GetExitLabel());
} else {
- __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
- __ Add(temp2, temp2, offset);
+ // Non read barrier code.
+ // Compute the base source address in `temp1`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
+ // Compute the base destination address in `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
+ // Compute the end source address in `temp3`.
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ vixl32::Label loop;
+ __ Bind(&loop);
+ {
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ const vixl32::Register temp_reg = temps.Acquire();
+ __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
+ __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
+ }
+ __ Cmp(temp1, temp3);
+ __ B(ne, &loop, /* far_target */ false);
}
-
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- __ Bind(&loop);
-
- {
- UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
- const vixl32::Register temp_reg = temps.Acquire();
-
- __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
- __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
- }
-
- __ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
-
- __ Bind(read_barrier_slow_path->GetExitLabel());
- __ Bind(&done);
- } else {
- // Non read barrier code.
-
- // Compute the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = Int32ConstantFrom(dest_pos);
- __ Add(temp2, dest, element_size * constant + offset);
- } else {
- __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
- __ Add(temp2, temp2, offset);
- }
-
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- vixl32::Label loop, done;
- __ Cmp(temp1, temp3);
- __ B(eq, &done, /* far_target */ false);
- __ Bind(&loop);
-
- {
- UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
- const vixl32::Register temp_reg = temps.Acquire();
-
- __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
- __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
- }
-
- __ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
__ Bind(&done);
}
@@ -2779,13 +2866,14 @@
vixl32::Register dst_ptr = RegisterFrom(locations->GetTemp(2));
vixl32::Label done, compressed_string_loop;
+ vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &done);
// dst to be copied.
__ Add(dst_ptr, dstObj, data_offset);
__ Add(dst_ptr, dst_ptr, Operand(dstBegin, vixl32::LSL, 1));
__ Subs(num_chr, srcEnd, srcBegin);
// Early out for valid zero-length retrievals.
- __ B(eq, &done, /* far_target */ false);
+ __ B(eq, final_label, /* far_target */ false);
// src range to copy.
__ Add(src_ptr, srcObj, value_offset);
@@ -2829,7 +2917,7 @@
__ B(ge, &loop, /* far_target */ false);
__ Adds(num_chr, num_chr, 4);
- __ B(eq, &done, /* far_target */ false);
+ __ B(eq, final_label, /* far_target */ false);
// Main loop for < 4 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
@@ -2842,7 +2930,7 @@
__ B(gt, &remainder, /* far_target */ false);
if (mirror::kUseStringCompression) {
- __ B(&done);
+ __ B(final_label);
const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
DCHECK_EQ(c_char_size, 1u);
@@ -2858,7 +2946,9 @@
__ B(gt, &compressed_string_loop, /* far_target */ false);
}
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -3063,7 +3153,6 @@
}
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerHighestOneBit)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ba006ed..900b00e 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1514,21 +1514,31 @@
Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- bool can_call =
- invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
- invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile;
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Primitive::Type type) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- can_call ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall,
+ (can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall),
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(),
+ (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+ if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in InstructionCodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafeGet(HInvoke* invoke,
Primitive::Type type,
bool is_volatile,
@@ -1539,49 +1549,109 @@
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot)) << type;
MipsAssembler* assembler = codegen->GetAssembler();
+ // Target register.
+ Location trg_loc = locations->Out();
// Object pointer.
- Register base = locations->InAt(1).AsRegister<Register>();
+ Location base_loc = locations->InAt(1);
+ Register base = base_loc.AsRegister<Register>();
// The "offset" argument is passed as a "long". Since this code is for
// a 32-bit processor, we can only use 32-bit addresses, so we only
// need the low 32-bits of offset.
- Register offset_lo = invoke->GetLocations()->InAt(2).AsRegisterPairLow<Register>();
+ Location offset_loc = locations->InAt(2);
+ Register offset_lo = offset_loc.AsRegisterPairLow<Register>();
- __ Addu(TMP, base, offset_lo);
- if (is_volatile) {
- __ Sync(0);
+ if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == Primitive::kPrimNot))) {
+ __ Addu(TMP, base, offset_lo);
}
- if (type == Primitive::kPrimLong) {
- Register trg_lo = locations->Out().AsRegisterPairLow<Register>();
- Register trg_hi = locations->Out().AsRegisterPairHigh<Register>();
- if (is_R6) {
- __ Lw(trg_lo, TMP, 0);
- __ Lw(trg_hi, TMP, 4);
- } else {
- __ Lwr(trg_lo, TMP, 0);
- __ Lwl(trg_lo, TMP, 3);
- __ Lwr(trg_hi, TMP, 4);
- __ Lwl(trg_hi, TMP, 7);
- }
- } else {
- Register trg = locations->Out().AsRegister<Register>();
-
- if (is_R6) {
- __ Lw(trg, TMP, 0);
- } else {
- __ Lwr(trg, TMP, 0);
- __ Lwl(trg, TMP, 3);
+ switch (type) {
+ case Primitive::kPrimLong: {
+ Register trg_lo = trg_loc.AsRegisterPairLow<Register>();
+ Register trg_hi = trg_loc.AsRegisterPairHigh<Register>();
+ CHECK(!is_volatile); // TODO: support atomic 8-byte volatile loads.
+ if (is_R6) {
+ __ Lw(trg_lo, TMP, 0);
+ __ Lw(trg_hi, TMP, 4);
+ } else {
+ __ Lwr(trg_lo, TMP, 0);
+ __ Lwl(trg_lo, TMP, 3);
+ __ Lwr(trg_hi, TMP, 4);
+ __ Lwl(trg_hi, TMP, 7);
+ }
+ break;
}
- if (type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(trg);
+ case Primitive::kPrimInt: {
+ Register trg = trg_loc.AsRegister<Register>();
+ if (is_R6) {
+ __ Lw(trg, TMP, 0);
+ } else {
+ __ Lwr(trg, TMP, 0);
+ __ Lwl(trg, TMP, 3);
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ break;
}
+
+ case Primitive::kPrimNot: {
+ Register trg = trg_loc.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
+ trg_loc,
+ base,
+ /* offset */ 0U,
+ /* index */ offset_loc,
+ TIMES_1,
+ temp,
+ /* needs_null_check */ false);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ } else {
+ if (is_R6) {
+ __ Lw(trg, TMP, 0);
+ } else {
+ __ Lwr(trg, TMP, 0);
+ __ Lwl(trg, TMP, 3);
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ codegen->GenerateReadBarrierSlow(invoke,
+ trg_loc,
+ trg_loc,
+ base_loc,
+ /* offset */ 0U,
+ /* index */ offset_loc);
+ }
+ } else {
+ if (is_R6) {
+ __ Lw(trg, TMP, 0);
+ } else {
+ __ Lwr(trg, TMP, 0);
+ __ Lwl(trg, TMP, 3);
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ __ MaybeUnpoisonHeapReference(trg);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type " << type;
+ UNREACHABLE();
}
}
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
@@ -1590,7 +1660,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1599,25 +1669,16 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, IsR6(), codegen_);
}
-// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, IsR6(), codegen_);
-}
-
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1626,7 +1687,7 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
@@ -1643,6 +1704,8 @@
locations->SetInAt(3, Location::RequiresRegister());
}
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafePut(LocationSummary* locations,
Primitive::Type type,
bool is_volatile,
@@ -1681,7 +1744,7 @@
} else {
Register value_lo = locations->InAt(3).AsRegisterPairLow<Register>();
Register value_hi = locations->InAt(3).AsRegisterPairHigh<Register>();
-
+ CHECK(!is_volatile); // TODO: support atomic 8-byte volatile stores.
if (is_R6) {
__ Sw(value_lo, TMP, 0);
__ Sw(value_hi, TMP, 4);
@@ -1815,50 +1878,71 @@
codegen_);
}
-// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
- Primitive::kPrimLong,
- /* is_volatile */ true,
- /* is_ordered */ false,
- IsR6(),
- codegen_);
-}
-
-static void CreateIntIntIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ kUseBakerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
+ (can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall),
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
-
locations->SetOut(Location::RequiresRegister());
+
+ // Temporary register used in CAS by (Baker) read barrier.
+ if (can_call) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorMIPS* codegen) {
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* codegen) {
MipsAssembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
bool isR6 = codegen->GetInstructionSetFeatures().IsR6();
Register base = locations->InAt(1).AsRegister<Register>();
- Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
+ Location offset_loc = locations->InAt(2);
+ Register offset_lo = offset_loc.AsRegisterPairLow<Register>();
Register expected = locations->InAt(3).AsRegister<Register>();
Register value = locations->InAt(4).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
DCHECK_NE(base, out);
DCHECK_NE(offset_lo, out);
DCHECK_NE(expected, out);
if (type == Primitive::kPrimNot) {
- // Mark card for object assuming new value is stored.
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+ // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
+ // object and scan the receiver at the next GC for nothing.
bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(base, value, value_can_be_null);
+
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Need to make sure the reference stored in the field is a to-space
+ // one before attempting the CAS or the CAS could fail incorrectly.
+ codegen->GenerateReferenceLoadWithBakerReadBarrier(
+ invoke,
+ out_loc, // Unused, used only as a "temporary" within the read barrier.
+ base,
+ /* offset */ 0u,
+ /* index */ offset_loc,
+ ScaleFactor::TIMES_1,
+ temp,
+ /* needs_null_check */ false,
+ /* always_update_field */ true);
+ }
}
MipsLabel loop_head, exit_loop;
@@ -1926,20 +2010,30 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
- GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+ GenCas(invoke, Primitive::kPrimInt, codegen_);
}
// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntLocations(arena_, invoke);
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ return;
+ }
+
+ CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
- GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+ GenCas(invoke, Primitive::kPrimNot, codegen_);
}
// int java.lang.String.compareTo(String anotherString)
@@ -2559,7 +2653,7 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2567,17 +2661,9 @@
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // We will call memcpy() to do the actual work. Allocate the temporary
- // registers to use the correct input registers, and output register.
- // memcpy() uses the normal MIPS calling convention.
- InvokeRuntimeCallingConvention calling_convention;
-
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-
- Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
- locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -2596,16 +2682,11 @@
Register dstBegin = locations->InAt(4).AsRegister<Register>();
Register dstPtr = locations->GetTemp(0).AsRegister<Register>();
- DCHECK_EQ(dstPtr, A0);
Register srcPtr = locations->GetTemp(1).AsRegister<Register>();
- DCHECK_EQ(srcPtr, A1);
Register numChrs = locations->GetTemp(2).AsRegister<Register>();
- DCHECK_EQ(numChrs, A2);
-
- Register dstReturn = locations->GetTemp(3).AsRegister<Register>();
- DCHECK_EQ(dstReturn, V0);
MipsLabel done;
+ MipsLabel loop;
// Location of data in char array buffer.
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
@@ -2620,12 +2701,7 @@
// Calculate destination address.
__ Addiu(dstPtr, dstObj, data_offset);
- if (IsR6()) {
- __ Lsa(dstPtr, dstBegin, dstPtr, char_shift);
- } else {
- __ Sll(AT, dstBegin, char_shift);
- __ Addu(dstPtr, dstPtr, AT);
- }
+ __ ShiftAndAdd(dstPtr, dstBegin, dstPtr, char_shift);
if (mirror::kUseStringCompression) {
MipsLabel uncompressed_copy, compressed_loop;
@@ -2634,7 +2710,7 @@
__ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
__ Sll(TMP, TMP, 31);
- // If string is uncompressed, use memcpy() path.
+ // If string is uncompressed, use uncompressed path.
__ Bnez(TMP, &uncompressed_copy);
// Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
@@ -2653,17 +2729,15 @@
// Calculate source address.
__ Addiu(srcPtr, srcObj, value_offset);
- if (IsR6()) {
- __ Lsa(srcPtr, srcBegin, srcPtr, char_shift);
- } else {
- __ Sll(AT, srcBegin, char_shift);
- __ Addu(srcPtr, srcPtr, AT);
- }
+ __ ShiftAndAdd(srcPtr, srcBegin, srcPtr, char_shift);
- // Calculate number of bytes to copy from number of characters.
- __ Sll(numChrs, numChrs, char_shift);
-
- codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+ __ Bind(&loop);
+ __ Lh(AT, srcPtr, 0);
+ __ Addiu(numChrs, numChrs, -1);
+ __ Addiu(srcPtr, srcPtr, char_size);
+ __ Sh(AT, dstPtr, 0);
+ __ Addiu(dstPtr, dstPtr, char_size);
+ __ Bnez(numChrs, &loop);
__ Bind(&done);
}
@@ -2674,6 +2748,8 @@
UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile);
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile);
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 08b6fef..82d0567 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -834,15 +834,15 @@
__ Bnezc(AT, &done);
// Long outLong = floor/ceil(in);
- // if outLong == Long.MAX_VALUE {
+ // if (outLong == Long.MAX_VALUE) || (outLong == Long.MIN_VALUE) {
// // floor()/ceil() has almost certainly returned a value
// // which can't be successfully represented as a signed
// // 64-bit number. Java expects that the input value will
// // be returned in these cases.
// // There is also a small probability that floor(in)/ceil(in)
// // correctly truncates/rounds up the input value to
- // // Long.MAX_VALUE. In that case, this exception handling
- // // code still does the correct thing.
+ // // Long.MAX_VALUE or Long.MIN_VALUE. In these cases, this
+ // // exception handling code still does the correct thing.
// return in;
// }
if (mode == kFloor) {
@@ -852,8 +852,14 @@
}
__ Dmfc1(AT, out);
__ MovD(out, in);
- __ LoadConst64(TMP, kPrimLongMax);
- __ Beqc(AT, TMP, &done);
+ __ Daddiu(TMP, AT, 1);
+ __ Dati(TMP, 0x8000); // TMP = AT + 0x8000 0000 0000 0001
+ // or AT - 0x7FFF FFFF FFFF FFFF.
+ // IOW, TMP = 1 if AT = Long.MIN_VALUE
+ // or TMP = 0 if AT = Long.MAX_VALUE.
+ __ Dsrl(TMP, TMP, 1); // TMP = 0 if AT = Long.MIN_VALUE
+ // or AT = Long.MAX_VALUE.
+ __ Beqzc(TMP, &done);
// double out = outLong;
// return out;
@@ -1151,16 +1157,31 @@
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Primitive::Type type) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
+ (can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall),
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(),
+ (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+ if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in InstructionCodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafeGet(HInvoke* invoke,
Primitive::Type type,
bool is_volatile,
@@ -1168,30 +1189,71 @@
LocationSummary* locations = invoke->GetLocations();
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
- (type == Primitive::kPrimNot));
+ (type == Primitive::kPrimNot)) << type;
Mips64Assembler* assembler = codegen->GetAssembler();
+ // Target register.
+ Location trg_loc = locations->Out();
+ GpuRegister trg = trg_loc.AsRegister<GpuRegister>();
// Object pointer.
- GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ Location base_loc = locations->InAt(1);
+ GpuRegister base = base_loc.AsRegister<GpuRegister>();
// Long offset.
- GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
- GpuRegister trg = locations->Out().AsRegister<GpuRegister>();
+ Location offset_loc = locations->InAt(2);
+ GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
- __ Daddu(TMP, base, offset);
- if (is_volatile) {
- __ Sync(0);
+ if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == Primitive::kPrimNot))) {
+ __ Daddu(TMP, base, offset);
}
+
switch (type) {
+ case Primitive::kPrimLong:
+ __ Ld(trg, TMP, 0);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ break;
+
case Primitive::kPrimInt:
__ Lw(trg, TMP, 0);
+ if (is_volatile) {
+ __ Sync(0);
+ }
break;
case Primitive::kPrimNot:
- __ Lwu(trg, TMP, 0);
- __ MaybeUnpoisonHeapReference(trg);
- break;
-
- case Primitive::kPrimLong:
- __ Ld(trg, TMP, 0);
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
+ trg_loc,
+ base,
+ /* offset */ 0U,
+ /* index */ offset_loc,
+ TIMES_1,
+ temp,
+ /* needs_null_check */ false);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ } else {
+ __ Lwu(trg, TMP, 0);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ codegen->GenerateReadBarrierSlow(invoke,
+ trg_loc,
+ trg_loc,
+ base_loc,
+ /* offset */ 0U,
+ /* index */ offset_loc);
+ }
+ } else {
+ __ Lwu(trg, TMP, 0);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ __ MaybeUnpoisonHeapReference(trg);
+ }
break;
default:
@@ -1202,7 +1264,7 @@
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1211,7 +1273,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1220,7 +1282,7 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1229,7 +1291,7 @@
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
@@ -1238,7 +1300,7 @@
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1247,7 +1309,7 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
@@ -1264,6 +1326,8 @@
locations->SetInAt(3, Location::RequiresRegister());
}
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafePut(LocationSummary* locations,
Primitive::Type type,
bool is_volatile,
@@ -1429,35 +1493,70 @@
codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ kUseBakerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
+ (can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall),
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
-
locations->SetOut(Location::RequiresRegister());
+
+ // Temporary register used in CAS by (Baker) read barrier.
+ if (can_call) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorMIPS64* codegen) {
+// Note that the caller must supply a properly aligned memory address.
+// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* codegen) {
Mips64Assembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
- GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ Location offset_loc = locations->InAt(2);
+ GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
GpuRegister expected = locations->InAt(3).AsRegister<GpuRegister>();
GpuRegister value = locations->InAt(4).AsRegister<GpuRegister>();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ Location out_loc = locations->Out();
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
DCHECK_NE(base, out);
DCHECK_NE(offset, out);
DCHECK_NE(expected, out);
if (type == Primitive::kPrimNot) {
- // Mark card for object assuming new value is stored.
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+ // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
+ // object and scan the receiver at the next GC for nothing.
bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(base, value, value_can_be_null);
+
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Need to make sure the reference stored in the field is a to-space
+ // one before attempting the CAS or the CAS could fail incorrectly.
+ codegen->GenerateReferenceLoadWithBakerReadBarrier(
+ invoke,
+ out_loc, // Unused, used only as a "temporary" within the read barrier.
+ base,
+ /* offset */ 0u,
+ /* index */ offset_loc,
+ ScaleFactor::TIMES_1,
+ temp,
+ /* needs_null_check */ false,
+ /* always_update_field */ true);
+ }
}
Mips64Label loop_head, exit_loop;
@@ -1521,29 +1620,39 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
- GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+ GenCas(invoke, Primitive::kPrimInt, codegen_);
}
// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
- GenCas(invoke->GetLocations(), Primitive::kPrimLong, codegen_);
+ GenCas(invoke, Primitive::kPrimLong, codegen_);
}
// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke);
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ return;
+ }
+
+ CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
- GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+ GenCas(invoke, Primitive::kPrimNot, codegen_);
}
// int java.lang.String.compareTo(String anotherString)
@@ -1895,7 +2004,7 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1903,17 +2012,9 @@
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // We will call memcpy() to do the actual work. Allocate the temporary
- // registers to use the correct input registers, and output register.
- // memcpy() uses the normal MIPS calling conventions.
- InvokeRuntimeCallingConvention calling_convention;
-
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-
- Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimLong);
- locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -1932,16 +2033,11 @@
GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();
GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
- DCHECK_EQ(dstPtr, A0);
GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
- DCHECK_EQ(srcPtr, A1);
GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();
- DCHECK_EQ(numChrs, A2);
-
- GpuRegister dstReturn = locations->GetTemp(3).AsRegister<GpuRegister>();
- DCHECK_EQ(dstReturn, V0);
Mips64Label done;
+ Mips64Label loop;
// Location of data in char array buffer.
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
@@ -1965,7 +2061,7 @@
__ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
__ Dext(TMP, TMP, 0, 1);
- // If string is uncompressed, use memcpy() path.
+ // If string is uncompressed, use uncompressed path.
__ Bnezc(TMP, &uncompressed_copy);
// Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
@@ -1986,10 +2082,13 @@
__ Daddiu(srcPtr, srcObj, value_offset);
__ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
- // Calculate number of bytes to copy from number of characters.
- __ Dsll(numChrs, numChrs, char_shift);
-
- codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+ __ Bind(&loop);
+ __ Lh(AT, srcPtr, 0);
+ __ Daddiu(numChrs, numChrs, -1);
+ __ Daddiu(srcPtr, srcPtr, char_size);
+ __ Sh(AT, dstPtr, 0);
+ __ Daddiu(dstPtr, dstPtr, char_size);
+ __ Bnezc(numChrs, &loop);
__ Bind(&done);
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index a671788..ecf919b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2878,6 +2878,49 @@
return instruction->InputAt(input0) == instruction->InputAt(input1);
}
+// Compute base address for the System.arraycopy intrinsic in `base`.
+static void GenSystemArrayCopyBaseAddress(X86Assembler* assembler,
+ Primitive::Type type,
+ const Register& array,
+ const Location& pos,
+ const Register& base) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimNot as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type));
+ const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t constant = pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(base, Address(array, element_size * constant + data_offset));
+ } else {
+ __ leal(base, Address(array, pos.AsRegister<Register>(), scale_factor, data_offset));
+ }
+}
+
+// Compute end source address for the System.arraycopy intrinsic in `end`.
+static void GenSystemArrayCopyEndAddress(X86Assembler* assembler,
+ Primitive::Type type,
+ const Location& copy_length,
+ const Register& base,
+ const Register& end) {
+ // This routine is only used by the SystemArrayCopy intrinsic at the
+ // moment. We can allow Primitive::kPrimNot as `type` to implement
+ // the SystemArrayCopyChar intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type));
+
+ if (copy_length.IsConstant()) {
+ int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(end, Address(base, element_size * constant));
+ } else {
+ __ leal(end, Address(base, copy_length.AsRegister<Register>(), scale_factor, 0));
+ }
+}
+
void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
@@ -3182,16 +3225,11 @@
__ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
+ const Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
+
// Compute the base source address in `temp1`.
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- DCHECK_EQ(element_size, 4);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
- if (src_pos.IsConstant()) {
- int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp1, Address(src, element_size * constant + offset));
- } else {
- __ leal(temp1, Address(src, src_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
- }
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// If it is needed (in the case of the fast-path loop), the base
@@ -3199,20 +3237,15 @@
// intermediate computations.
// Compute the end source address in `temp3`.
- if (length.IsConstant()) {
- int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp3, Address(temp1, element_size * constant));
- } else {
- if (length.IsStackSlot()) {
- // Location `length` is again pointing at a stack slot, as
- // register `temp3` (which was containing the length parameter
- // earlier) has been overwritten; restore it now
- DCHECK(length.Equals(length_arg));
- __ movl(temp3, Address(ESP, length.GetStackIndex()));
- length = Location::RegisterLocation(temp3);
- }
- __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0));
+ if (length.IsStackSlot()) {
+ // Location `length` is again pointing at a stack slot, as
+ // register `temp3` (which was containing the length parameter
+ // earlier) has been overwritten; restore it now
+ DCHECK(length.Equals(length_arg));
+ __ movl(temp3, Address(ESP, length.GetStackIndex()));
+ length = Location::RegisterLocation(temp3);
}
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
// SystemArrayCopy implementation for Baker read barriers (see
// also CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier):
@@ -3266,15 +3299,8 @@
__ j(kNotZero, read_barrier_slow_path->GetEntryLabel());
// Fast-path copy.
-
- // Set the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp2, Address(dest, element_size * constant + offset));
- } else {
- __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
- }
-
+ // Compute the base destination address in `temp2`.
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
// Iterate over the arrays and do a raw copy of the objects. We don't need to
// poison/unpoison.
__ Bind(&loop);
@@ -3291,23 +3317,10 @@
__ Bind(&done);
} else {
// Non read barrier code.
-
// Compute the base destination address in `temp2`.
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp2, Address(dest, element_size * constant + offset));
- } else {
- __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
- }
-
+ GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
// Compute the end source address in `temp3`.
- if (length.IsConstant()) {
- int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp3, Address(temp1, element_size * constant));
- } else {
- __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0));
- }
-
+ GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
// Iterate over the arrays and do a raw copy of the objects. We don't need to
// poison/unpoison.
NearLabel loop, done;
@@ -3326,11 +3339,7 @@
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1,
- temp2,
- dest,
- Register(kNoRegister),
- /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1e17c6e..13956df 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1118,6 +1118,47 @@
CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
}
+// Compute base source address, base destination address, and end
+// source address for the System.arraycopy intrinsic in `src_base`,
+// `dst_base` and `src_end` respectively.
+static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler,
+ Primitive::Type type,
+ const CpuRegister& src,
+ const Location& src_pos,
+ const CpuRegister& dst,
+ const Location& dst_pos,
+ const Location& copy_length,
+ const CpuRegister& src_base,
+ const CpuRegister& dst_base,
+ const CpuRegister& src_end) {
+ // This routine is only used by the SystemArrayCopy intrinsic.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type));
+ const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_base, Address(src, element_size * constant + data_offset));
+ } else {
+ __ leal(src_base, Address(src, src_pos.AsRegister<CpuRegister>(), scale_factor, data_offset));
+ }
+
+ if (dst_pos.IsConstant()) {
+ int32_t constant = dst_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(dst_base, Address(dst, element_size * constant + data_offset));
+ } else {
+ __ leal(dst_base, Address(dst, dst_pos.AsRegister<CpuRegister>(), scale_factor, data_offset));
+ }
+
+ if (copy_length.IsConstant()) {
+ int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_end, Address(src_base, element_size * constant));
+ } else {
+ __ leal(src_end, Address(src_base, copy_length.AsRegister<CpuRegister>(), scale_factor, 0));
+ }
+}
+
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
@@ -1366,30 +1407,13 @@
__ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
- // Compute base source address, base destination address, and end source address.
+ const Primitive::Type type = Primitive::kPrimNot;
+ const int32_t element_size = Primitive::ComponentSize(type);
- int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
- if (src_pos.IsConstant()) {
- int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp1, Address(src, element_size * constant + offset));
- } else {
- __ leal(temp1, Address(src, src_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
- }
-
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp2, Address(dest, element_size * constant + offset));
- } else {
- __ leal(temp2, Address(dest, dest_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
- }
-
- if (length.IsConstant()) {
- int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp3, Address(temp1, element_size * constant));
- } else {
- __ leal(temp3, Address(temp1, length.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, 0));
- }
+ // Compute base source address, base destination address, and end
+ // source address in `temp1`, `temp2` and `temp3` respectively.
+ GenSystemArrayCopyAddresses(
+ GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3);
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// SystemArrayCopy implementation for Baker read barriers (see
@@ -1474,11 +1498,7 @@
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1,
- temp2,
- dest,
- CpuRegister(kNoRegister),
- /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 5bcfa4c..8d15f78 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -28,7 +28,18 @@
*/
class LICMTest : public CommonCompilerTest {
public:
- LICMTest() : pool_(), allocator_(&pool_) {
+ LICMTest()
+ : pool_(),
+ allocator_(&pool_),
+ entry_(nullptr),
+ loop_preheader_(nullptr),
+ loop_header_(nullptr),
+ loop_body_(nullptr),
+ return_(nullptr),
+ exit_(nullptr),
+ parameter_(nullptr),
+ int_constant_(nullptr),
+ float_constant_(nullptr) {
graph_ = CreateGraph(&allocator_);
}
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 091b58a..6f0dbce 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -69,11 +69,13 @@
// We do not use the value 9 because it conflicts with kLocationConstantMask.
kDoNotUse9 = 9,
+ kSIMDStackSlot = 10, // 128bit stack slot. TODO: generalize with encoded #bytes?
+
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 10,
+ kUnallocated = 11,
};
Location() : ValueObject(), value_(kInvalid) {
@@ -82,6 +84,7 @@
static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kSIMDStackSlot & kLocationConstantMask) != kConstant, "TagError");
static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
@@ -266,8 +269,20 @@
return GetKind() == kDoubleStackSlot;
}
+ static Location SIMDStackSlot(intptr_t stack_index) {
+ uintptr_t payload = EncodeStackIndex(stack_index);
+ Location loc(kSIMDStackSlot, payload);
+ // Ensure that sign is preserved.
+ DCHECK_EQ(loc.GetStackIndex(), stack_index);
+ return loc;
+ }
+
+ bool IsSIMDStackSlot() const {
+ return GetKind() == kSIMDStackSlot;
+ }
+
intptr_t GetStackIndex() const {
- DCHECK(IsStackSlot() || IsDoubleStackSlot());
+ DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSIMDStackSlot());
// Decode stack index manually to preserve sign.
return GetPayload() - kStackIndexBias;
}
@@ -315,6 +330,7 @@
case kRegister: return "R";
case kStackSlot: return "S";
case kDoubleStackSlot: return "DS";
+ case kSIMDStackSlot: return "SIMD";
case kUnallocated: return "U";
case kConstant: return "C";
case kFpuRegister: return "F";
@@ -417,6 +433,7 @@
class RegisterSet : public ValueObject {
public:
static RegisterSet Empty() { return RegisterSet(); }
+ static RegisterSet AllFpu() { return RegisterSet(0, -1); }
void Add(Location loc) {
if (loc.IsRegister()) {
@@ -462,6 +479,7 @@
private:
RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+ RegisterSet(uint32_t core, uint32_t fp) : core_registers_(core), floating_point_registers_(fp) {}
uint32_t core_registers_;
uint32_t floating_point_registers_;
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 8df513f..4710b32 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -16,11 +16,21 @@
#include "loop_optimization.h"
+#include "arch/instruction_set.h"
+#include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "arch/x86/instruction_set_features_x86.h"
+#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_driver.h"
#include "linear_order.h"
namespace art {
+// Enables vectorization (SIMDization) in the loop optimizer.
+static constexpr bool kEnableVectorization = true;
+
// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
@@ -53,6 +63,19 @@
return false;
}
+// Test vector restrictions.
+static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
+ return (restrictions & tested) != 0;
+}
+
+// Inserts an instruction.
+static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
+ DCHECK(block != nullptr);
+ DCHECK(instruction != nullptr);
+ block->InsertInstructionBefore(instruction, block->GetLastInstruction());
+ return instruction;
+}
+
//
// Class methods.
//
@@ -64,11 +87,15 @@
compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
+ global_allocator_(graph_->GetArena()),
top_loop_(nullptr),
last_loop_(nullptr),
iset_(nullptr),
induction_simplication_count_(0),
- simplified_(false) {
+ simplified_(false),
+ vector_length_(0),
+ vector_refs_(nullptr),
+ vector_map_(nullptr) {
}
void HLoopOptimization::Run() {
@@ -81,15 +108,13 @@
// Phase-local allocator that draws from the global pool. Since the allocator
// itself resides on the stack, it is destructed on exiting Run(), which
// implies its underlying memory is released immediately.
- ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+ ArenaAllocator allocator(global_allocator_->GetArenaPool());
loop_allocator_ = &allocator;
// Perform loop optimizations.
LocalRun();
-
if (top_loop_ == nullptr) {
- // All loops have been eliminated.
- graph_->SetHasLoops(false);
+ graph_->SetHasLoops(false); // no more loops
}
// Detach.
@@ -111,18 +136,29 @@
}
// Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
- // a temporary set that stores instructions using the phase-local allocator.
+ // temporary data structures using the phase-local allocator. All new HIR
+ // should use the global allocator.
if (top_loop_ != nullptr) {
ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ArenaSafeMap<HInstruction*, HInstruction*> map(
+ std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ // Attach.
iset_ = &iset;
+ vector_refs_ = &refs;
+ vector_map_ = ↦
+ // Traverse.
TraverseLoopsInnerToOuter(top_loop_);
- iset_ = nullptr; // detach
+ // Detach.
+ iset_ = nullptr;
+ vector_refs_ = nullptr;
+ vector_map_ = nullptr;
}
}
void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
DCHECK(loop_info != nullptr);
- LoopNode* node = new (loop_allocator_) LoopNode(loop_info); // phase-local allocator
+ LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
if (last_loop_ == nullptr) {
// First loop.
DCHECK(top_loop_ == nullptr);
@@ -170,7 +206,7 @@
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
// Visit inner loops first.
- int current_induction_simplification_count = induction_simplication_count_;
+ uint32_t current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
@@ -179,7 +215,7 @@
if (current_induction_simplification_count != induction_simplication_count_) {
induction_range_.ReVisit(node->loop_info);
}
- // Repeat simplifications in the body of this loop until no more changes occur.
+ // Repeat simplifications in the loop-body until no more changes occur.
// Note that since each simplification consists of eliminating code (without
// introducing new code), this process is always finite.
do {
@@ -187,13 +223,17 @@
SimplifyInduction(node);
SimplifyBlocks(node);
} while (simplified_);
- // Simplify inner loop.
+ // Optimize inner loop.
if (node->inner == nullptr) {
- SimplifyInnerLoop(node);
+ OptimizeInnerLoop(node);
}
}
}
+//
+// Optimization.
+//
+
void HLoopOptimization::SimplifyInduction(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
@@ -204,13 +244,9 @@
// for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->AsPhi();
- iset_->clear();
- int32_t use_count = 0;
- if (IsPhiInduction(phi) &&
- IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ false, &use_count) &&
- // No uses, or no early-exit with proper replacement.
- (use_count == 0 ||
- (!IsEarlyExit(node->loop_info) && TryReplaceWithLastValue(phi, preheader)))) {
+ iset_->clear(); // prepare phi induction
+ if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
+ TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
for (HInstruction* i : *iset_) {
RemoveFromCycle(i);
}
@@ -256,49 +292,47 @@
}
}
-bool HLoopOptimization::SimplifyInnerLoop(LoopNode* node) {
+void HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
// Ensure loop header logic is finite.
- int64_t tc = 0;
- if (!induction_range_.IsFinite(node->loop_info, &tc)) {
- return false;
+ int64_t trip_count = 0;
+ if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
+ return;
}
+
// Ensure there is only a single loop-body (besides the header).
HBasicBlock* body = nullptr;
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
if (it.Current() != header) {
if (body != nullptr) {
- return false;
+ return;
}
body = it.Current();
}
}
// Ensure there is only a single exit point.
if (header->GetSuccessors().size() != 2) {
- return false;
+ return;
}
HBasicBlock* exit = (header->GetSuccessors()[0] == body)
? header->GetSuccessors()[1]
: header->GetSuccessors()[0];
// Ensure exit can only be reached by exiting loop.
if (exit->GetPredecessors().size() != 1) {
- return false;
+ return;
}
// Detect either an empty loop (no side effects other than plain iteration) or
// a trivial loop (just iterating once). Replace subsequent index uses, if any,
// with the last value and remove the loop, possibly after unrolling its body.
HInstruction* phi = header->GetFirstPhi();
- iset_->clear();
- int32_t use_count = 0;
- if (IsEmptyHeader(header)) {
+ iset_->clear(); // prepare phi induction
+ if (TrySetSimpleLoopHeader(header)) {
bool is_empty = IsEmptyBody(body);
- if ((is_empty || tc == 1) &&
- IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
- // No uses, or proper replacement.
- (use_count == 0 || TryReplaceWithLastValue(phi, preheader))) {
+ if ((is_empty || trip_count == 1) &&
+ TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ true)) {
if (!is_empty) {
- // Unroll the loop body, which sees initial value of the index.
+ // Unroll the loop-body, which sees initial value of the index.
phi->ReplaceWith(phi->InputAt(0));
preheader->MergeInstructionsWith(body);
}
@@ -308,28 +342,719 @@
header->RemoveDominatedBlock(exit);
header->DisconnectAndDelete();
preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddInstruction(new (global_allocator_) HGoto());
preheader->AddDominatedBlock(exit);
exit->SetDominator(preheader);
RemoveLoop(node); // update hierarchy
+ return;
+ }
+ }
+
+ // Vectorize loop, if possible and valid.
+ if (kEnableVectorization) {
+ iset_->clear(); // prepare phi induction
+ if (TrySetSimpleLoopHeader(header) &&
+ CanVectorize(node, body, trip_count) &&
+ TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ true)) {
+ Vectorize(node, body, exit, trip_count);
+ graph_->SetHasSIMD(true); // flag SIMD usage
+ return;
+ }
+ }
+}
+
+//
+// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
+// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
+// Intel Press, June, 2004 (http://www.aartbik.com/).
+//
+
+bool HLoopOptimization::CanVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
+ // Reset vector bookkeeping.
+ vector_length_ = 0;
+ vector_refs_->clear();
+ vector_runtime_test_a_ =
+ vector_runtime_test_b_= nullptr;
+
+ // Phis in the loop-body prevent vectorization.
+ if (!block->GetPhis().IsEmpty()) {
+ return false;
+ }
+
+ // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
+  // occurrence, which allows passing attributes down the use tree.
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
+ return false; // failure to vectorize a left-hand-side
+ }
+ }
+
+ // Heuristics. Does vectorization seem profitable?
+ // TODO: refine
+ if (vector_length_ == 0) {
+ return false; // nothing found
+ } else if (0 < trip_count && trip_count < vector_length_) {
+ return false; // insufficient iterations
+ }
+
+ // Data dependence analysis. Find each pair of references with same type, where
+ // at least one is a write. Each such pair denotes a possible data dependence.
+ // This analysis exploits the property that differently typed arrays cannot be
+ // aliased, as well as the property that references either point to the same
+ // array or to two completely disjoint arrays, i.e., no partial aliasing.
+  // Other than a few simple heuristics, no detailed subscript analysis is done.
+ for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
+ for (auto j = i; ++j != vector_refs_->end(); ) {
+ if (i->type == j->type && (i->lhs || j->lhs)) {
+ // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
+ HInstruction* a = i->base;
+ HInstruction* b = j->base;
+ HInstruction* x = i->offset;
+ HInstruction* y = j->offset;
+ if (a == b) {
+ // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
+ // Conservatively assume a loop-carried data dependence otherwise, and reject.
+ if (x != y) {
+ return false;
+ }
+ } else {
+ // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
+ // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
+ // generating an explicit a != b disambiguation runtime test on the two references.
+ if (x != y) {
+ // For now, we reject after one test to avoid excessive overhead.
+ if (vector_runtime_test_a_ != nullptr) {
+ return false;
+ }
+ vector_runtime_test_a_ = a;
+ vector_runtime_test_b_ = b;
+ }
+ }
+ }
+ }
+ }
+
+ // Success!
+ return true;
+}
+
+void HLoopOptimization::Vectorize(LoopNode* node,
+ HBasicBlock* block,
+ HBasicBlock* exit,
+ int64_t trip_count) {
+ Primitive::Type induc_type = Primitive::kPrimInt;
+ HBasicBlock* header = node->loop_info->GetHeader();
+ HBasicBlock* preheader = node->loop_info->GetPreHeader();
+
+ // A cleanup is needed for any unknown trip count or for a known trip count
+ // with remainder iterations after vectorization.
+ bool needs_cleanup = trip_count == 0 || (trip_count % vector_length_) != 0;
+
+ // Adjust vector bookkeeping.
+ iset_->clear(); // prepare phi induction
+ bool is_simple_loop_header = TrySetSimpleLoopHeader(header); // fills iset_
+ DCHECK(is_simple_loop_header);
+
+ // Generate preheader:
+ // stc = <trip-count>;
+ // vtc = stc - stc % VL;
+ HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
+ HInstruction* vtc = stc;
+ if (needs_cleanup) {
+ DCHECK(IsPowerOfTwo(vector_length_));
+ HInstruction* rem = Insert(
+ preheader, new (global_allocator_) HAnd(induc_type,
+ stc,
+ graph_->GetIntConstant(vector_length_ - 1)));
+ vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
+ }
+
+ // Generate runtime disambiguation test:
+ // vtc = a != b ? vtc : 0;
+ if (vector_runtime_test_a_ != nullptr) {
+ HInstruction* rt = Insert(
+ preheader,
+ new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
+ vtc = Insert(preheader,
+ new (global_allocator_) HSelect(rt, vtc, graph_->GetIntConstant(0), kNoDexPc));
+ needs_cleanup = true;
+ }
+
+ // Generate vector loop:
+ // for (i = 0; i < vtc; i += VL)
+ // <vectorized-loop-body>
+ vector_mode_ = kVector;
+ GenerateNewLoop(node,
+ block,
+ graph_->TransformLoopForVectorization(header, block, exit),
+ graph_->GetIntConstant(0),
+ vtc,
+ graph_->GetIntConstant(vector_length_));
+ HLoopInformation* vloop = vector_header_->GetLoopInformation();
+
+ // Generate cleanup loop, if needed:
+ // for ( ; i < stc; i += 1)
+ // <loop-body>
+ if (needs_cleanup) {
+ vector_mode_ = kSequential;
+ GenerateNewLoop(node,
+ block,
+ graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
+ vector_phi_,
+ stc,
+ graph_->GetIntConstant(1));
+ }
+
+ // Remove the original loop by disconnecting the body block
+ // and removing all instructions from the header.
+ block->DisconnectAndDelete();
+ while (!header->GetFirstInstruction()->IsGoto()) {
+ header->RemoveInstruction(header->GetFirstInstruction());
+ }
+ // Update loop hierarchy: the old header now resides in the
+ // same outer loop as the old preheader.
+ header->SetLoopInformation(preheader->GetLoopInformation()); // outward
+ node->loop_info = vloop;
+}
+
+void HLoopOptimization::GenerateNewLoop(LoopNode* node,
+ HBasicBlock* block,
+ HBasicBlock* new_preheader,
+ HInstruction* lo,
+ HInstruction* hi,
+ HInstruction* step) {
+ Primitive::Type induc_type = Primitive::kPrimInt;
+ // Prepare new loop.
+ vector_map_->clear();
+ vector_preheader_ = new_preheader,
+ vector_header_ = vector_preheader_->GetSingleSuccessor();
+ vector_body_ = vector_header_->GetSuccessors()[1];
+ vector_phi_ = new (global_allocator_) HPhi(global_allocator_,
+ kNoRegNumber,
+ 0,
+ HPhi::ToPhiType(induc_type));
+ // Generate header and prepare body.
+ // for (i = lo; i < hi; i += step)
+ // <loop-body>
+ HInstruction* cond = new (global_allocator_) HAboveOrEqual(vector_phi_, hi);
+ vector_header_->AddPhi(vector_phi_);
+ vector_header_->AddInstruction(cond);
+ vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
+ DCHECK(vectorized_def);
+ }
+ // Generate body from the instruction map, but in original program order.
+ HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ auto i = vector_map_->find(it.Current());
+ if (i != vector_map_->end() && !i->second->IsInBlock()) {
+ Insert(vector_body_, i->second);
+ // Deal with instructions that need an environment, such as the scalar intrinsics.
+ if (i->second->NeedsEnvironment()) {
+ i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
+ }
+ }
+ }
+ // Finalize increment and phi.
+ HInstruction* inc = new (global_allocator_) HAdd(induc_type, vector_phi_, step);
+ vector_phi_->AddInput(lo);
+ vector_phi_->AddInput(Insert(vector_body_, inc));
+}
+
+// TODO: accept reductions at left-hand-side, mixed-type store idioms, etc.
+bool HLoopOptimization::VectorizeDef(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code) {
+ // Accept a left-hand-side array base[index] for
+ // (1) supported vector type,
+ // (2) loop-invariant base,
+ // (3) unit stride index,
+ // (4) vectorizable right-hand-side value.
+ uint64_t restrictions = kNone;
+ if (instruction->IsArraySet()) {
+ Primitive::Type type = instruction->AsArraySet()->GetComponentType();
+ HInstruction* base = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ HInstruction* value = instruction->InputAt(2);
+ HInstruction* offset = nullptr;
+ if (TrySetVectorType(type, &restrictions) &&
+ node->loop_info->IsDefinedOutOfTheLoop(base) &&
+ induction_range_.IsUnitStride(instruction, index, &offset) &&
+ VectorizeUse(node, value, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecSub(index, offset);
+ GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), type);
+ } else {
+ vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
+ }
return true;
}
+ return false;
+ }
+ // Branch back okay.
+ if (instruction->IsGoto()) {
+ return true;
+ }
+ // Otherwise accept only expressions with no effects outside the immediate loop-body.
+ // Note that actual uses are inspected during right-hand-side tree traversal.
+ return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
+}
+
+// TODO: more operations and intrinsics, detect saturation arithmetic, etc.
+bool HLoopOptimization::VectorizeUse(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ Primitive::Type type,
+ uint64_t restrictions) {
+ // Accept anything for which code has already been generated.
+ if (generate_code) {
+ if (vector_map_->find(instruction) != vector_map_->end()) {
+ return true;
+ }
+ }
+ // Continue the right-hand-side tree traversal, passing in proper
+ // types and vector restrictions along the way. During code generation,
+ // all new nodes are drawn from the global allocator.
+ if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
+ // Accept invariant use, using scalar expansion.
+ if (generate_code) {
+ GenerateVecInv(instruction, type);
+ }
+ return true;
+ } else if (instruction->IsArrayGet()) {
+ // Strings are different, with a different offset to the actual data
+ // and some compressed to save memory. For now, all cases are rejected
+ // to avoid the complexity.
+ if (instruction->AsArrayGet()->IsStringCharAt()) {
+ return false;
+ }
+ // Accept a right-hand-side array base[index] for
+ // (1) exact matching vector type,
+ // (2) loop-invariant base,
+ // (3) unit stride index,
+ // (4) vectorizable right-hand-side value.
+ HInstruction* base = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ HInstruction* offset = nullptr;
+ if (type == instruction->GetType() &&
+ node->loop_info->IsDefinedOutOfTheLoop(base) &&
+ induction_range_.IsUnitStride(instruction, index, &offset)) {
+ if (generate_code) {
+ GenerateVecSub(index, offset);
+ GenerateVecMem(instruction, vector_map_->Get(index), nullptr, type);
+ } else {
+ vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false));
+ }
+ return true;
+ }
+ } else if (instruction->IsTypeConversion()) {
+ // Accept particular type conversions.
+ HTypeConversion* conversion = instruction->AsTypeConversion();
+ HInstruction* opa = conversion->InputAt(0);
+ Primitive::Type from = conversion->GetInputType();
+ Primitive::Type to = conversion->GetResultType();
+ if ((to == Primitive::kPrimByte ||
+ to == Primitive::kPrimChar ||
+ to == Primitive::kPrimShort) && from == Primitive::kPrimInt) {
+ // Accept a "narrowing" type conversion from a "wider" computation for
+ // (1) conversion into final required type,
+ // (2) vectorizable operand,
+ // (3) "wider" operations cannot bring in higher order bits.
+ if (to == type && VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) {
+ if (generate_code) {
+ if (vector_mode_ == kVector) {
+ vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
+ } else {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ }
+ return true;
+ }
+ } else if (to == Primitive::kPrimFloat && from == Primitive::kPrimInt) {
+ DCHECK_EQ(to, type);
+ // Accept int to float conversion for
+ // (1) supported int,
+ // (2) vectorizable operand.
+ if (TrySetVectorType(from, &restrictions) &&
+ VectorizeUse(node, opa, generate_code, from, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ return true;
+ }
+ }
+ return false;
+ } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
+ // Accept unary operator for vectorizable operand.
+ HInstruction* opa = instruction->InputAt(0);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ return true;
+ }
+ } else if (instruction->IsAdd() || instruction->IsSub() ||
+ instruction->IsMul() || instruction->IsDiv() ||
+ instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+ // Deal with vector restrictions.
+ if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
+ (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
+ return false;
+ }
+ // Accept binary operator for vectorizable operands.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
+ VectorizeUse(node, opb, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
+ }
+ return true;
+ }
+ } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
+ // Deal with vector restrictions.
+ if ((HasVectorRestrictions(restrictions, kNoShift)) ||
+ (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
+ return false; // unsupported instruction
+ } else if ((instruction->IsShr() || instruction->IsUShr()) &&
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ return false; // hibits may impact lobits; TODO: we can do better!
+ }
+ // Accept shift operator for vectorizable/invariant operands.
+ // TODO: accept symbolic, albeit loop invariant shift factors.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions) && opb->IsIntConstant()) {
+ if (generate_code) {
+ // Make sure shift factor only looks at lower bits, as defined for sequential shifts.
+ // Note that even the narrower SIMD shifts do the right thing after that.
+ int32_t mask = (instruction->GetType() == Primitive::kPrimLong)
+ ? kMaxLongShiftDistance
+ : kMaxIntShiftDistance;
+ HInstruction* s = graph_->GetIntConstant(opb->AsIntConstant()->GetValue() & mask);
+ GenerateVecOp(instruction, vector_map_->Get(opa), s, type);
+ }
+ return true;
+ }
+ } else if (instruction->IsInvokeStaticOrDirect()) {
+ // Accept particular intrinsics.
+ HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble: {
+ // Deal with vector restrictions.
+ if (HasVectorRestrictions(restrictions, kNoAbs) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
+ return false;
+ }
+ // Accept ABS(x) for vectorizable operand.
+ HInstruction* opa = instruction->InputAt(0);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ return true;
+ }
+ return false;
+ }
+ default:
+ return false;
+ } // switch
}
return false;
}
-bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
+bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restrictions) {
+ const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
+ switch (compiler_driver_->GetInstructionSet()) {
+ case kArm:
+ case kThumb2:
+ return false;
+ case kArm64:
+ // Allow vectorization for all ARM devices, because Android assumes that
+ // ARMv8 AArch64 always supports advanced SIMD.
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ *restrictions |= kNoDiv | kNoAbs;
+ return TrySetVectorLength(16);
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ *restrictions |= kNoDiv | kNoAbs;
+ return TrySetVectorLength(8);
+ case Primitive::kPrimInt:
+ *restrictions |= kNoDiv;
+ return TrySetVectorLength(4);
+ case Primitive::kPrimLong:
+ *restrictions |= kNoDiv | kNoMul;
+ return TrySetVectorLength(2);
+ case Primitive::kPrimFloat:
+ return TrySetVectorLength(4);
+ case Primitive::kPrimDouble:
+ return TrySetVectorLength(2);
+ default:
+ return false;
+ }
+ case kX86:
+ case kX86_64:
+ // Allow vectorization for SSE4-enabled X86 devices only (128-bit vectors).
+ if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ *restrictions |= kNoMul | kNoDiv | kNoShift | kNoAbs;
+ return TrySetVectorLength(16);
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ *restrictions |= kNoDiv | kNoAbs;
+ return TrySetVectorLength(8);
+ case Primitive::kPrimInt:
+ *restrictions |= kNoDiv;
+ return TrySetVectorLength(4);
+ case Primitive::kPrimLong:
+ *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs;
+ return TrySetVectorLength(2);
+ case Primitive::kPrimFloat:
+ return TrySetVectorLength(4);
+ case Primitive::kPrimDouble:
+ return TrySetVectorLength(2);
+ default:
+ break;
+ } // switch type
+ }
+ return false;
+ case kMips:
+ case kMips64:
+ // TODO: implement MIPS SIMD.
+ return false;
+ default:
+ return false;
+ } // switch instruction set
+}
+
+bool HLoopOptimization::TrySetVectorLength(uint32_t length) {
+ DCHECK(IsPowerOfTwo(length) && length >= 2u);
+ // First time set?
+ if (vector_length_ == 0) {
+ vector_length_ = length;
+ }
+ // Different types are acceptable within a loop-body, as long as all the corresponding vector
+ // lengths match exactly to obtain a uniform traversal through the vector iteration space
+ // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
+ return vector_length_ == length;
+}
+
+void HLoopOptimization::GenerateVecInv(HInstruction* org, Primitive::Type type) {
+ if (vector_map_->find(org) == vector_map_->end()) {
+ // In scalar code, just use a self pass-through for scalar invariants
+ // (viz. expression remains itself).
+ if (vector_mode_ == kSequential) {
+ vector_map_->Put(org, org);
+ return;
+ }
+ // In vector code, explicit scalar expansion is needed.
+ HInstruction* vector = new (global_allocator_) HVecReplicateScalar(
+ global_allocator_, org, type, vector_length_);
+ vector_map_->Put(org, Insert(vector_preheader_, vector));
+ }
+}
+
+void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
+ if (vector_map_->find(org) == vector_map_->end()) {
+ HInstruction* subscript = vector_phi_;
+ if (offset != nullptr) {
+ subscript = new (global_allocator_) HAdd(Primitive::kPrimInt, subscript, offset);
+ if (org->IsPhi()) {
+ Insert(vector_body_, subscript); // lacks layout placeholder
+ }
+ }
+ vector_map_->Put(org, subscript);
+ }
+}
+
+void HLoopOptimization::GenerateVecMem(HInstruction* org,
+ HInstruction* opa,
+ HInstruction* opb,
+ Primitive::Type type) {
+ HInstruction* vector = nullptr;
+ if (vector_mode_ == kVector) {
+ // Vector store or load.
+ if (opb != nullptr) {
+ vector = new (global_allocator_) HVecStore(
+ global_allocator_, org->InputAt(0), opa, opb, type, vector_length_);
+ } else {
+ vector = new (global_allocator_) HVecLoad(
+ global_allocator_, org->InputAt(0), opa, type, vector_length_);
+ }
+ } else {
+ // Scalar store or load.
+ DCHECK(vector_mode_ == kSequential);
+ if (opb != nullptr) {
+ vector = new (global_allocator_) HArraySet(org->InputAt(0), opa, opb, type, kNoDexPc);
+ } else {
+ vector = new (global_allocator_) HArrayGet(org->InputAt(0), opa, type, kNoDexPc);
+ }
+ }
+ vector_map_->Put(org, vector);
+}
+
+#define GENERATE_VEC(x, y) \
+ if (vector_mode_ == kVector) { \
+ vector = (x); \
+ } else { \
+ DCHECK(vector_mode_ == kSequential); \
+ vector = (y); \
+ } \
+ break;
+
+void HLoopOptimization::GenerateVecOp(HInstruction* org,
+ HInstruction* opa,
+ HInstruction* opb,
+ Primitive::Type type) {
+ if (vector_mode_ == kSequential) {
+ // Scalar code follows implicit integral promotion.
+ if (type == Primitive::kPrimBoolean ||
+ type == Primitive::kPrimByte ||
+ type == Primitive::kPrimChar ||
+ type == Primitive::kPrimShort) {
+ type = Primitive::kPrimInt;
+ }
+ }
+ HInstruction* vector = nullptr;
+ switch (org->GetKind()) {
+ case HInstruction::kNeg:
+ DCHECK(opb == nullptr);
+ GENERATE_VEC(
+ new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_),
+ new (global_allocator_) HNeg(type, opa));
+ case HInstruction::kNot:
+ DCHECK(opb == nullptr);
+ GENERATE_VEC(
+ new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
+ new (global_allocator_) HNot(type, opa));
+ case HInstruction::kBooleanNot:
+ DCHECK(opb == nullptr);
+ GENERATE_VEC(
+ new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
+ new (global_allocator_) HBooleanNot(opa));
+ case HInstruction::kTypeConversion:
+ DCHECK(opb == nullptr);
+ GENERATE_VEC(
+ new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_),
+ new (global_allocator_) HTypeConversion(type, opa, kNoDexPc));
+ case HInstruction::kAdd:
+ GENERATE_VEC(
+ new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HAdd(type, opa, opb));
+ case HInstruction::kSub:
+ GENERATE_VEC(
+ new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HSub(type, opa, opb));
+ case HInstruction::kMul:
+ GENERATE_VEC(
+ new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HMul(type, opa, opb));
+ case HInstruction::kDiv:
+ GENERATE_VEC(
+ new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HDiv(type, opa, opb, kNoDexPc));
+ case HInstruction::kAnd:
+ GENERATE_VEC(
+ new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HAnd(type, opa, opb));
+ case HInstruction::kOr:
+ GENERATE_VEC(
+ new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HOr(type, opa, opb));
+ case HInstruction::kXor:
+ GENERATE_VEC(
+ new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HXor(type, opa, opb));
+ case HInstruction::kShl:
+ GENERATE_VEC(
+ new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HShl(type, opa, opb));
+ case HInstruction::kShr:
+ GENERATE_VEC(
+ new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HShr(type, opa, opb));
+ case HInstruction::kUShr:
+ GENERATE_VEC(
+ new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_),
+ new (global_allocator_) HUShr(type, opa, opb));
+ case HInstruction::kInvokeStaticOrDirect: {
+ HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
+ if (vector_mode_ == kVector) {
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble:
+ DCHECK(opb == nullptr);
+ vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD intrinsic";
+ UNREACHABLE();
+ } // switch invoke
+ } else {
+ // In scalar code, simply clone the method invoke, and replace its operands with the
+ // corresponding new scalar instructions in the loop. The instruction will get an
+ // environment while being inserted from the instruction map in original program order.
+ DCHECK(vector_mode_ == kSequential);
+ HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
+ global_allocator_,
+ invoke->GetNumberOfArguments(),
+ invoke->GetType(),
+ invoke->GetDexPc(),
+ invoke->GetDexMethodIndex(),
+ invoke->GetResolvedMethod(),
+ invoke->GetDispatchInfo(),
+ invoke->GetInvokeType(),
+ invoke->GetTargetMethod(),
+ invoke->GetClinitCheckRequirement());
+ HInputsRef inputs = invoke->GetInputs();
+ for (size_t index = 0; index < inputs.size(); ++index) {
+ new_invoke->SetArgumentAt(index, vector_map_->Get(inputs[index]));
+ }
+ new_invoke->SetIntrinsic(invoke->GetIntrinsic(),
+ kNeedsEnvironmentOrCache,
+ kNoSideEffects,
+ kNoThrow);
+ vector = new_invoke;
+ }
+ break;
+ }
+ default:
+ break;
+ } // switch
+ CHECK(vector != nullptr) << "Unsupported SIMD operator";
+ vector_map_->Put(org, vector);
+}
+
+#undef GENERATE_VEC
+
+//
+// Helpers.
+//
+
+bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
+ DCHECK(iset_->empty());
ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
if (set != nullptr) {
- DCHECK(iset_->empty());
for (HInstruction* i : *set) {
// Check that, other than instructions that are no longer in the graph (removed earlier)
- // each instruction is removable and, other than the phi, uses are contained in the cycle.
+ // each instruction is removable and, when restrict uses are requested, other than for phi,
+ // all uses are contained within the cycle.
if (!i->IsInBlock()) {
continue;
} else if (!i->IsRemovable()) {
return false;
- } else if (i != phi) {
+ } else if (i != phi && restrict_uses) {
for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
if (set->find(use.GetUser()) == set->end()) {
return false;
@@ -348,10 +1073,12 @@
// c: Condition(phi, bound)
// i: If(c)
// TODO: Find a less pattern matching approach?
-bool HLoopOptimization::IsEmptyHeader(HBasicBlock* block) {
+bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block) {
DCHECK(iset_->empty());
HInstruction* phi = block->GetFirstPhi();
- if (phi != nullptr && phi->GetNext() == nullptr && IsPhiInduction(phi->AsPhi())) {
+ if (phi != nullptr &&
+ phi->GetNext() == nullptr &&
+ TrySetPhiInduction(phi->AsPhi(), /*restrict_uses*/ false)) {
HInstruction* s = block->GetFirstInstruction();
if (s != nullptr && s->IsSuspendCheck()) {
HInstruction* c = s->GetNext();
@@ -369,14 +1096,24 @@
}
bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
- if (block->GetFirstPhi() == nullptr) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
- return false;
- }
+ if (!block->GetPhis().IsEmpty()) {
+ return false;
+ }
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
+ return false;
}
- return true;
+ }
+ return true;
+}
+
+bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
+ HInstruction* instruction) {
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
+ return true;
+ }
}
return false;
}
@@ -438,6 +1175,19 @@
return false;
}
+bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
+ HInstruction* instruction,
+ HBasicBlock* block,
+ bool collect_loop_uses) {
+ // Assigning the last value is always successful if there are no uses.
+ // Otherwise, it succeeds in a no early-exit loop by generating the
+ // proper last value assignment.
+ int32_t use_count = 0;
+ return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
+ (use_count == 0 ||
+ (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(instruction, block)));
+}
+
void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
HInstruction* instruction = i.Current();
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 0b798fc..d8f50aa 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -27,7 +27,8 @@
/**
* Loop optimizations. Builds a loop hierarchy and applies optimizations to
- * the detected nested loops, such as removal of dead induction and empty loops.
+ * the detected nested loops, such as removal of dead induction and empty loops
+ * and inner loop vectorization.
*/
class HLoopOptimization : public HOptimization {
public:
@@ -50,34 +51,106 @@
inner(nullptr),
previous(nullptr),
next(nullptr) {}
- HLoopInformation* const loop_info;
+ HLoopInformation* loop_info;
LoopNode* outer;
LoopNode* inner;
LoopNode* previous;
LoopNode* next;
};
- void LocalRun();
+ /*
+ * Vectorization restrictions (bit mask).
+ */
+ enum VectorRestrictions {
+ kNone = 0, // no restrictions
+ kNoMul = 1, // no multiplication
+ kNoDiv = 2, // no division
+ kNoShift = 4, // no shift
+ kNoShr = 8, // no arithmetic shift right
+ kNoHiBits = 16, // "wider" operations cannot bring in higher order bits
+ kNoAbs = 32, // no absolute value
+ };
+ /*
+ * Vectorization mode during synthesis
+ * (sequential peeling/cleanup loop or vector loop).
+ */
+ enum VectorMode {
+ kSequential,
+ kVector
+ };
+
+ /*
+ * Representation of a unit-stride array reference.
+ */
+ struct ArrayReference {
+ ArrayReference(HInstruction* b, HInstruction* o, Primitive::Type t, bool l)
+ : base(b), offset(o), type(t), lhs(l) { }
+ bool operator<(const ArrayReference& other) const {
+ return
+ (base < other.base) ||
+ (base == other.base &&
+ (offset < other.offset || (offset == other.offset &&
+ (type < other.type ||
+ (type == other.type && lhs < other.lhs)))));
+ }
+ HInstruction* base; // base address
+ HInstruction* offset; // offset + i
+ Primitive::Type type; // component type
+ bool lhs; // def/use
+ };
+
+ // Loop setup and traversal.
+ void LocalRun();
void AddLoop(HLoopInformation* loop_info);
void RemoveLoop(LoopNode* node);
-
void TraverseLoopsInnerToOuter(LoopNode* node);
- // Simplification.
+ // Optimization.
void SimplifyInduction(LoopNode* node);
void SimplifyBlocks(LoopNode* node);
- bool SimplifyInnerLoop(LoopNode* node);
+ void OptimizeInnerLoop(LoopNode* node);
+
+ // Vectorization analysis and synthesis.
+ bool CanVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count);
+ void Vectorize(LoopNode* node, HBasicBlock* block, HBasicBlock* exit, int64_t trip_count);
+ void GenerateNewLoop(LoopNode* node,
+ HBasicBlock* block,
+ HBasicBlock* new_preheader,
+ HInstruction* lo,
+ HInstruction* hi,
+ HInstruction* step);
+ bool VectorizeDef(LoopNode* node, HInstruction* instruction, bool generate_code);
+ bool VectorizeUse(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ Primitive::Type type,
+ uint64_t restrictions);
+ bool TrySetVectorType(Primitive::Type type, /*out*/ uint64_t* restrictions);
+ bool TrySetVectorLength(uint32_t length);
+ void GenerateVecInv(HInstruction* org, Primitive::Type type);
+ void GenerateVecSub(HInstruction* org, HInstruction* off);
+ void GenerateVecMem(HInstruction* org,
+ HInstruction* opa,
+ HInstruction* opb,
+ Primitive::Type type);
+ void GenerateVecOp(HInstruction* org, HInstruction* opa, HInstruction* opb, Primitive::Type type);
// Helpers.
- bool IsPhiInduction(HPhi* phi);
- bool IsEmptyHeader(HBasicBlock* block);
+ bool TrySetPhiInduction(HPhi* phi, bool restrict_uses);
+ bool TrySetSimpleLoopHeader(HBasicBlock* block);
bool IsEmptyBody(HBasicBlock* block);
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
bool collect_loop_uses,
/*out*/ int32_t* use_count);
+ bool IsUsedOutsideLoop(HLoopInformation* loop_info,
+ HInstruction* instruction);
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
+ bool TryAssignLastValue(HLoopInformation* loop_info,
+ HInstruction* instruction,
+ HBasicBlock* block,
+ bool collect_loop_uses);
void RemoveDeadInstructions(const HInstructionList& list);
// Compiler driver (to query ISA features).
@@ -90,6 +163,9 @@
// through this allocator is immediately released when the loop optimizer is done.
ArenaAllocator* loop_allocator_;
+ // Global heap memory allocator. Used to build HIR.
+ ArenaAllocator* global_allocator_;
+
// Entries into the loop hierarchy representation. The hierarchy resides
// in phase-local heap memory.
LoopNode* top_loop_;
@@ -102,11 +178,33 @@
// Counter that tracks how many induction cycles have been simplified. Useful
// to trigger incremental updates of induction variable analysis of outer loops
// when the induction of inner loops has changed.
- int32_t induction_simplication_count_;
+ uint32_t induction_simplication_count_;
// Flag that tracks if any simplifications have occurred.
bool simplified_;
+ // Number of "lanes" for selected packed type.
+ uint32_t vector_length_;
+
+ // Set of array references in the vector loop.
+ // Contents reside in phase-local heap memory.
+ ArenaSet<ArrayReference>* vector_refs_;
+
+  // Mapping used during vectorization synthesis for both the scalar peeling/cleanup
+  // loop (vector_mode_ == kSequential) and the actual vector loop (vector_mode_ ==
+  // kVector). The data structure maps original instructions into the new instructions.
+ // Contents reside in phase-local heap memory.
+ ArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
+
+ // Temporary vectorization bookkeeping.
+ HBasicBlock* vector_preheader_; // preheader of the new loop
+ HBasicBlock* vector_header_; // header of the new loop
+ HBasicBlock* vector_body_; // body of the new loop
+ HInstruction* vector_runtime_test_a_;
+ HInstruction* vector_runtime_test_b_; // defines a != b runtime test
+ HPhi* vector_phi_; // the Phi representing the normalized loop index
+ VectorMode vector_mode_; // selects synthesis mode
+
friend class LoopOptimizationTest;
DISALLOW_COPY_AND_ASSIGN(HLoopOptimization);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 020e446..e71fea9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1088,6 +1088,19 @@
DCHECK(env_uses_.empty());
}
+void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement) {
+ const HUseList<HInstruction*>& uses = GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
+ ++it;
+ if (dominator->StrictlyDominates(user)) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+}
+
void HInstruction::ReplaceInput(HInstruction* replacement, size_t index) {
HUserRecord<HInstruction*> input_use = InputRecordAt(index);
if (input_use.GetInstruction() == replacement) {
@@ -1323,6 +1336,18 @@
}
}
+std::ostream& operator<<(std::ostream& os, const HDeoptimize::Kind& rhs) {
+ switch (rhs) {
+ case HDeoptimize::Kind::kBCE:
+ return os << "bce";
+ case HDeoptimize::Kind::kInline:
+ return os << "inline";
+ default:
+ LOG(FATAL) << "Unknown Deoptimization kind: " << static_cast<int>(rhs);
+ UNREACHABLE();
+ }
+}
+
bool HCondition::IsBeforeWhenDisregardMoves(HInstruction* instruction) const {
return this == instruction->GetPreviousDisregardingMoves();
}
@@ -2046,6 +2071,9 @@
if (HasTryCatch()) {
outer_graph->SetHasTryCatch(true);
}
+ if (HasSIMD()) {
+ outer_graph->SetHasSIMD(true);
+ }
HInstruction* return_value = nullptr;
if (GetBlocks().size() == 3) {
@@ -2312,6 +2340,68 @@
new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
}
+HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
+ HBasicBlock* body,
+ HBasicBlock* exit) {
+ DCHECK(header->IsLoopHeader());
+ HLoopInformation* loop = header->GetLoopInformation();
+
+ // Add new loop blocks.
+ HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_body = new (arena_) HBasicBlock(this, header->GetDexPc());
+ AddBlock(new_pre_header);
+ AddBlock(new_header);
+ AddBlock(new_body);
+
+ // Set up control flow.
+ header->ReplaceSuccessor(exit, new_pre_header);
+ new_pre_header->AddSuccessor(new_header);
+ new_header->AddSuccessor(exit);
+ new_header->AddSuccessor(new_body);
+ new_body->AddSuccessor(new_header);
+
+ // Set up dominators.
+ header->ReplaceDominatedBlock(exit, new_pre_header);
+ new_pre_header->SetDominator(header);
+ new_pre_header->dominated_blocks_.push_back(new_header);
+ new_header->SetDominator(new_pre_header);
+ new_header->dominated_blocks_.push_back(new_body);
+ new_body->SetDominator(new_header);
+ new_header->dominated_blocks_.push_back(exit);
+ exit->SetDominator(new_header);
+
+ // Fix reverse post order.
+ size_t index_of_header = IndexOfElement(reverse_post_order_, header);
+ MakeRoomFor(&reverse_post_order_, 2, index_of_header);
+ reverse_post_order_[++index_of_header] = new_pre_header;
+ reverse_post_order_[++index_of_header] = new_header;
+ size_t index_of_body = IndexOfElement(reverse_post_order_, body);
+ MakeRoomFor(&reverse_post_order_, 1, index_of_body - 1);
+ reverse_post_order_[index_of_body] = new_body;
+
+ // Add gotos and suspend check (client must add conditional in header).
+ new_pre_header->AddInstruction(new (arena_) HGoto());
+ HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc());
+ new_header->AddInstruction(suspend_check);
+ new_body->AddInstruction(new (arena_) HGoto());
+ suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
+ loop->GetSuspendCheck()->GetEnvironment(), header);
+
+ // Update loop information.
+ new_header->AddBackEdge(new_body);
+ new_header->GetLoopInformation()->SetSuspendCheck(suspend_check);
+ new_header->GetLoopInformation()->Populate();
+ new_pre_header->SetLoopInformation(loop->GetPreHeader()->GetLoopInformation()); // outward
+ HLoopInformationOutwardIterator it(*new_header);
+ for (it.Advance(); !it.Done(); it.Advance()) {
+ it.Current()->Add(new_pre_header);
+ it.Current()->Add(new_header);
+ it.Current()->Add(new_body);
+ }
+ return new_pre_header;
+}
+
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (rti.IsValid()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 542b218..c109369 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -305,7 +305,6 @@
HGraph(ArenaAllocator* arena,
const DexFile& dex_file,
uint32_t method_idx,
- bool should_generate_constructor_barrier,
InstructionSet instruction_set,
InvokeType invoke_type = kInvalidInvokeType,
bool debuggable = false,
@@ -323,6 +322,7 @@
temporaries_vreg_slots_(0),
has_bounds_checks_(false),
has_try_catch_(false),
+ has_simd_(false),
has_loops_(false),
has_irreducible_loops_(false),
debuggable_(debuggable),
@@ -331,7 +331,6 @@
method_idx_(method_idx),
invoke_type_(invoke_type),
in_ssa_form_(false),
- should_generate_constructor_barrier_(should_generate_constructor_barrier),
number_of_cha_guards_(0),
instruction_set_(instruction_set),
cached_null_constant_(nullptr),
@@ -340,6 +339,7 @@
cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
+ art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
@@ -398,6 +398,12 @@
// put deoptimization instructions, etc.
void TransformLoopHeaderForBCE(HBasicBlock* header);
+ // Adds a new loop directly after the loop with the given header and exit.
+ // Returns the new preheader.
+ HBasicBlock* TransformLoopForVectorization(HBasicBlock* header,
+ HBasicBlock* body,
+ HBasicBlock* exit);
+
// Removes `block` from the graph. Assumes `block` has been disconnected from
// other blocks and has no instructions or phis.
void DeleteDeadEmptyBlock(HBasicBlock* block);
@@ -496,10 +502,6 @@
has_bounds_checks_ = value;
}
- bool ShouldGenerateConstructorBarrier() const {
- return should_generate_constructor_barrier_;
- }
-
bool IsDebuggable() const { return debuggable_; }
// Returns a constant of the given type and value. If it does not exist
@@ -560,6 +562,9 @@
bool HasTryCatch() const { return has_try_catch_; }
void SetHasTryCatch(bool value) { has_try_catch_ = value; }
+ bool HasSIMD() const { return has_simd_; }
+ void SetHasSIMD(bool value) { has_simd_ = value; }
+
bool HasLoops() const { return has_loops_; }
void SetHasLoops(bool value) { has_loops_ = value; }
@@ -652,6 +657,11 @@
// false positives.
bool has_try_catch_;
+ // Flag whether SIMD instructions appear in the graph. If true, the
+ // code generators may have to be more careful spilling the wider
+ // contents of SIMD registers.
+ bool has_simd_;
+
// Flag whether there are any loops in the graph. We can skip loop
// optimization if it's false. It's only best effort to keep it up
// to date in the presence of code elimination so there might be false
@@ -685,8 +695,6 @@
// for non-SSA form (like the number of temporaries).
bool in_ssa_form_;
- const bool should_generate_constructor_barrier_;
-
// Number of CHA guards in the graph. Used to short-circuit the
// CHA guard optimization pass when there is no CHA guard left.
uint32_t number_of_cha_guards_;
@@ -1353,6 +1361,26 @@
M(TypeConversion, Instruction) \
M(UShr, BinaryOperation) \
M(Xor, BinaryOperation) \
+ M(VecReplicateScalar, VecUnaryOperation) \
+ M(VecSetScalars, VecUnaryOperation) \
+ M(VecSumReduce, VecUnaryOperation) \
+ M(VecCnv, VecUnaryOperation) \
+ M(VecNeg, VecUnaryOperation) \
+ M(VecAbs, VecUnaryOperation) \
+ M(VecNot, VecUnaryOperation) \
+ M(VecAdd, VecBinaryOperation) \
+ M(VecSub, VecBinaryOperation) \
+ M(VecMul, VecBinaryOperation) \
+ M(VecDiv, VecBinaryOperation) \
+ M(VecAnd, VecBinaryOperation) \
+ M(VecAndNot, VecBinaryOperation) \
+ M(VecOr, VecBinaryOperation) \
+ M(VecXor, VecBinaryOperation) \
+ M(VecShl, VecBinaryOperation) \
+ M(VecShr, VecBinaryOperation) \
+ M(VecUShr, VecBinaryOperation) \
+ M(VecLoad, VecMemoryOperation) \
+ M(VecStore, VecMemoryOperation) \
/*
* Instructions, shared across several (not all) architectures.
@@ -1414,7 +1442,11 @@
M(Constant, Instruction) \
M(UnaryOperation, Instruction) \
M(BinaryOperation, Instruction) \
- M(Invoke, Instruction)
+ M(Invoke, Instruction) \
+ M(VecOperation, Instruction) \
+ M(VecUnaryOperation, VecOperation) \
+ M(VecBinaryOperation, VecOperation) \
+ M(VecMemoryOperation, VecOperation)
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -2071,6 +2103,7 @@
void SetLocations(LocationSummary* locations) { locations_ = locations; }
void ReplaceWith(HInstruction* instruction);
+ void ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
void ReplaceInput(HInstruction* replacement, size_t index);
// This is almost the same as doing `ReplaceWith()`. But in this helper, the
@@ -2934,28 +2967,97 @@
};
// Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize FINAL : public HTemplateInstruction<1> {
+class HDeoptimize FINAL : public HVariableInputSizeInstruction {
public:
- // We set CanTriggerGC to prevent any intermediate address to be live
- // at the point of the `HDeoptimize`.
- HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
+ enum class Kind {
+ kBCE,
+ kInline,
+ kLast = kInline
+ };
+
+ // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
+ // across.
+ HDeoptimize(ArenaAllocator* arena, HInstruction* cond, Kind kind, uint32_t dex_pc)
+ : HVariableInputSizeInstruction(
+ SideEffects::All(),
+ dex_pc,
+ arena,
+ /* number_of_inputs */ 1,
+ kArenaAllocMisc) {
+ SetPackedFlag<kFieldCanBeMoved>(false);
+ SetPackedField<DeoptimizeKindField>(kind);
SetRawInputAt(0, cond);
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
- return true;
+ // Use this constructor when the `HDeoptimize` guards an instruction, and any user
+ // that relies on the deoptimization to pass should have its input be the `HDeoptimize`
+ // instead of `guard`.
+ // We set CanTriggerGC to prevent any intermediate address to be live
+ // at the point of the `HDeoptimize`.
+ HDeoptimize(ArenaAllocator* arena,
+ HInstruction* cond,
+ HInstruction* guard,
+ Kind kind,
+ uint32_t dex_pc)
+ : HVariableInputSizeInstruction(
+ SideEffects::CanTriggerGC(),
+ dex_pc,
+ arena,
+ /* number_of_inputs */ 2,
+ kArenaAllocMisc) {
+ SetPackedFlag<kFieldCanBeMoved>(true);
+ SetPackedField<DeoptimizeKindField>(kind);
+ SetRawInputAt(0, cond);
+ SetRawInputAt(1, guard);
}
+
+ bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
+ }
+
bool NeedsEnvironment() const OVERRIDE { return true; }
+
bool CanThrow() const OVERRIDE { return true; }
+ Kind GetKind() const { return GetPackedField<DeoptimizeKindField>(); }
+
+ Primitive::Type GetType() const OVERRIDE {
+ return GuardsAnInput() ? GuardedInput()->GetType() : Primitive::kPrimVoid;
+ }
+
+ bool GuardsAnInput() const {
+ return InputCount() == 2;
+ }
+
+ HInstruction* GuardedInput() const {
+ DCHECK(GuardsAnInput());
+ return InputAt(1);
+ }
+
+ void RemoveGuard() {
+ RemoveInputAt(1);
+ }
+
DECLARE_INSTRUCTION(Deoptimize);
private:
+ static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
+ static constexpr size_t kFieldDeoptimizeKindSize =
+ MinimumBitsToStore(static_cast<size_t>(Kind::kLast));
+ static constexpr size_t kNumberOfDeoptimizePackedBits =
+ kFieldDeoptimizeKind + kFieldDeoptimizeKindSize;
+ static_assert(kNumberOfDeoptimizePackedBits <= kMaxNumberOfPackedBits,
+ "Too many packed fields.");
+ using DeoptimizeKindField = BitField<Kind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
+
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+std::ostream& operator<<(std::ostream& os, const HDeoptimize::Kind& rhs);
+
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
// The compiled code checks this flag value in a guard before devirtualized call and
// if it's true, starts to do deoptimization.
@@ -4115,6 +4217,10 @@
dispatch_info_ = dispatch_info;
}
+ DispatchInfo GetDispatchInfo() const {
+ return dispatch_info_;
+ }
+
void AddSpecialInput(HInstruction* input) {
// We allow only one special input.
DCHECK(!IsStringInit() && !HasCurrentMethodInput());
@@ -4959,7 +5065,7 @@
const DexFile& GetDexFile() const { return dex_file_; }
dex::TypeIndex GetTypeIndex() const { return type_index_; }
uint8_t GetIndex() const { return index_; }
- bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
+ bool IsThis() const ATTRIBUTE_UNUSED { return GetPackedFlag<kFlagIsThis>(); }
bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
@@ -6609,6 +6715,8 @@
} // namespace art
+#include "nodes_vector.h"
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
#include "nodes_shared.h"
#endif
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
new file mode 100644
index 0000000..0cbbf2a
--- /dev/null
+++ b/compiler/optimizing/nodes_vector.h
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_
+#define ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_
+
+// This #include should never be used by compilation, because this header file (nodes_vector.h)
+// is included in the header file nodes.h itself. However it gives editing tools better context.
+#include "nodes.h"
+
+namespace art {
+
+// Memory alignment, represented as an offset relative to a base, where 0 <= offset < base,
+// and base is a power of two. For example, the value Alignment(16, 0) means memory is
+// perfectly aligned at a 16-byte boundary, whereas the value Alignment(16, 4) means
+// memory is always exactly 4 bytes above such a boundary.
+class Alignment {
+ public:
+ Alignment(size_t base, size_t offset) : base_(base), offset_(offset) {
+ DCHECK_LT(offset, base);
+ DCHECK(IsPowerOfTwo(base));
+ }
+
+ // Returns true if memory is "at least" aligned at the given boundary.
+ // Assumes requested base is power of two.
+ bool IsAlignedAt(size_t base) const {
+ DCHECK_NE(0u, base);
+ DCHECK(IsPowerOfTwo(base));
+ return ((offset_ | base_) & (base - 1u)) == 0;
+ }
+
+ std::string ToString() const {
+ return "ALIGN(" + std::to_string(base_) + "," + std::to_string(offset_) + ")";
+ }
+
+ private:
+ size_t base_;
+ size_t offset_;
+};
+
+//
+// Definitions of abstract vector operations in HIR.
+//
+
+// Abstraction of a vector operation, i.e., an operation that performs
+// GetVectorLength() x GetPackedType() operations simultaneously.
+class HVecOperation : public HVariableInputSizeInstruction {
+ public:
+ HVecOperation(ArenaAllocator* arena,
+ Primitive::Type packed_type,
+ SideEffects side_effects,
+ size_t number_of_inputs,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVariableInputSizeInstruction(side_effects,
+ dex_pc,
+ arena,
+ number_of_inputs,
+ kArenaAllocVectorNode),
+ vector_length_(vector_length) {
+ SetPackedField<TypeField>(packed_type);
+ DCHECK_LT(1u, vector_length);
+ }
+
+ // Returns the number of elements packed in a vector.
+ size_t GetVectorLength() const {
+ return vector_length_;
+ }
+
+ // Returns the number of bytes in a full vector.
+ size_t GetVectorNumberOfBytes() const {
+ return vector_length_ * Primitive::ComponentSize(GetPackedType());
+ }
+
+ // Returns the type of the vector operation: a SIMD operation looks like a FPU location.
+ // TODO: we could introduce SIMD types in HIR.
+ Primitive::Type GetType() const OVERRIDE {
+ return Primitive::kPrimDouble;
+ }
+
+ // Returns the true component type packed in a vector.
+ Primitive::Type GetPackedType() const {
+ return GetPackedField<TypeField>();
+ }
+
+ DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
+
+ private:
+ // Additional packed bits.
+ static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldTypeSize =
+ MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+ static constexpr size_t kNumberOfVectorOpPackedBits = kFieldType + kFieldTypeSize;
+ static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+ using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>;
+
+ const size_t vector_length_;
+
+ DISALLOW_COPY_AND_ASSIGN(HVecOperation);
+};
+
+// Abstraction of a unary vector operation.
+class HVecUnaryOperation : public HVecOperation {
+ public:
+ HVecUnaryOperation(ArenaAllocator* arena,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(arena,
+ packed_type,
+ SideEffects::None(),
+ /*number_of_inputs*/ 1,
+ vector_length,
+ dex_pc) { }
+ DECLARE_ABSTRACT_INSTRUCTION(VecUnaryOperation);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecUnaryOperation);
+};
+
+// Abstraction of a binary vector operation.
+class HVecBinaryOperation : public HVecOperation {
+ public:
+ HVecBinaryOperation(ArenaAllocator* arena,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(arena,
+ packed_type,
+ SideEffects::None(),
+ /*number_of_inputs*/ 2,
+ vector_length,
+ dex_pc) { }
+ DECLARE_ABSTRACT_INSTRUCTION(VecBinaryOperation);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecBinaryOperation);
+};
+
+// Abstraction of a vector operation that references memory, with an alignment.
+// The Android runtime guarantees at least "component size" alignment for array
+// elements and, thus, vectors.
+class HVecMemoryOperation : public HVecOperation {
+ public:
+ HVecMemoryOperation(ArenaAllocator* arena,
+ Primitive::Type packed_type,
+ SideEffects side_effects,
+ size_t number_of_inputs,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc),
+ alignment_(Primitive::ComponentSize(packed_type), 0) { }
+
+ void SetAlignment(Alignment alignment) { alignment_ = alignment; }
+
+ Alignment GetAlignment() const { return alignment_; }
+
+ DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation);
+
+ private:
+ Alignment alignment_;
+
+ DISALLOW_COPY_AND_ASSIGN(HVecMemoryOperation);
+};
+
+//
+// Definitions of concrete vector operations in HIR.
+//
+
+// Replicates the given scalar into a vector,
+// viz. replicate(x) = [ x, .. , x ].
+class HVecReplicateScalar FINAL : public HVecUnaryOperation {
+ public:
+ HVecReplicateScalar(ArenaAllocator* arena,
+ HInstruction* scalar,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ SetRawInputAt(0, scalar);
+ }
+ DECLARE_INSTRUCTION(VecReplicateScalar);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar);
+};
+
+// Assigns the given scalar elements to a vector,
+// viz. set( array(x1, .., xn) ) = [ x1, .. , xn ].
+class HVecSetScalars FINAL : public HVecUnaryOperation {  // NOTE(review): no 'public:' — ctor is private by default; siblings declare one. Confirm intended.
+  HVecSetScalars(ArenaAllocator* arena,
+                 HInstruction** scalars,  // array of vector_length scalar operands
+                 Primitive::Type packed_type,
+                 size_t vector_length,
+                 uint32_t dex_pc = kNoDexPc)
+      : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+    for (size_t i = 0; i < vector_length; i++) {
+      SetRawInputAt(0, scalars[i]);  // NOTE(review): index 0 every iteration — looks like SetRawInputAt(i, ...); but base allocates only 1 input slot, so fix needs a wider-arity base. Confirm.
+    }
+  }
+  DECLARE_INSTRUCTION(VecSetScalars);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HVecSetScalars);
+};
+
+// Sum-reduces the given vector into a shorter vector (m < n) or scalar (m = 1),
+// viz. sum-reduce[ x1, .. , xn ] = [ y1, .., ym ], where yi = sum_j x_j.
+class HVecSumReduce FINAL : public HVecUnaryOperation {  // NOTE(review): no 'public:' — ctor is private by default; siblings declare one. Confirm intended.
+  HVecSumReduce(ArenaAllocator* arena,
+                HInstruction* input,
+                Primitive::Type packed_type,
+                size_t vector_length,
+                uint32_t dex_pc = kNoDexPc)
+      : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+    DCHECK(input->IsVecOperation());
+    DCHECK_EQ(input->AsVecOperation()->GetPackedType(), packed_type);
+    SetRawInputAt(0, input);
+  }
+
+  // TODO: the scalar result may need integral promotion (a sum of narrow lanes can overflow the packed type).
+  Primitive::Type GetType() const OVERRIDE { return GetPackedType(); }
+
+  DECLARE_INSTRUCTION(VecSumReduce);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HVecSumReduce);
+};
+
+// Converts every component in the vector,
+// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
+class HVecCnv FINAL : public HVecUnaryOperation {
+ public:
+ HVecCnv(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ DCHECK_NE(input->AsVecOperation()->GetPackedType(), packed_type); // actual convert
+ SetRawInputAt(0, input);
+ }
+
+ Primitive::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
+ Primitive::Type GetResultType() const { return GetPackedType(); }
+
+ DECLARE_INSTRUCTION(VecCnv);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecCnv);
+};
+
+// Negates every component in the vector,
+// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
+class HVecNeg FINAL : public HVecUnaryOperation {
+ public:
+ HVecNeg(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ DCHECK_EQ(input->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, input);
+ }
+ DECLARE_INSTRUCTION(VecNeg);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecNeg);
+};
+
+// Takes absolute value of every component in the vector,
+// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ].
+class HVecAbs FINAL : public HVecUnaryOperation {
+ public:
+ HVecAbs(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ DCHECK_EQ(input->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, input);
+ }
+ DECLARE_INSTRUCTION(VecAbs);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAbs);
+};
+
+// Bitwise- or boolean-nots every component in the vector,
+// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
+// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
+class HVecNot FINAL : public HVecUnaryOperation {
+ public:
+ HVecNot(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ SetRawInputAt(0, input);
+ }
+ DECLARE_INSTRUCTION(VecNot);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecNot);
+};
+
+// Adds every component in the two vectors,
+// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
+class HVecAdd FINAL : public HVecBinaryOperation {
+ public:
+ HVecAdd(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ DCHECK_EQ(right->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecAdd);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAdd);
+};
+
+// Subtracts every component in the two vectors,
+// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
+class HVecSub FINAL : public HVecBinaryOperation {
+ public:
+ HVecSub(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ DCHECK_EQ(right->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecSub);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecSub);
+};
+
+// Multiplies every component in the two vectors,
+// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
+class HVecMul FINAL : public HVecBinaryOperation {
+ public:
+ HVecMul(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ DCHECK_EQ(right->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecMul);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecMul);
+};
+
+// Divides every component in the two vectors,
+// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
+class HVecDiv FINAL : public HVecBinaryOperation {
+ public:
+ HVecDiv(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ DCHECK_EQ(right->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecDiv);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecDiv);
+};
+
+// Bitwise-ands every component in the two vectors,
+// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
+class HVecAnd FINAL : public HVecBinaryOperation {
+ public:
+ HVecAnd(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecAnd);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAnd);
+};
+
+// Bitwise-and-nots every component in the two vectors,
+// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
+class HVecAndNot FINAL : public HVecBinaryOperation {
+ public:
+ HVecAndNot(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecAndNot);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAndNot);
+};
+
+// Bitwise-ors every component in the two vectors,
+// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
+class HVecOr FINAL : public HVecBinaryOperation {
+ public:
+ HVecOr(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecOr);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecOr);
+};
+
+// Bitwise-xors every component in the two vectors,
+// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
+class HVecXor FINAL : public HVecBinaryOperation {
+ public:
+ HVecXor(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation() && right->IsVecOperation());
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecXor);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecXor);
+};
+
+// Logically shifts every component in the vector left by the given distance,
+// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
+class HVecShl FINAL : public HVecBinaryOperation {
+ public:
+ HVecShl(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecShl);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecShl);
+};
+
+// Arithmetically shifts every component in the vector right by the given distance,
+// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
+class HVecShr FINAL : public HVecBinaryOperation {
+ public:
+ HVecShr(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecShr);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecShr);
+};
+
+// Logically shifts every component in the vector right by the given distance,
+// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
+class HVecUShr FINAL : public HVecBinaryOperation {
+ public:
+ HVecUShr(ArenaAllocator* arena,
+ HInstruction* left,
+ HInstruction* right,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecBinaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(left->IsVecOperation());
+ DCHECK_EQ(left->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, left);
+ SetRawInputAt(1, right);
+ }
+ DECLARE_INSTRUCTION(VecUShr);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecUShr);
+};
+
+// Loads a vector from memory, viz. load(mem, 1)
+// yields the vector [ mem(1), .. , mem(n) ].
+class HVecLoad FINAL : public HVecMemoryOperation {
+ public:
+ HVecLoad(ArenaAllocator* arena,
+ HInstruction* base,
+ HInstruction* index,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecMemoryOperation(arena,
+ packed_type,
+ SideEffects::ArrayReadOfType(packed_type),
+ /*number_of_inputs*/ 2,
+ vector_length,
+ dex_pc) {
+ SetRawInputAt(0, base);
+ SetRawInputAt(1, index);
+ }
+ DECLARE_INSTRUCTION(VecLoad);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecLoad);
+};
+
+// Stores a vector to memory, viz. store(mem, 1, [ x1, .. , xn ])
+// sets mem(1) = x1, .. , mem(n) = xn.
+class HVecStore FINAL : public HVecMemoryOperation {
+ public:
+ HVecStore(ArenaAllocator* arena,
+ HInstruction* base,
+ HInstruction* index,
+ HInstruction* value,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecMemoryOperation(arena,
+ packed_type,
+ SideEffects::ArrayWriteOfType(packed_type),
+ /*number_of_inputs*/ 3,
+ vector_length,
+ dex_pc) {
+ DCHECK(value->IsVecOperation());
+ DCHECK_EQ(value->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, base);
+ SetRawInputAt(1, index);
+ SetRawInputAt(2, value);
+ }
+ DECLARE_INSTRUCTION(VecStore);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecStore);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index d84fe6c..60af2b4 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -174,53 +174,45 @@
// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
- 0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
- 0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x18, 0x00, 0xBD, 0x67,
- 0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7, 0x10, 0x00, 0xB0, 0xDF,
- 0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF, 0x28, 0x00, 0xBD, 0x67,
- 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
+ 0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
+ 0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
+ 0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
+ 0x00, 0x00, 0x1F, 0xD8,
};
-
static constexpr uint8_t expected_cfi_kMips64[] = {
- 0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x0A, 0x44,
- 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0, 0x44, 0xD1, 0x44, 0xDF,
- 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44,
+ 0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: daddiu r29, r29, -40
-// 0x00000004: .cfi_def_cfa_offset: 40
-// 0x00000004: sd r31, +32(r29)
+// 0x00000000: daddiu r29, r29, -64
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: sd r31, +56(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r17, +24(r29)
+// 0x00000008: sd r17, +48(r29)
// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd r16, +16(r29)
+// 0x0000000c: sd r16, +40(r29)
// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +8(r29)
+// 0x00000010: sdc1 f25, +32(r29)
// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +0(r29)
+// 0x00000014: sdc1 f24, +24(r29)
// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: daddiu r29, r29, -24
-// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: daddiu r29, r29, 24
-// 0x00000020: .cfi_def_cfa_offset: 40
-// 0x00000020: ldc1 f24, +0(r29)
-// 0x00000024: .cfi_restore: r56
-// 0x00000024: ldc1 f25, +8(r29)
+// 0x00000018: .cfi_remember_state
+// 0x00000018: ld r31, +56(r29)
+// 0x0000001c: .cfi_restore: r31
+// 0x0000001c: ld r17, +48(r29)
+// 0x00000020: .cfi_restore: r17
+// 0x00000020: ld r16, +40(r29)
+// 0x00000024: .cfi_restore: r16
+// 0x00000024: ldc1 f25, +32(r29)
// 0x00000028: .cfi_restore: r57
-// 0x00000028: ld r16, +16(r29)
-// 0x0000002c: .cfi_restore: r16
-// 0x0000002c: ld r17, +24(r29)
-// 0x00000030: .cfi_restore: r17
-// 0x00000030: ld r31, +32(r29)
-// 0x00000034: .cfi_restore: r31
-// 0x00000034: daddiu r29, r29, 40
-// 0x00000038: .cfi_def_cfa_offset: 0
-// 0x00000038: jr r31
-// 0x0000003c: nop
-// 0x00000040: .cfi_restore_state
-// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000028: ldc1 f24, +24(r29)
+// 0x0000002c: .cfi_restore: r56
+// 0x0000002c: daddiu r29, r29, 64
+// 0x00000030: .cfi_def_cfa_offset: 0
+// 0x00000030: jic r31, 0
+// 0x00000034: .cfi_restore_state
+// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
#ifdef ART_USE_OLD_ARM_BACKEND
@@ -403,58 +395,52 @@
// 0x00020060: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
- 0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
- 0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x02, 0x00, 0xA6, 0x60,
- 0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
+ 0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
+ 0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
+ 0x02, 0x00, 0xA6, 0x60, 0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
};
static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
- 0x18, 0x00, 0xBD, 0x67, 0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7,
- 0x10, 0x00, 0xB0, 0xDF, 0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF,
- 0x28, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
+ 0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
+ 0x00, 0x00, 0x1F, 0xD8,
};
static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
- 0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x10, 0x00,
- 0x02, 0x00, 0x0A, 0x44, 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0,
- 0x44, 0xD1, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x04, 0x10, 0x00, 0x02, 0x00, 0x0A,
+ 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E,
+ 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: daddiu r29, r29, -40
-// 0x00000004: .cfi_def_cfa_offset: 40
-// 0x00000004: sd r31, +32(r29)
+// 0x00000000: daddiu r29, r29, -64
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: sd r31, +56(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r17, +24(r29)
+// 0x00000008: sd r17, +48(r29)
// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd r16, +16(r29)
+// 0x0000000c: sd r16, +40(r29)
// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +8(r29)
+// 0x00000010: sdc1 f25, +32(r29)
// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +0(r29)
+// 0x00000014: sdc1 f24, +24(r29)
// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: daddiu r29, r29, -24
-// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: bnec r5, r6, 0x0000002c ; +12
-// 0x00000020: auipc r1, 2
-// 0x00000024: jic r1, 12 ; b 0x00020030 ; +131080
-// 0x00000028: nop
+// 0x00000018: bnec r5, r6, 0x00000024 ; +12
+// 0x0000001c: auipc r1, 2
+// 0x00000020: jic r1, 12 ; bc 0x00020028 ; +131080
+// 0x00000024: nop
// ...
-// 0x00020028: nop
-// 0x0002002c: .cfi_remember_state
-// 0x0002002c: daddiu r29, r29, 24
-// 0x00020030: .cfi_def_cfa_offset: 40
-// 0x00020030: ldc1 f24, +0(r29)
-// 0x00020034: .cfi_restore: r56
-// 0x00020034: ldc1 f25, +8(r29)
+// 0x00020024: nop
+// 0x00020028: .cfi_remember_state
+// 0x00020028: ld r31, +56(r29)
+// 0x0002002c: .cfi_restore: r31
+// 0x0002002c: ld r17, +48(r29)
+// 0x00020030: .cfi_restore: r17
+// 0x00020030: ld r16, +40(r29)
+// 0x00020034: .cfi_restore: r16
+// 0x00020034: ldc1 f25, +32(r29)
// 0x00020038: .cfi_restore: r57
-// 0x00020038: ld r16, +16(r29)
-// 0x0002003c: .cfi_restore: r16
-// 0x0002003c: ld r17, +24(r29)
-// 0x00020040: .cfi_restore: r17
-// 0x00020040: ld r31, +32(r29)
-// 0x00020044: .cfi_restore: r31
-// 0x00020044: daddiu r29, r29, 40
-// 0x00020047: .cfi_def_cfa_offset: 0
-// 0x00020048: jr r31
-// 0x0002004c: nop
-// 0x00020050: .cfi_restore_state
-// 0x00020050: .cfi_def_cfa_offset: 64
+// 0x00020038: ldc1 f24, +24(r29)
+// 0x0002003c: .cfi_restore: r56
+// 0x0002003c: daddiu r29, r29, 64
+// 0x00020040: .cfi_def_cfa_offset: 0
+// 0x00020040: jic r31, 0
+// 0x00020044: .cfi_restore_state
+// 0x00020044: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 23ccd9e..8aad539 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -449,15 +449,6 @@
|| instruction_set == kX86_64;
}
-// Read barrier are supported on ARM, ARM64, x86 and x86-64 at the moment.
-// TODO: Add support for other architectures and remove this function
-static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
- return instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kX86
- || instruction_set == kX86_64;
-}
-
// Strip pass name suffix to get optimization name.
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
size_t pos = pass_name.find(kPassNameSeparator);
@@ -499,7 +490,8 @@
handles,
stats,
number_of_dex_registers,
- /* depth */ 0);
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
} else if (opt_name == HSharpening::kSharpeningPassName) {
return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
@@ -607,8 +599,7 @@
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
- bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
- && (compiler_options.GetInlineMaxCodeUnits() > 0);
+ bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
if (!should_inline) {
return;
}
@@ -623,7 +614,8 @@
handles,
stats,
number_of_dex_registers,
- /* depth */ 0);
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
HOptimization* optimizations[] = { inliner };
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
@@ -911,12 +903,6 @@
return nullptr;
}
- // When read barriers are enabled, do not attempt to compile for
- // instruction sets that have no read barrier support.
- if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
- return nullptr;
- }
-
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
@@ -944,16 +930,10 @@
/* verified_method */ nullptr,
dex_cache);
- bool requires_barrier = dex_compilation_unit.IsConstructor()
- && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
- dex_compilation_unit.GetDexFile(),
- dex_compilation_unit.GetClassDefIndex());
-
HGraph* graph = new (arena) HGraph(
arena,
dex_file,
method_idx,
- requires_barrier,
compiler_driver->GetInstructionSet(),
kInvalidInvokeType,
compiler_driver->GetCompilerOptions().GetDebuggable(),
@@ -1107,13 +1087,10 @@
if (kIsDebugBuild &&
IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
- (!kEmitCompilerReadBarrier ||
- InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
+ IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
- // that should be compiled with this compiler (when the the
- // instruction set is supported -- and has support for read
- // barriers, if they are enabled). This makes sure we're not
+ // that should be compiled with this compiler (when the
+ // instruction set is supported). This makes sure we're not
// regressing.
std::string method_name = dex_file.PrettyMethod(method_idx);
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index ae9a811..a211c54 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -69,6 +69,23 @@
kExplicitNullCheckGenerated,
kSimplifyIf,
kInstructionSunk,
+ kNotInlinedUnresolvedEntrypoint,
+ kNotInlinedDexCache,
+ kNotInlinedStackMaps,
+ kNotInlinedEnvironmentBudget,
+ kNotInlinedInstructionBudget,
+ kNotInlinedLoopWithoutExit,
+ kNotInlinedIrreducibleLoop,
+ kNotInlinedAlwaysThrows,
+ kNotInlinedInfiniteLoop,
+ kNotInlinedTryCatch,
+ kNotInlinedRegisterAllocator,
+ kNotInlinedCannotBuild,
+ kNotInlinedNotVerified,
+ kNotInlinedCodeItem,
+ kNotInlinedWont,
+ kNotInlinedRecursiveBudget,
+ kNotInlinedProxy,
kLastStat
};
@@ -168,6 +185,23 @@
case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
case kSimplifyIf: name = "SimplifyIf"; break;
case kInstructionSunk: name = "InstructionSunk"; break;
+ case kNotInlinedUnresolvedEntrypoint: name = "NotInlinedUnresolvedEntrypoint"; break;
+ case kNotInlinedDexCache: name = "NotInlinedDexCache"; break;
+ case kNotInlinedStackMaps: name = "NotInlinedStackMaps"; break;
+ case kNotInlinedEnvironmentBudget: name = "NotInlinedEnvironmentBudget"; break;
+ case kNotInlinedInstructionBudget: name = "NotInlinedInstructionBudget"; break;
+ case kNotInlinedLoopWithoutExit: name = "NotInlinedLoopWithoutExit"; break;
+ case kNotInlinedIrreducibleLoop: name = "NotInlinedIrreducibleLoop"; break;
+ case kNotInlinedAlwaysThrows: name = "NotInlinedAlwaysThrows"; break;
+ case kNotInlinedInfiniteLoop: name = "NotInlinedInfiniteLoop"; break;
+ case kNotInlinedTryCatch: name = "NotInlinedTryCatch"; break;
+ case kNotInlinedRegisterAllocator: name = "NotInlinedRegisterAllocator"; break;
+ case kNotInlinedCannotBuild: name = "NotInlinedCannotBuild"; break;
+ case kNotInlinedNotVerified: name = "NotInlinedNotVerified"; break;
+ case kNotInlinedCodeItem: name = "NotInlinedCodeItem"; break;
+ case kNotInlinedWont: name = "NotInlinedWont"; break;
+ case kNotInlinedRecursiveBudget: name = "NotInlinedRecursiveBudget"; break;
+ case kNotInlinedProxy: name = "NotInlinedProxy"; break;
case kLastStat:
LOG(FATAL) << "invalid stat "
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index bf963b8..1cdcbd2 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -79,7 +79,9 @@
inline HGraph* CreateGraph(ArenaAllocator* allocator) {
return new (allocator) HGraph(
- allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, false,
+ allocator,
+ *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))),
+ /*method_idx*/-1,
kRuntimeISA);
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index efbaf6c..66bfea9 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -40,6 +40,14 @@
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitDeoptimize(HDeoptimize* deoptimize) {
+ if (deoptimize->GuardsAnInput()) {
+ // Replace the uses with the actual guarded instruction.
+ deoptimize->ReplaceWith(deoptimize->GuardedInput());
+ deoptimize->RemoveGuard();
+ }
+}
+
void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
if (check->IsStringCharAt()) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index c128227..7ffbe44 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,6 +44,7 @@
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 6e332ca..d5637b9 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -310,8 +310,8 @@
BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
- if (compare->IsEqual()) {
- BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
+ if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
+ check->SetReferenceTypeInfo(class_rti);
}
}
}
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 84a4bab..0b49ce1 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -29,7 +29,7 @@
*/
class ReferenceTypePropagationTest : public CommonCompilerTest {
public:
- ReferenceTypePropagationTest() : pool_(), allocator_(&pool_) {
+ ReferenceTypePropagationTest() : pool_(), allocator_(&pool_), propagation_(nullptr) {
graph_ = CreateGraph(&allocator_);
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 8a9c1cc..c6a0b6a 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -299,11 +299,14 @@
// Currently, we spill unconditionnally the current method in the code generators.
&& !interval->GetDefinedBy()->IsCurrentMethod()) {
// We spill eagerly, so move must be at definition.
- InsertMoveAfter(interval->GetDefinedBy(),
- interval->ToLocation(),
- interval->NeedsTwoSpillSlots()
- ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
- : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
+ Location loc;
+ switch (interval->NumberOfSpillSlotsNeeded()) {
+ case 1: loc = Location::StackSlot(interval->GetParent()->GetSpillSlot()); break;
+ case 2: loc = Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()); break;
+ case 4: loc = Location::SIMDStackSlot(interval->GetParent()->GetSpillSlot()); break;
+ default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
+ }
+ InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
}
UsePosition* use = current->GetFirstUse();
EnvUsePosition* env_use = current->GetFirstEnvironmentUse();
@@ -459,9 +462,12 @@
location_source = defined_by->GetLocations()->Out();
} else {
DCHECK(defined_by->IsCurrentMethod());
- location_source = parent->NeedsTwoSpillSlots()
- ? Location::DoubleStackSlot(parent->GetSpillSlot())
- : Location::StackSlot(parent->GetSpillSlot());
+ switch (parent->NumberOfSpillSlotsNeeded()) {
+ case 1: location_source = Location::StackSlot(parent->GetSpillSlot()); break;
+ case 2: location_source = Location::DoubleStackSlot(parent->GetSpillSlot()); break;
+ case 4: location_source = Location::SIMDStackSlot(parent->GetSpillSlot()); break;
+ default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
+ }
}
} else {
DCHECK(source != nullptr);
@@ -492,7 +498,8 @@
|| destination.IsFpuRegister()
|| destination.IsFpuRegisterPair()
|| destination.IsStackSlot()
- || destination.IsDoubleStackSlot();
+ || destination.IsDoubleStackSlot()
+ || destination.IsSIMDStackSlot();
}
void RegisterAllocationResolver::AddMove(HParallelMove* move,
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 9064f86..87f709f 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1029,7 +1029,7 @@
interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
} else {
interval->SetSpillSlot(catch_phi_spill_slot_counter_);
- catch_phi_spill_slot_counter_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ catch_phi_spill_slot_counter_ += interval->NumberOfSpillSlotsNeeded();
}
}
}
@@ -1996,43 +1996,48 @@
bool is_interval_beginning;
size_t position;
std::tie(position, is_interval_beginning, parent_interval) = *it;
-
- bool needs_two_slots = parent_interval->NeedsTwoSpillSlots();
+ size_t number_of_spill_slots_needed = parent_interval->NumberOfSpillSlotsNeeded();
if (is_interval_beginning) {
DCHECK(!parent_interval->HasSpillSlot());
DCHECK_EQ(position, parent_interval->GetStart());
- // Find a free stack slot.
+ // Find first available free stack slot(s).
size_t slot = 0;
- for (; taken.IsBitSet(slot) || (needs_two_slots && taken.IsBitSet(slot + 1)); ++slot) {
- // Skip taken slots.
+ for (; ; ++slot) {
+ bool found = true;
+ for (size_t s = slot, u = slot + number_of_spill_slots_needed; s < u; s++) {
+ if (taken.IsBitSet(s)) {
+ found = false;
+ break; // failure
+ }
+ }
+ if (found) {
+ break; // success
+ }
}
+
parent_interval->SetSpillSlot(slot);
- *num_stack_slots_used = std::max(*num_stack_slots_used,
- needs_two_slots ? slot + 1 : slot + 2);
- if (needs_two_slots && *num_stack_slots_used % 2 != 0) {
+ *num_stack_slots_used = std::max(*num_stack_slots_used, slot + number_of_spill_slots_needed);
+ if (number_of_spill_slots_needed > 1 && *num_stack_slots_used % 2 != 0) {
// The parallel move resolver requires that there be an even number of spill slots
// allocated for pair value types.
++(*num_stack_slots_used);
}
- taken.SetBit(slot);
- if (needs_two_slots) {
- taken.SetBit(slot + 1);
+ for (size_t s = slot, u = slot + number_of_spill_slots_needed; s < u; s++) {
+ taken.SetBit(s);
}
} else {
DCHECK_EQ(position, parent_interval->GetLastSibling()->GetEnd());
DCHECK(parent_interval->HasSpillSlot());
- // Free up the stack slot used by this interval.
+ // Free up the stack slot(s) used by this interval.
size_t slot = parent_interval->GetSpillSlot();
- DCHECK(taken.IsBitSet(slot));
- DCHECK(!needs_two_slots || taken.IsBitSet(slot + 1));
- taken.ClearBit(slot);
- if (needs_two_slots) {
- taken.ClearBit(slot + 1);
+ for (size_t s = slot, u = slot + number_of_spill_slots_needed; s < u; s++) {
+ DCHECK(taken.IsBitSet(s));
+ taken.ClearBit(s);
}
}
}
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 6354e76..ab8d540 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1125,36 +1125,31 @@
LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
}
- // Find an available spill slot.
+ // Find first available spill slots.
+ size_t number_of_spill_slots_needed = parent->NumberOfSpillSlotsNeeded();
size_t slot = 0;
for (size_t e = spill_slots->size(); slot < e; ++slot) {
- if ((*spill_slots)[slot] <= parent->GetStart()) {
- if (!parent->NeedsTwoSpillSlots()) {
- // One spill slot is sufficient.
+ bool found = true;
+ for (size_t s = slot, u = std::min(slot + number_of_spill_slots_needed, e); s < u; s++) {
+ if ((*spill_slots)[s] > parent->GetStart()) {
+ found = false; // failure
break;
}
- if (slot == e - 1 || (*spill_slots)[slot + 1] <= parent->GetStart()) {
- // Two spill slots are available.
- break;
- }
+ }
+ if (found) {
+ break; // success
}
}
+ // Need new spill slots?
+ size_t upper = slot + number_of_spill_slots_needed;
+ if (upper > spill_slots->size()) {
+ spill_slots->resize(upper);
+ }
+ // Set slots to end.
size_t end = interval->GetLastSibling()->GetEnd();
- if (parent->NeedsTwoSpillSlots()) {
- if (slot + 2u > spill_slots->size()) {
- // We need a new spill slot.
- spill_slots->resize(slot + 2u, end);
- }
- (*spill_slots)[slot] = end;
- (*spill_slots)[slot + 1] = end;
- } else {
- if (slot == spill_slots->size()) {
- // We need a new spill slot.
- spill_slots->push_back(end);
- } else {
- (*spill_slots)[slot] = end;
- }
+ for (size_t s = slot; s < upper; s++) {
+ (*spill_slots)[s] = end;
}
// Note that the exact spill slot location will be computed when we resolve,
@@ -1180,7 +1175,7 @@
// TODO: Reuse spill slots when intervals of phis from different catch
// blocks do not overlap.
interval->SetSpillSlot(catch_phi_spill_slots_);
- catch_phi_spill_slots_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ catch_phi_spill_slots_ += interval->NumberOfSpillSlotsNeeded();
}
}
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index ab0dad4..9236a0e 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -315,7 +315,10 @@
// This class and its sub-classes will never be used to drive a visit of an
// `HGraph` but only to visit `HInstructions` one at a time, so we do not need
// to pass a valid graph to `HGraphDelegateVisitor()`.
- SchedulingLatencyVisitor() : HGraphDelegateVisitor(nullptr) {}
+ SchedulingLatencyVisitor()
+ : HGraphDelegateVisitor(nullptr),
+ last_visited_latency_(0),
+ last_visited_internal_latency_(0) {}
void VisitInstruction(HInstruction* instruction) OVERRIDE {
LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
@@ -413,6 +416,7 @@
selector_(selector),
only_optimize_loop_blocks_(true),
scheduling_graph_(this, arena),
+ cursor_(nullptr),
candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
virtual ~HScheduler() {}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7bd38c7..eedaf6e 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -259,7 +259,7 @@
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache.Get());
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
@@ -271,7 +271,7 @@
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
- string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache.Get());
if (string != nullptr &&
runtime->GetHeap()->ObjectIsInBootImageSpace(string) &&
!codegen_->GetCompilerOptions().GetCompilePic()) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index e8e12e1..b538a89 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -469,8 +469,15 @@
}
}
-bool LiveInterval::NeedsTwoSpillSlots() const {
- return type_ == Primitive::kPrimLong || type_ == Primitive::kPrimDouble;
+size_t LiveInterval::NumberOfSpillSlotsNeeded() const {
+ // For a SIMD operation, compute the number of needed spill slots.
+ // TODO: do through vector type?
+ HInstruction* definition = GetParent()->GetDefinedBy();
+ if (definition != nullptr && definition->IsVecOperation()) {
+ return definition->AsVecOperation()->GetVectorNumberOfBytes() / kVRegSize;
+ }
+ // Return number of needed spill slots based on type.
+ return (type_ == Primitive::kPrimLong || type_ == Primitive::kPrimDouble) ? 2 : 1;
}
Location LiveInterval::ToLocation() const {
@@ -494,10 +501,11 @@
if (defined_by->IsConstant()) {
return defined_by->GetLocations()->Out();
} else if (GetParent()->HasSpillSlot()) {
- if (NeedsTwoSpillSlots()) {
- return Location::DoubleStackSlot(GetParent()->GetSpillSlot());
- } else {
- return Location::StackSlot(GetParent()->GetSpillSlot());
+ switch (NumberOfSpillSlotsNeeded()) {
+ case 1: return Location::StackSlot(GetParent()->GetSpillSlot());
+ case 2: return Location::DoubleStackSlot(GetParent()->GetSpillSlot());
+ case 4: return Location::SIMDStackSlot(GetParent()->GetSpillSlot());
+ default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
}
} else {
return Location();
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 340d0cc..e9dffc1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -762,9 +762,9 @@
// Returns kNoRegister otherwise.
int FindHintAtDefinition() const;
- // Returns whether the interval needs two (Dex virtual register size `kVRegSize`)
- // slots for spilling.
- bool NeedsTwoSpillSlots() const;
+ // Returns the number of required spilling slots (measured as a multiple of the
+ // Dex virtual register size `kVRegSize`).
+ size_t NumberOfSpillSlotsNeeded() const;
bool IsFloatingPoint() const {
return type_ == Primitive::kPrimFloat || type_ == Primitive::kPrimDouble;
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 1916c73..a1016d1 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -189,13 +189,14 @@
// Use HAboveOrEqual+HDeoptimize as the bounds check.
HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length);
block->AddInstruction(ae);
- HInstruction* deoptimize = new(&allocator_) HDeoptimize(ae, /* dex_pc */ 0u);
+ HInstruction* deoptimize =
+ new(&allocator_) HDeoptimize(&allocator_, ae, HDeoptimize::Kind::kBCE, /* dex_pc */ 0u);
block->AddInstruction(deoptimize);
HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- deoptimize);
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ deoptimize);
deoptimize_env->CopyFrom(args);
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 5c48759..f655994 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -42,7 +42,10 @@
kUseQuaternaryName,
};
-template<typename Ass, typename Reg, typename FPReg, typename Imm>
+// For use in the template as the default type to get a nonvector registers version.
+struct NoVectorRegs {};
+
+template<typename Ass, typename Reg, typename FPReg, typename Imm, typename VecReg = NoVectorRegs>
class AssemblerTest : public testing::Test {
public:
Ass* GetAssembler() {
@@ -146,7 +149,8 @@
std::string (AssemblerTest::*GetName1)(const Reg1&),
std::string (AssemblerTest::*GetName2)(const Reg2&),
const std::string& fmt,
- int bias = 0) {
+ int bias = 0,
+ int multiplier = 1) {
std::string str;
std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
@@ -154,7 +158,7 @@
for (auto reg2 : reg2_registers) {
for (int64_t imm : imms) {
ImmType new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg1, *reg2, new_imm + bias);
+ (assembler_.get()->*f)(*reg1, *reg2, new_imm * multiplier + bias);
std::string base = fmt;
std::string reg1_string = (this->*GetName1)(*reg1);
@@ -172,7 +176,7 @@
size_t imm_index = base.find(IMM_TOKEN);
if (imm_index != std::string::npos) {
std::ostringstream sreg;
- sreg << imm + bias;
+ sreg << imm * multiplier + bias;
std::string imm_string = sreg.str();
base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
}
@@ -305,7 +309,7 @@
template <typename RegType, typename ImmType>
std::string RepeatTemplatedRegisterImmBits(void (Ass::*f)(RegType, ImmType),
int imm_bits,
- const std::vector<Reg*> registers,
+ const std::vector<RegType*> registers,
std::string (AssemblerTest::*GetName)(const RegType&),
const std::string& fmt,
int bias) {
@@ -538,6 +542,82 @@
return str;
}
+ std::string RepeatVV(void (Ass::*f)(VecReg, VecReg), const std::string& fmt) {
+ return RepeatTemplatedRegisters<VecReg, VecReg>(f,
+ GetVectorRegisters(),
+ GetVectorRegisters(),
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetVecRegName,
+ fmt);
+ }
+
+ std::string RepeatVVV(void (Ass::*f)(VecReg, VecReg, VecReg), const std::string& fmt) {
+ return RepeatTemplatedRegisters<VecReg, VecReg, VecReg>(f,
+ GetVectorRegisters(),
+ GetVectorRegisters(),
+ GetVectorRegisters(),
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetVecRegName,
+ fmt);
+ }
+
+ std::string RepeatVR(void (Ass::*f)(VecReg, Reg), const std::string& fmt) {
+ return RepeatTemplatedRegisters<VecReg, Reg>(
+ f,
+ GetVectorRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename ImmType>
+ std::string RepeatVIb(void (Ass::*f)(VecReg, ImmType),
+ int imm_bits,
+ std::string fmt,
+ int bias = 0) {
+ return RepeatTemplatedRegisterImmBits<VecReg, ImmType>(f,
+ imm_bits,
+ GetVectorRegisters(),
+ &AssemblerTest::GetVecRegName,
+ fmt,
+ bias);
+ }
+
+ template <typename ImmType>
+ std::string RepeatVRIb(void (Ass::*f)(VecReg, Reg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0,
+ int multiplier = 1) {
+ return RepeatTemplatedRegistersImmBits<VecReg, Reg, ImmType>(
+ f,
+ imm_bits,
+ GetVectorRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt,
+ bias,
+ multiplier);
+ }
+
+ template <typename ImmType>
+ std::string RepeatVVIb(void (Ass::*f)(VecReg, VecReg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0) {
+ return RepeatTemplatedRegistersImmBits<VecReg, VecReg, ImmType>(f,
+ imm_bits,
+ GetVectorRegisters(),
+ GetVectorRegisters(),
+ &AssemblerTest::GetVecRegName,
+ &AssemblerTest::GetVecRegName,
+ fmt,
+ bias);
+ }
+
// This is intended to be run as a test.
bool CheckTools() {
return test_helper_->CheckTools();
@@ -552,6 +632,11 @@
UNREACHABLE();
}
+ virtual std::vector<VecReg*> GetVectorRegisters() {
+ UNIMPLEMENTED(FATAL) << "Architecture does not support vector registers";
+ UNREACHABLE();
+ }
+
// Secondary register names are the secondary view on registers, e.g., 32b on 64b systems.
virtual std::string GetSecondaryRegisterName(const Reg& reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Architecture does not support secondary registers";
@@ -971,6 +1056,12 @@
return sreg.str();
}
+ std::string GetVecRegName(const VecReg& reg) {
+ std::ostringstream sreg;
+ sreg << reg;
+ return sreg.str();
+ }
+
// If the assembly file needs a header, return it in a sub-class.
virtual const char* GetAssemblyHeader() {
return nullptr;
diff --git a/compiler/utils/atomic_method_ref_map-inl.h b/compiler/utils/atomic_method_ref_map-inl.h
index d71c2fe..ad3a099 100644
--- a/compiler/utils/atomic_method_ref_map-inl.h
+++ b/compiler/utils/atomic_method_ref_map-inl.h
@@ -42,7 +42,7 @@
inline bool AtomicMethodRefMap<T>::Get(MethodReference ref, T* out) const {
const ElementArray* const array = GetArray(ref.dex_file);
if (array == nullptr) {
- return kInsertResultInvalidDexFile;
+ return false;
}
*out = (*array)[ref.dex_method_index].LoadRelaxed();
return true;
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 2f154fb..3ac6c3c 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -84,7 +84,11 @@
MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
ArenaAllocator* arena,
InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features ATTRIBUTE_UNUSED) {
+ const InstructionSetFeatures* instruction_set_features) {
+#ifndef ART_ENABLE_CODEGEN_mips64
+ UNUSED(instruction_set_features);
+#endif
+
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
@@ -92,7 +96,11 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(arena));
+ return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(
+ arena,
+ instruction_set_features != nullptr
+ ? instruction_set_features->AsMips64InstructionSetFeatures()
+ : nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 2e2231b..a99d02d 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -635,6 +635,7 @@
DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04), rd, rd, rt);
}
+// TODO: This instruction is available in both R6 and MSA and it should be used when available.
void MipsAssembler::Lsa(Register rd, Register rs, Register rt, int saPlusOne) {
CHECK(IsR6());
CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
@@ -642,6 +643,24 @@
DsFsmInstrRrr(EmitR(0x0, rs, rt, rd, sa, 0x05), rd, rs, rt);
}
+void MipsAssembler::ShiftAndAdd(Register dst,
+ Register src_idx,
+ Register src_base,
+ int shamt,
+ Register tmp) {
+ CHECK(0 <= shamt && shamt <= 4) << shamt;
+ CHECK_NE(src_base, tmp);
+ if (shamt == TIMES_1) {
+ // Catch the special case where the shift amount is zero (0).
+ Addu(dst, src_base, src_idx);
+ } else if (IsR6()) {
+ Lsa(dst, src_idx, src_base, shamt);
+ } else {
+ Sll(tmp, src_idx, shamt);
+ Addu(dst, src_base, tmp);
+ }
+}
+
void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
DsFsmInstrRrr(EmitI(0x20, rs, rt, imm16), rt, rs, rs);
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 1a5a23d..463daeb 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -263,6 +263,7 @@
void Ext(Register rd, Register rt, int pos, int size); // R2+
void Ins(Register rd, Register rt, int pos, int size); // R2+
void Lsa(Register rd, Register rs, Register rt, int saPlusOne); // R6
+ void ShiftAndAdd(Register dst, Register src_idx, Register src_base, int shamt, Register tmp = AT);
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index c410365..0cff44d 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -184,6 +184,122 @@
Emit(encoding);
}
+void Mips64Assembler::EmitMsa3R(int operation,
+ int df,
+ VectorRegister wt,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(wt, kNoVectorRegister);
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df << kDfShift |
+ static_cast<uint32_t>(wt) << kWtShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsaBIT(int operation,
+ int df_m,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df_m << kDfMShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsaELM(int operation,
+ int df_n,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaELMOperationShift |
+ df_n << kDfNShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsaMI10(int s10,
+ GpuRegister rs,
+ VectorRegister wd,
+ int minor_opcode,
+ int df) {
+ CHECK_NE(rs, kNoGpuRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ CHECK(IsUint<10>(s10)) << s10;
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ s10 << kS10Shift |
+ static_cast<uint32_t>(rs) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode << kS10MinorShift |
+ df;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsaI10(int operation,
+ int df,
+ int i10,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(wd, kNoVectorRegister);
+ CHECK(IsUint<10>(i10)) << i10;
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df << kDfShift |
+ i10 << kI10Shift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsa2R(int operation,
+ int df,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsa2ROperationShift |
+ df << kDf2RShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitMsa2RF(int operation,
+ int df,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsa2RFOperationShift |
+ df << kDf2RShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+}
+
void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x21);
}
@@ -1080,6 +1196,485 @@
Nor(rd, rs, ZERO);
}
+void Mips64Assembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd);
+}
+
+void Mips64Assembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
+}
+
+void Mips64Assembler::MoveV(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19);
+}
+
+void Mips64Assembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19);
+}
+
+void Mips64Assembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19);
+}
+
+void Mips64Assembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19);
+}
+
+void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
+}
+
+void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
+ EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
+}
+
+void Mips64Assembler::FillH(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
+ EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e);
+}
+
+void Mips64Assembler::FillW(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
+ EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e);
+}
+
+void Mips64Assembler::FillD(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
+ EmitMsa2R(0xc0, 0x3, static_cast<VectorRegister>(rs), wd, 0x1e);
+}
+
+void Mips64Assembler::LdiB(VectorRegister wd, int imm8) {
+ CHECK(HasMsa());
+ CHECK(IsInt<8>(imm8)) << imm8;
+ EmitMsaI10(0x6, 0x0, imm8 & kMsaS10Mask, wd, 0x7);
+}
+
+void Mips64Assembler::LdiH(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ EmitMsaI10(0x6, 0x1, imm10 & kMsaS10Mask, wd, 0x7);
+}
+
+void Mips64Assembler::LdiW(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ EmitMsaI10(0x6, 0x2, imm10 & kMsaS10Mask, wd, 0x7);
+}
+
+void Mips64Assembler::LdiD(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ EmitMsaI10(0x6, 0x3, imm10 & kMsaS10Mask, wd, 0x7);
+}
+
+void Mips64Assembler::LdB(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(offset)) << offset;
+ EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0);
+}
+
+void Mips64Assembler::LdH(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<11>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64HalfwordSize);
+ EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1);
+}
+
+void Mips64Assembler::LdW(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<12>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2);
+}
+
+void Mips64Assembler::LdD(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<13>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64DoublewordSize);
+ EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3);
+}
+
+void Mips64Assembler::StB(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(offset)) << offset;
+ EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0);
+}
+
+void Mips64Assembler::StH(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<11>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64HalfwordSize);
+ EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1);
+}
+
+void Mips64Assembler::StW(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<12>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2);
+}
+
+void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<13>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMips64DoublewordSize);
+ EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
+}
+
void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
TemplateLoadConst32(this, rd, value);
}
@@ -1106,6 +1701,7 @@
}
}
+// TODO: don't use rtmp, use daui, dahi, dati.
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
if (IsInt<16>(value)) {
Daddiu(rt, rs, value);
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 2c5072e..666c693 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -21,6 +21,7 @@
#include <utility>
#include <vector>
+#include "arch/mips64/instruction_set_features_mips64.h"
#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
@@ -266,6 +267,7 @@
}
}
+static constexpr size_t kMips64HalfwordSize = 2;
static constexpr size_t kMips64WordSize = 4;
static constexpr size_t kMips64DoublewordSize = 8;
@@ -412,7 +414,8 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
- explicit Mips64Assembler(ArenaAllocator* arena)
+ explicit Mips64Assembler(ArenaAllocator* arena,
+ const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
: Assembler(arena),
overwriting_(false),
overwrite_location_(0),
@@ -421,7 +424,8 @@
jump_tables_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
- last_branch_id_(0) {
+ last_branch_id_(0),
+ has_msa_(instruction_set_features != nullptr ? instruction_set_features->HasMsa() : false) {
cfi().DelayEmittingAdvancePCs();
}
@@ -644,6 +648,105 @@
void Clear(GpuRegister rd);
void Not(GpuRegister rd, GpuRegister rs);
+ // MSA instructions.
+ void AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void Ffint_sW(VectorRegister wd, VectorRegister ws);
+ void Ffint_sD(VectorRegister wd, VectorRegister ws);
+ void Ftint_sW(VectorRegister wd, VectorRegister ws);
+ void Ftint_sD(VectorRegister wd, VectorRegister ws);
+
+ void SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ // Immediate shift instructions, where shamtN denotes shift amount (must be between 0 and 2^N-1).
+ void SlliB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SlliH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SlliW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SlliD(VectorRegister wd, VectorRegister ws, int shamt6);
+ void SraiB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SraiH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SraiW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SraiD(VectorRegister wd, VectorRegister ws, int shamt6);
+ void SrliB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SrliH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SrliW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SrliD(VectorRegister wd, VectorRegister ws, int shamt6);
+
+ void MoveV(VectorRegister wd, VectorRegister ws);
+ void SplatiB(VectorRegister wd, VectorRegister ws, int n4);
+ void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
+ void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
+ void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void FillB(VectorRegister wd, GpuRegister rs);
+ void FillH(VectorRegister wd, GpuRegister rs);
+ void FillW(VectorRegister wd, GpuRegister rs);
+ void FillD(VectorRegister wd, GpuRegister rs);
+
+ void LdiB(VectorRegister wd, int imm8);
+ void LdiH(VectorRegister wd, int imm10);
+ void LdiW(VectorRegister wd, int imm10);
+ void LdiD(VectorRegister wd, int imm10);
+ void LdB(VectorRegister wd, GpuRegister rs, int offset);
+ void LdH(VectorRegister wd, GpuRegister rs, int offset);
+ void LdW(VectorRegister wd, GpuRegister rs, int offset);
+ void LdD(VectorRegister wd, GpuRegister rs, int offset);
+ void StB(VectorRegister wd, GpuRegister rs, int offset);
+ void StH(VectorRegister wd, GpuRegister rs, int offset);
+ void StW(VectorRegister wd, GpuRegister rs, int offset);
+ void StD(VectorRegister wd, GpuRegister rs, int offset);
+
// Higher level composite instructions.
int InstrCountForLoadReplicatedConst32(int64_t);
void LoadConst32(GpuRegister rd, int32_t value);
@@ -1349,6 +1452,18 @@
void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
void EmitBcondc(BranchCondition cond, GpuRegister rs, GpuRegister rt, uint32_t imm16_21);
+ void EmitMsa3R(int operation,
+ int df,
+ VectorRegister wt,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode);
+ void EmitMsaBIT(int operation, int df_m, VectorRegister ws, VectorRegister wd, int minor_opcode);
+ void EmitMsaELM(int operation, int df_n, VectorRegister ws, VectorRegister wd, int minor_opcode);
+ void EmitMsaMI10(int s10, GpuRegister rs, VectorRegister wd, int minor_opcode, int df);
+ void EmitMsaI10(int operation, int df, int i10, VectorRegister wd, int minor_opcode);
+ void EmitMsa2R(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
+ void EmitMsa2RF(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
void Buncond(Mips64Label* label);
void Bcond(Mips64Label* label,
@@ -1372,6 +1487,10 @@
// Emits exception block.
void EmitExceptionPoll(Mips64ExceptionSlowPath* exception);
+ bool HasMsa() const {
+ return has_msa_;
+ }
+
// List of exception blocks to generate at the end of the code cache.
std::vector<Mips64ExceptionSlowPath> exception_blocks_;
@@ -1395,6 +1514,8 @@
uint32_t last_old_position_;
uint32_t last_branch_id_;
+ const bool has_msa_;
+
DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
};
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index e5d3605..f2e3b16 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -37,12 +37,17 @@
class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
mips64::GpuRegister,
mips64::FpuRegister,
- uint32_t> {
+ uint32_t,
+ mips64::VectorRegister> {
public:
typedef AssemblerTest<mips64::Mips64Assembler,
mips64::GpuRegister,
mips64::FpuRegister,
- uint32_t> Base;
+ uint32_t,
+ mips64::VectorRegister> Base;
+
+ AssemblerMIPS64Test()
+ : instruction_set_features_(Mips64InstructionSetFeatures::FromVariant("default", nullptr)) {}
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
@@ -60,7 +65,7 @@
// (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
// branches in the .text section and so they require a relocation pass (there's a relocation
// section, .rela.text, that has the needed info to fix up the branches).
- return " -march=mips64r6 -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
+ return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
}
void Pad(std::vector<uint8_t>& data) OVERRIDE {
@@ -76,6 +81,10 @@
return " -D -bbinary -mmips:isa64r6";
}
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
+ return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get());
+ }
+
void SetUpHelpers() OVERRIDE {
if (registers_.size() == 0) {
registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
@@ -176,6 +185,39 @@
fp_registers_.push_back(new mips64::FpuRegister(mips64::F29));
fp_registers_.push_back(new mips64::FpuRegister(mips64::F30));
fp_registers_.push_back(new mips64::FpuRegister(mips64::F31));
+
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W0));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W1));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W2));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W3));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W4));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W5));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W6));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W7));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W8));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W9));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W10));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W11));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W12));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W13));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W14));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W15));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W16));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W17));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W18));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W19));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W20));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W21));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W22));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W23));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W24));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W25));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W26));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W27));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W28));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W29));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W30));
+ vec_registers_.push_back(new mips64::VectorRegister(mips64::W31));
}
}
@@ -183,6 +225,7 @@
AssemblerTest::TearDown();
STLDeleteElements(®isters_);
STLDeleteElements(&fp_registers_);
+ STLDeleteElements(&vec_registers_);
}
std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
@@ -193,6 +236,10 @@
return fp_registers_;
}
+ std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ return vec_registers_;
+ }
+
uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
return imm_value;
}
@@ -272,8 +319,10 @@
std::map<mips64::GpuRegister, std::string, MIPS64CpuRegisterCompare> secondary_register_names_;
std::vector<mips64::FpuRegister*> fp_registers_;
-};
+ std::vector<mips64::VectorRegister*> vec_registers_;
+ std::unique_ptr<const Mips64InstructionSetFeatures> instruction_set_features_;
+};
TEST_F(AssemblerMIPS64Test, Toolchain) {
EXPECT_TRUE(CheckTools());
@@ -2461,6 +2510,386 @@
EXPECT_EQ(tester.GetPathsCovered(), art::mips64::kLoadConst64PathAllPaths);
}
+// MSA instructions.
+
+TEST_F(AssemblerMIPS64Test, AndV) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::AndV, "and.v ${reg1}, ${reg2}, ${reg3}"), "and.v");
+}
+
+TEST_F(AssemblerMIPS64Test, OrV) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::OrV, "or.v ${reg1}, ${reg2}, ${reg3}"), "or.v");
+}
+
+TEST_F(AssemblerMIPS64Test, NorV) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::NorV, "nor.v ${reg1}, ${reg2}, ${reg3}"), "nor.v");
+}
+
+TEST_F(AssemblerMIPS64Test, XorV) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::XorV, "xor.v ${reg1}, ${reg2}, ${reg3}"), "xor.v");
+}
+
+TEST_F(AssemblerMIPS64Test, AddvB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvB, "addv.b ${reg1}, ${reg2}, ${reg3}"),
+ "addv.b");
+}
+
+TEST_F(AssemblerMIPS64Test, AddvH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvH, "addv.h ${reg1}, ${reg2}, ${reg3}"),
+ "addv.h");
+}
+
+TEST_F(AssemblerMIPS64Test, AddvW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvW, "addv.w ${reg1}, ${reg2}, ${reg3}"),
+ "addv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, AddvD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvD, "addv.d ${reg1}, ${reg2}, ${reg3}"),
+ "addv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SubvB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvB, "subv.b ${reg1}, ${reg2}, ${reg3}"),
+ "subv.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SubvH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvH, "subv.h ${reg1}, ${reg2}, ${reg3}"),
+ "subv.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SubvW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvW, "subv.w ${reg1}, ${reg2}, ${reg3}"),
+ "subv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SubvD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvD, "subv.d ${reg1}, ${reg2}, ${reg3}"),
+ "subv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MulvB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvB, "mulv.b ${reg1}, ${reg2}, ${reg3}"),
+ "mulv.b");
+}
+
+TEST_F(AssemblerMIPS64Test, MulvH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvH, "mulv.h ${reg1}, ${reg2}, ${reg3}"),
+ "mulv.h");
+}
+
+TEST_F(AssemblerMIPS64Test, MulvW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvW, "mulv.w ${reg1}, ${reg2}, ${reg3}"),
+ "mulv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, MulvD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvD, "mulv.d ${reg1}, ${reg2}, ${reg3}"),
+ "mulv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sB, "div_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sH, "div_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sW, "div_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sD, "div_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uB, "div_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uH, "div_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uW, "div_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Div_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uD, "div_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sB, "mod_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sH, "mod_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sW, "mod_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sD, "mod_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uB, "mod_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uH, "mod_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uW, "mod_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Mod_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uD, "mod_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FaddW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"),
+ "fadd.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FaddD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddD, "fadd.d ${reg1}, ${reg2}, ${reg3}"),
+ "fadd.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FsubW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubW, "fsub.w ${reg1}, ${reg2}, ${reg3}"),
+ "fsub.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FsubD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubD, "fsub.d ${reg1}, ${reg2}, ${reg3}"),
+ "fsub.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FmulW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulW, "fmul.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmul.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FmulD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulD, "fmul.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmul.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FdivW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivW, "fdiv.w ${reg1}, ${reg2}, ${reg3}"),
+ "fdiv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FdivD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivD, "fdiv.d ${reg1}, ${reg2}, ${reg3}"),
+ "fdiv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Ffint_sW) {
+ DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"),
+ "ffint_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Ffint_sD) {
+ DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sD, "ffint_s.d ${reg1}, ${reg2}"),
+ "ffint_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Ftint_sW) {
+ DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sW, "ftint_s.w ${reg1}, ${reg2}"),
+ "ftint_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Ftint_sD) {
+ DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sD, "ftint_s.d ${reg1}, ${reg2}"),
+ "ftint_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SllB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllB, "sll.b ${reg1}, ${reg2}, ${reg3}"), "sll.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SllH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllH, "sll.h ${reg1}, ${reg2}, ${reg3}"), "sll.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SllW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllW, "sll.w ${reg1}, ${reg2}, ${reg3}"), "sll.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SllD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllD, "sll.d ${reg1}, ${reg2}, ${reg3}"), "sll.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SraB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraB, "sra.b ${reg1}, ${reg2}, ${reg3}"), "sra.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SraH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraH, "sra.h ${reg1}, ${reg2}, ${reg3}"), "sra.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SraW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraW, "sra.w ${reg1}, ${reg2}, ${reg3}"), "sra.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SraD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraD, "sra.d ${reg1}, ${reg2}, ${reg3}"), "sra.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SrlB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlB, "srl.b ${reg1}, ${reg2}, ${reg3}"), "srl.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SrlH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlH, "srl.h ${reg1}, ${reg2}, ${reg3}"), "srl.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SrlW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlW, "srl.w ${reg1}, ${reg2}, ${reg3}"), "srl.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SrlD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlD, "srl.d ${reg1}, ${reg2}, ${reg3}"), "srl.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SlliB) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliB, 3, "slli.b ${reg1}, ${reg2}, {imm}"),
+ "slli.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SlliH) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliH, 4, "slli.h ${reg1}, ${reg2}, {imm}"),
+ "slli.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SlliW) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliW, 5, "slli.w ${reg1}, ${reg2}, {imm}"),
+ "slli.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SlliD) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliD, 6, "slli.d ${reg1}, ${reg2}, {imm}"),
+ "slli.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MoveV) {
+ DriverStr(RepeatVV(&mips64::Mips64Assembler::MoveV, "move.v ${reg1}, ${reg2}"), "move.v");
+}
+
+TEST_F(AssemblerMIPS64Test, SplatiB) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiB, 4, "splati.b ${reg1}, ${reg2}[{imm}]"),
+ "splati.b");
+}
+
+TEST_F(AssemblerMIPS64Test, SplatiH) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiH, 3, "splati.h ${reg1}, ${reg2}[{imm}]"),
+ "splati.h");
+}
+
+TEST_F(AssemblerMIPS64Test, SplatiW) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiW, 2, "splati.w ${reg1}, ${reg2}[{imm}]"),
+ "splati.w");
+}
+
+TEST_F(AssemblerMIPS64Test, SplatiD) {
+ DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiD, 1, "splati.d ${reg1}, ${reg2}[{imm}]"),
+ "splati.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FillB) {
+ DriverStr(RepeatVR(&mips64::Mips64Assembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
+}
+
+TEST_F(AssemblerMIPS64Test, FillH) {
+ DriverStr(RepeatVR(&mips64::Mips64Assembler::FillH, "fill.h ${reg1}, ${reg2}"), "fill.h");
+}
+
+TEST_F(AssemblerMIPS64Test, FillW) {
+ DriverStr(RepeatVR(&mips64::Mips64Assembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FillD) {
+ DriverStr(RepeatVR(&mips64::Mips64Assembler::FillD, "fill.d ${reg1}, ${reg2}"), "fill.d");
+}
+
+TEST_F(AssemblerMIPS64Test, LdiB) {
+ DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
+}
+
+TEST_F(AssemblerMIPS64Test, LdiH) {
+ DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiH, -10, "ldi.h ${reg}, {imm}"), "ldi.h");
+}
+
+TEST_F(AssemblerMIPS64Test, LdiW) {
+ DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiW, -10, "ldi.w ${reg}, {imm}"), "ldi.w");
+}
+
+TEST_F(AssemblerMIPS64Test, LdiD) {
+ DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiD, -10, "ldi.d ${reg}, {imm}"), "ldi.d");
+}
+
+TEST_F(AssemblerMIPS64Test, LdB) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdB, -10, "ld.b ${reg1}, {imm}(${reg2})"), "ld.b");
+}
+
+TEST_F(AssemblerMIPS64Test, LdH) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdH, -10, "ld.h ${reg1}, {imm}(${reg2})", 0, 2),
+ "ld.h");
+}
+
+TEST_F(AssemblerMIPS64Test, LdW) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdW, -10, "ld.w ${reg1}, {imm}(${reg2})", 0, 4),
+ "ld.w");
+}
+
+TEST_F(AssemblerMIPS64Test, LdD) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdD, -10, "ld.d ${reg1}, {imm}(${reg2})", 0, 8),
+ "ld.d");
+}
+
+TEST_F(AssemblerMIPS64Test, StB) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StB, -10, "st.b ${reg1}, {imm}(${reg2})"), "st.b");
+}
+
+TEST_F(AssemblerMIPS64Test, StH) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StH, -10, "st.h ${reg1}, {imm}(${reg2})", 0, 2),
+ "st.h");
+}
+
+TEST_F(AssemblerMIPS64Test, StW) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StW, -10, "st.w ${reg1}, {imm}(${reg2})", 0, 4),
+ "st.w");
+}
+
+TEST_F(AssemblerMIPS64Test, StD) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StD, -10, "st.d ${reg1}, {imm}(${reg2})", 0, 8),
+ "st.d");
+}
+
#undef __
} // namespace art
diff --git a/compiler/utils/mips64/constants_mips64.h b/compiler/utils/mips64/constants_mips64.h
index f57498d..bc8e40b 100644
--- a/compiler/utils/mips64/constants_mips64.h
+++ b/compiler/utils/mips64/constants_mips64.h
@@ -51,8 +51,36 @@
kFdShift = 6,
kFdBits = 5,
+ kMsaOperationShift = 23,
+ kMsaELMOperationShift = 22,
+ kMsa2ROperationShift = 18,
+ kMsa2RFOperationShift = 17,
+ kDfShift = 21,
+ kDfMShift = 16,
+ kDf2RShift = 16,
+ kDfNShift = 16,
+ kWtShift = 16,
+ kWtBits = 5,
+ kWsShift = 11,
+ kWsBits = 5,
+ kWdShift = 6,
+ kWdBits = 5,
+ kS10Shift = 16,
+ kI10Shift = 11,
+ kS10MinorShift = 2,
+
kBranchOffsetMask = 0x0000ffff,
kJumpOffsetMask = 0x03ffffff,
+ kMsaMajorOpcode = 0x1e,
+ kMsaDfMByteMask = 0x70,
+ kMsaDfMHalfwordMask = 0x60,
+ kMsaDfMWordMask = 0x40,
+ kMsaDfMDoublewordMask = 0x00,
+ kMsaDfNByteMask = 0x00,
+ kMsaDfNHalfwordMask = 0x20,
+ kMsaDfNWordMask = 0x30,
+ kMsaDfNDoublewordMask = 0x38,
+ kMsaS10Mask = 0x3ff,
};
enum ScaleFactor {
diff --git a/compiler/utils/mips64/managed_register_mips64.cc b/compiler/utils/mips64/managed_register_mips64.cc
index dea396e..42d061e 100644
--- a/compiler/utils/mips64/managed_register_mips64.cc
+++ b/compiler/utils/mips64/managed_register_mips64.cc
@@ -26,6 +26,11 @@
CHECK(IsValidManagedRegister());
CHECK(other.IsValidManagedRegister());
if (Equals(other)) return true;
+ if (IsFpuRegister() && other.IsVectorRegister()) {
+ return (AsFpuRegister() == other.AsOverlappingFpuRegister());
+ } else if (IsVectorRegister() && other.IsFpuRegister()) {
+ return (AsVectorRegister() == other.AsOverlappingVectorRegister());
+ }
return false;
}
@@ -36,6 +41,8 @@
os << "GPU: " << static_cast<int>(AsGpuRegister());
} else if (IsFpuRegister()) {
os << "FpuRegister: " << static_cast<int>(AsFpuRegister());
+ } else if (IsVectorRegister()) {
+ os << "VectorRegister: " << static_cast<int>(AsVectorRegister());
} else {
os << "??: " << RegId();
}
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
index c9f9556..3980199 100644
--- a/compiler/utils/mips64/managed_register_mips64.h
+++ b/compiler/utils/mips64/managed_register_mips64.h
@@ -30,11 +30,27 @@
const int kNumberOfFpuRegIds = kNumberOfFpuRegisters;
const int kNumberOfFpuAllocIds = kNumberOfFpuRegisters;
-const int kNumberOfRegIds = kNumberOfGpuRegIds + kNumberOfFpuRegIds;
-const int kNumberOfAllocIds = kNumberOfGpuAllocIds + kNumberOfFpuAllocIds;
+const int kNumberOfVecRegIds = kNumberOfVectorRegisters;
+const int kNumberOfVecAllocIds = kNumberOfVectorRegisters;
-// An instance of class 'ManagedRegister' represents a single GPU register (enum
-// Register) or a double precision FP register (enum FpuRegister)
+const int kNumberOfRegIds = kNumberOfGpuRegIds + kNumberOfFpuRegIds + kNumberOfVecRegIds;
+const int kNumberOfAllocIds = kNumberOfGpuAllocIds + kNumberOfFpuAllocIds + kNumberOfVecAllocIds;
+
+// Register ids map:
+// [0..R[ core registers (enum GpuRegister)
+// [R..F[ floating-point registers (enum FpuRegister)
+// [F..W[ MSA vector registers (enum VectorRegister)
+// where
+// R = kNumberOfGpuRegIds
+// F = R + kNumberOfFpuRegIds
+// W = F + kNumberOfVecRegIds
+
+// An instance of class 'ManagedRegister' represents a single Mips64 register.
+// A register can be one of the following:
+// * core register (enum GpuRegister)
+// * floating-point register (enum FpuRegister)
+// * MSA vector register (enum VectorRegister)
+//
// 'ManagedRegister::NoRegister()' provides an invalid register.
// There is a one-to-one mapping between ManagedRegister and register id.
class Mips64ManagedRegister : public ManagedRegister {
@@ -49,6 +65,21 @@
return static_cast<FpuRegister>(id_ - kNumberOfGpuRegIds);
}
+ constexpr VectorRegister AsVectorRegister() const {
+ CHECK(IsVectorRegister());
+ return static_cast<VectorRegister>(id_ - (kNumberOfGpuRegIds + kNumberOfFpuRegisters));
+ }
+
+ constexpr FpuRegister AsOverlappingFpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ return static_cast<FpuRegister>(AsVectorRegister());
+ }
+
+ constexpr VectorRegister AsOverlappingVectorRegister() const {
+ CHECK(IsValidManagedRegister());
+ return static_cast<VectorRegister>(AsFpuRegister());
+ }
+
constexpr bool IsGpuRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfGpuRegIds);
@@ -60,6 +91,12 @@
return (0 <= test) && (test < kNumberOfFpuRegIds);
}
+ constexpr bool IsVectorRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfGpuRegIds + kNumberOfFpuRegIds);
+ return (0 <= test) && (test < kNumberOfVecRegIds);
+ }
+
void Print(std::ostream& os) const;
// Returns true if the two managed-registers ('this' and 'other') overlap.
@@ -77,6 +114,11 @@
return FromRegId(r + kNumberOfGpuRegIds);
}
+ static constexpr Mips64ManagedRegister FromVectorRegister(VectorRegister r) {
+ CHECK_NE(r, kNoVectorRegister);
+ return FromRegId(r + kNumberOfGpuRegIds + kNumberOfFpuRegIds);
+ }
+
private:
constexpr bool IsValidManagedRegister() const {
return (0 <= id_) && (id_ < kNumberOfRegIds);
diff --git a/compiler/utils/mips64/managed_register_mips64_test.cc b/compiler/utils/mips64/managed_register_mips64_test.cc
new file mode 100644
index 0000000..8b72d7e
--- /dev/null
+++ b/compiler/utils/mips64/managed_register_mips64_test.cc
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_mips64.h"
+#include "globals.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace mips64 {
+
+TEST(Mips64ManagedRegister, NoRegister) {
+ Mips64ManagedRegister reg = ManagedRegister::NoRegister().AsMips64();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.Overlaps(reg));
+}
+
+TEST(Mips64ManagedRegister, GpuRegister) {
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(ZERO);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(ZERO, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(AT);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(AT, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(V0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(V0, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(A0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(A0, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(A7);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(A7, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(T0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(T0, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(T3);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(T3, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(S0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(S0, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(GP);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(GP, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(SP);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(SP, reg.AsGpuRegister());
+
+ reg = Mips64ManagedRegister::FromGpuRegister(RA);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_EQ(RA, reg.AsGpuRegister());
+}
+
+TEST(Mips64ManagedRegister, FpuRegister) {
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(F0);
+ Mips64ManagedRegister vreg = Mips64ManagedRegister::FromVectorRegister(W0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_TRUE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(vreg));
+ EXPECT_EQ(F0, reg.AsFpuRegister());
+ EXPECT_EQ(W0, reg.AsOverlappingVectorRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F1);
+ vreg = Mips64ManagedRegister::FromVectorRegister(W1);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_TRUE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(vreg));
+ EXPECT_EQ(F1, reg.AsFpuRegister());
+ EXPECT_EQ(W1, reg.AsOverlappingVectorRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F20);
+ vreg = Mips64ManagedRegister::FromVectorRegister(W20);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_TRUE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(vreg));
+ EXPECT_EQ(F20, reg.AsFpuRegister());
+ EXPECT_EQ(W20, reg.AsOverlappingVectorRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F20)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F31);
+ vreg = Mips64ManagedRegister::FromVectorRegister(W31);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_TRUE(reg.IsFpuRegister());
+ EXPECT_FALSE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(vreg));
+ EXPECT_EQ(F31, reg.AsFpuRegister());
+ EXPECT_EQ(W31, reg.AsOverlappingVectorRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
+}
+
+TEST(Mips64ManagedRegister, VectorRegister) {
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromVectorRegister(W0);
+ Mips64ManagedRegister freg = Mips64ManagedRegister::FromFpuRegister(F0);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_TRUE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(freg));
+ EXPECT_EQ(W0, reg.AsVectorRegister());
+ EXPECT_EQ(F0, reg.AsOverlappingFpuRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W2);
+ freg = Mips64ManagedRegister::FromFpuRegister(F2);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_TRUE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(freg));
+ EXPECT_EQ(W2, reg.AsVectorRegister());
+ EXPECT_EQ(F2, reg.AsOverlappingFpuRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W2)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W13);
+ freg = Mips64ManagedRegister::FromFpuRegister(F13);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_TRUE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(freg));
+ EXPECT_EQ(W13, reg.AsVectorRegister());
+ EXPECT_EQ(F13, reg.AsOverlappingFpuRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W13)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W29);
+ freg = Mips64ManagedRegister::FromFpuRegister(F29);
+ EXPECT_FALSE(reg.IsNoRegister());
+ EXPECT_FALSE(reg.IsGpuRegister());
+ EXPECT_FALSE(reg.IsFpuRegister());
+ EXPECT_TRUE(reg.IsVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(freg));
+ EXPECT_EQ(W29, reg.AsVectorRegister());
+ EXPECT_EQ(F29, reg.AsOverlappingFpuRegister());
+ EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W29)));
+}
+
+TEST(Mips64ManagedRegister, Equals) {
+ ManagedRegister no_reg = ManagedRegister::NoRegister();
+ EXPECT_TRUE(no_reg.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_ZERO = Mips64ManagedRegister::FromGpuRegister(ZERO);
+ EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_A1 = Mips64ManagedRegister::FromGpuRegister(A1);
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_TRUE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_S2 = Mips64ManagedRegister::FromGpuRegister(S2);
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
+ EXPECT_TRUE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_F0 = Mips64ManagedRegister::FromFpuRegister(F0);
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_TRUE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_F31 = Mips64ManagedRegister::FromFpuRegister(F31);
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
+ EXPECT_TRUE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+
+ Mips64ManagedRegister reg_W0 = Mips64ManagedRegister::FromVectorRegister(W0);
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_TRUE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W1)));
+ EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ Mips64ManagedRegister reg_W31 = Mips64ManagedRegister::FromVectorRegister(W31);
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::NoRegister()));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W1)));
+ EXPECT_TRUE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W31)));
+}
+
+TEST(Mips64ManagedRegister, Overlaps) {
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(F0);
+ Mips64ManagedRegister reg_o = Mips64ManagedRegister::FromVectorRegister(W0);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(F0, reg_o.AsOverlappingFpuRegister());
+ EXPECT_EQ(W0, reg.AsOverlappingVectorRegister());
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F4);
+ reg_o = Mips64ManagedRegister::FromVectorRegister(W4);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(F4, reg_o.AsOverlappingFpuRegister());
+ EXPECT_EQ(W4, reg.AsOverlappingVectorRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F16);
+ reg_o = Mips64ManagedRegister::FromVectorRegister(W16);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(F16, reg_o.AsOverlappingFpuRegister());
+ EXPECT_EQ(W16, reg.AsOverlappingVectorRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromFpuRegister(F31);
+ reg_o = Mips64ManagedRegister::FromVectorRegister(W31);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(F31, reg_o.AsOverlappingFpuRegister());
+ EXPECT_EQ(W31, reg.AsOverlappingVectorRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W0);
+ reg_o = Mips64ManagedRegister::FromFpuRegister(F0);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(W0, reg_o.AsOverlappingVectorRegister());
+ EXPECT_EQ(F0, reg.AsOverlappingFpuRegister());
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W4);
+ reg_o = Mips64ManagedRegister::FromFpuRegister(F4);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(W4, reg_o.AsOverlappingVectorRegister());
+ EXPECT_EQ(F4, reg.AsOverlappingFpuRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W16);
+ reg_o = Mips64ManagedRegister::FromFpuRegister(F16);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(W16, reg_o.AsOverlappingVectorRegister());
+ EXPECT_EQ(F16, reg.AsOverlappingFpuRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromVectorRegister(W31);
+ reg_o = Mips64ManagedRegister::FromFpuRegister(F31);
+ EXPECT_TRUE(reg.Overlaps(reg_o));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_EQ(W31, reg_o.AsOverlappingVectorRegister());
+ EXPECT_EQ(F31, reg.AsOverlappingFpuRegister());
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromGpuRegister(ZERO);
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromGpuRegister(A0);
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromGpuRegister(S0);
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+
+ reg = Mips64ManagedRegister::FromGpuRegister(RA);
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
+ EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
+}
+
+} // namespace mips64
+} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 5307dc0..1736618 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1221,6 +1221,24 @@
}
+void X86Assembler::pavgb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xE0);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pavgw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xE3);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1258,6 +1276,43 @@
}
+void X86Assembler::pcmpgtb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x64);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x65);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x66);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x37);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index f52cf16..a747cda 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -495,11 +495,19 @@
void orps(XmmRegister dst, XmmRegister src);
void por(XmmRegister dst, XmmRegister src);
+ void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pavgw(XmmRegister dst, XmmRegister src);
+
void pcmpeqb(XmmRegister dst, XmmRegister src);
void pcmpeqw(XmmRegister dst, XmmRegister src);
void pcmpeqd(XmmRegister dst, XmmRegister src);
void pcmpeqq(XmmRegister dst, XmmRegister src);
+ void pcmpgtb(XmmRegister dst, XmmRegister src);
+ void pcmpgtw(XmmRegister dst, XmmRegister src);
+ void pcmpgtd(XmmRegister dst, XmmRegister src);
+ void pcmpgtq(XmmRegister dst, XmmRegister src); // SSE4.2
+
void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 2304907..f75f972 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -605,6 +605,14 @@
DriverStr(RepeatFF(&x86::X86Assembler::por, "por %{reg2}, %{reg1}"), "por");
}
+TEST_F(AssemblerX86Test, PAvgB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pavgb, "pavgb %{reg2}, %{reg1}"), "pavgb");
+}
+
+TEST_F(AssemblerX86Test, PAvgW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pavgw, "pavgw %{reg2}, %{reg1}"), "pavgw");
+}
+
TEST_F(AssemblerX86Test, PCmpeqB) {
DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "cmpeqb");
}
@@ -621,6 +629,22 @@
DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqq, "pcmpeqq %{reg2}, %{reg1}"), "cmpeqq");
}
+TEST_F(AssemblerX86Test, PCmpgtB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtb, "pcmpgtb %{reg2}, %{reg1}"), "cmpgtb");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtw, "pcmpgtw %{reg2}, %{reg1}"), "cmpgtw");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtd, "pcmpgtd %{reg2}, %{reg1}"), "cmpgtd");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtQ) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtq, "pcmpgtq %{reg2}, %{reg1}"), "cmpgtq");
+}
+
TEST_F(AssemblerX86Test, ShufPS) {
DriverStr(RepeatFFI(&x86::X86Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps");
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index d20a696..1b7a485 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1427,6 +1427,24 @@
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+void X86_64Assembler::pavgb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xE0);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pavgw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xE3);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
void X86_64Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1464,6 +1482,43 @@
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+void X86_64Assembler::pcmpgtb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x64);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pcmpgtw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x65);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pcmpgtd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x66);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pcmpgtq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x37);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
void X86_64Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 08e17e8..0ddc46c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -523,11 +523,19 @@
void orps(XmmRegister dst, XmmRegister src);
void por(XmmRegister dst, XmmRegister src);
+ void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pavgw(XmmRegister dst, XmmRegister src);
+
void pcmpeqb(XmmRegister dst, XmmRegister src);
void pcmpeqw(XmmRegister dst, XmmRegister src);
void pcmpeqd(XmmRegister dst, XmmRegister src);
void pcmpeqq(XmmRegister dst, XmmRegister src);
+ void pcmpgtb(XmmRegister dst, XmmRegister src);
+ void pcmpgtw(XmmRegister dst, XmmRegister src);
+ void pcmpgtd(XmmRegister dst, XmmRegister src);
+ void pcmpgtq(XmmRegister dst, XmmRegister src); // SSE4.2
+
void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 20062fd..e7d8401 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1293,6 +1293,14 @@
DriverStr(RepeatFF(&x86_64::X86_64Assembler::por, "por %{reg2}, %{reg1}"), "por");
}
+TEST_F(AssemblerX86_64Test, Pavgb) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pavgb, "pavgb %{reg2}, %{reg1}"), "pavgb");
+}
+
+TEST_F(AssemblerX86_64Test, Pavgw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pavgw, "pavgw %{reg2}, %{reg1}"), "pavgw");
+}
+
TEST_F(AssemblerX86_64Test, PCmpeqb) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "pcmpeqb");
}
@@ -1309,6 +1317,22 @@
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqq, "pcmpeqq %{reg2}, %{reg1}"), "pcmpeqq");
}
+TEST_F(AssemblerX86_64Test, PCmpgtb) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpgtb, "pcmpgtb %{reg2}, %{reg1}"), "pcmpgtb");
+}
+
+TEST_F(AssemblerX86_64Test, PCmpgtw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpgtw, "pcmpgtw %{reg2}, %{reg1}"), "pcmpgtw");
+}
+
+TEST_F(AssemblerX86_64Test, PCmpgtd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpgtd, "pcmpgtd %{reg2}, %{reg1}"), "pcmpgtd");
+}
+
+TEST_F(AssemblerX86_64Test, PCmpgtq) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpgtq, "pcmpgtq %{reg2}, %{reg1}"), "pcmpgtq");
+}
+
TEST_F(AssemblerX86_64Test, Shufps) {
DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps");
}
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 4bfc849..fa7e985 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -18,21 +18,21 @@
#include "verifier/verifier_deps.h"
#include "class_linker.h"
-#include "compiler/common_compiler_test.h"
-#include "compiler/dex/verification_results.h"
-#include "compiler/dex/verified_method.h"
-#include "compiler/driver/compiler_options.h"
-#include "compiler/driver/compiler_driver.h"
-#include "compiler/utils/atomic_method_ref_map-inl.h"
+#include "common_compiler_test.h"
#include "compiler_callbacks.h"
+#include "dex/verification_results.h"
+#include "dex/verified_method.h"
#include "dex_file.h"
#include "dex_file_types.h"
+#include "driver/compiler_options.h"
+#include "driver/compiler_driver.h"
#include "handle_scope-inl.h"
#include "verifier/method_verifier-inl.h"
#include "mirror/class_loader.h"
#include "runtime.h"
#include "thread.h"
#include "scoped_thread_state_change-inl.h"
+#include "utils/atomic_method_ref_map-inl.h"
namespace art {
namespace verifier {
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 0924aec..048f36d 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -14,6 +14,12 @@
// limitations under the License.
//
+cc_library_headers {
+ name: "dex2oat_headers",
+ host_supported: true,
+ export_include_dirs: ["include"],
+}
+
cc_defaults {
name: "dex2oat-defaults",
host_supported: true,
@@ -40,6 +46,7 @@
include_dirs: [
"art/cmdline",
],
+ header_libs: ["dex2oat_headers"],
}
art_cc_binary {
@@ -132,4 +139,5 @@
"art_gtest_defaults",
],
srcs: ["dex2oat_test.cc"],
+ header_libs: ["dex2oat_headers"],
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 92a12c8..b4ea20b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -54,6 +54,7 @@
#include "debug/method_debug_info.h"
#include "dex/quick_compiler_callbacks.h"
#include "dex/verification_results.h"
+#include "dex2oat_return_codes.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -311,13 +312,6 @@
UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError("");
- UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning");
- UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing.");
- UsageError(" Has priority over the --compiler-filter option. Intended for ");
- UsageError(" development/experimental use.");
- UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit);
- UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit);
- UsageError("");
UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method");
UsageError(" can have to be considered for inlining. A zero value will disable inlining.");
UsageError(" Honored only by Optimizing. Has priority over the --compiler-filter option.");
@@ -869,22 +863,8 @@
}
}
- // It they are not set, use default values for inlining settings.
- // TODO: We should rethink the compiler filter. We mostly save
- // time here, which is orthogonal to space.
- if (compiler_options_->inline_depth_limit_ == CompilerOptions::kUnsetInlineDepthLimit) {
- compiler_options_->inline_depth_limit_ =
- (compiler_options_->compiler_filter_ == CompilerFilter::kSpace)
- // Implementation of the space filter: limit inlining depth.
- ? CompilerOptions::kSpaceFilterInlineDepthLimit
- : CompilerOptions::kDefaultInlineDepthLimit;
- }
if (compiler_options_->inline_max_code_units_ == CompilerOptions::kUnsetInlineMaxCodeUnits) {
- compiler_options_->inline_max_code_units_ =
- (compiler_options_->compiler_filter_ == CompilerFilter::kSpace)
- // Implementation of the space filter: limit inlining max code units.
- ? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
- : CompilerOptions::kDefaultInlineMaxCodeUnits;
+ compiler_options_->inline_max_code_units_ = CompilerOptions::kDefaultInlineMaxCodeUnits;
}
// Checks are all explicit until we know the architecture.
@@ -1442,11 +1422,11 @@
// Set up the environment for compilation. Includes starting the runtime and loading/opening the
// boot class path.
- bool Setup() {
+ dex2oat::ReturnCode Setup() {
TimingLogger::ScopedTiming t("dex2oat Setup", timings_);
if (!PrepareImageClasses() || !PrepareCompiledClasses() || !PrepareCompiledMethods()) {
- return false;
+ return dex2oat::ReturnCode::kOther;
}
verification_results_.reset(new VerificationResults(compiler_options_.get()));
@@ -1458,12 +1438,12 @@
RuntimeArgumentMap runtime_options;
if (!PrepareRuntimeOptions(&runtime_options)) {
- return false;
+ return dex2oat::ReturnCode::kOther;
}
CreateOatWriters();
if (!AddDexFileSources()) {
- return false;
+ return dex2oat::ReturnCode::kOther;
}
if (IsBootImage() && image_filenames_.size() > 1) {
@@ -1479,7 +1459,7 @@
// When compiling an app, create the runtime early to retrieve
// the image location key needed for the oat header.
if (!CreateRuntime(std::move(runtime_options))) {
- return false;
+ return dex2oat::ReturnCode::kCreateRuntime;
}
if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
@@ -1550,7 +1530,7 @@
update_input_vdex_,
&opened_dex_files_map,
&opened_dex_files)) {
- return false;
+ return dex2oat::ReturnCode::kOther;
}
dex_files_per_oat_file_.push_back(MakeNonOwningPointerVector(opened_dex_files));
if (opened_dex_files_map != nullptr) {
@@ -1602,7 +1582,7 @@
// Note: Runtime acquires ownership of these dex files.
runtime_options.Set(RuntimeArgumentMap::BootClassPathDexList, &opened_dex_files_);
if (!CreateRuntime(std::move(runtime_options))) {
- return false;
+ return dex2oat::ReturnCode::kOther;
}
}
@@ -1636,7 +1616,7 @@
for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
if (!map->Protect(PROT_READ | PROT_WRITE)) {
PLOG(ERROR) << "Failed to make .dex files writeable.";
- return false;
+ return dex2oat::ReturnCode::kOther;
}
}
@@ -1651,14 +1631,14 @@
soa.Self()->AssertPendingException();
soa.Self()->ClearException();
PLOG(ERROR) << "Failed to register dex file.";
- return false;
+ return dex2oat::ReturnCode::kOther;
}
// Pre-register dex files so that we can access verification results without locks during
// compilation and verification.
verification_results_->AddDexFile(dex_file);
}
- return true;
+ return dex2oat::ReturnCode::kNoFailure;
}
// If we need to keep the oat file open for the image writer.
@@ -2789,13 +2769,13 @@
#endif
}
-static int CompileImage(Dex2Oat& dex2oat) {
+static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
dex2oat.LoadClassProfileDescriptors();
dex2oat.Compile();
if (!dex2oat.WriteOutputFiles()) {
dex2oat.EraseOutputFiles();
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// Flush boot.oat. We always expect the output file by name, and it will be re-opened from the
@@ -2804,46 +2784,46 @@
if (dex2oat.ShouldKeepOatFileOpen()) {
if (!dex2oat.FlushOutputFiles()) {
dex2oat.EraseOutputFiles();
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
} else if (!dex2oat.FlushCloseOutputFiles()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// Creates the boot.art and patches the oat files.
if (!dex2oat.HandleImage()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
if (!dex2oat.FlushCloseOutputFiles()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
dex2oat.DumpTiming();
- return EXIT_SUCCESS;
+ return dex2oat::ReturnCode::kNoFailure;
}
// Copy stripped to unstripped location, if necessary.
if (!dex2oat.CopyStrippedToUnstripped()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// FlushClose again, as stripping might have re-opened the oat files.
if (!dex2oat.FlushCloseOutputFiles()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
dex2oat.DumpTiming();
- return EXIT_SUCCESS;
+ return dex2oat::ReturnCode::kNoFailure;
}
-static int CompileApp(Dex2Oat& dex2oat) {
+static dex2oat::ReturnCode CompileApp(Dex2Oat& dex2oat) {
dex2oat.Compile();
if (!dex2oat.WriteOutputFiles()) {
dex2oat.EraseOutputFiles();
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// Do not close the oat files here. We might have gotten the output file by file descriptor,
@@ -2852,29 +2832,29 @@
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
if (!dex2oat.FlushCloseOutputFiles()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
dex2oat.DumpTiming();
- return EXIT_SUCCESS;
+ return dex2oat::ReturnCode::kNoFailure;
}
// Copy stripped to unstripped location, if necessary. This will implicitly flush & close the
// stripped versions. If this is given, we expect to be able to open writable files by name.
if (!dex2oat.CopyStrippedToUnstripped()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// Flush and close the files.
if (!dex2oat.FlushCloseOutputFiles()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
dex2oat.DumpTiming();
- return EXIT_SUCCESS;
+ return dex2oat::ReturnCode::kNoFailure;
}
-static int dex2oat(int argc, char** argv) {
+static dex2oat::ReturnCode Dex2oat(int argc, char** argv) {
b13564922();
TimingLogger timings("compiler", false, false);
@@ -2893,14 +2873,14 @@
if (dex2oat->UseProfile()) {
if (!dex2oat->LoadProfile()) {
LOG(ERROR) << "Failed to process profile file";
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
}
if (dex2oat->DoDexLayoutOptimizations()) {
if (dex2oat->HasInputVdexFile()) {
LOG(ERROR) << "Dexlayout is incompatible with an input VDEX";
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
}
@@ -2908,7 +2888,7 @@
// Check early that the result of compilation can be written
if (!dex2oat->OpenFile()) {
- return EXIT_FAILURE;
+ return dex2oat::ReturnCode::kOther;
}
// Print the complete line when any of the following is true:
@@ -2923,16 +2903,17 @@
LOG(INFO) << StrippedCommandLine();
}
- if (!dex2oat->Setup()) {
+ dex2oat::ReturnCode setup_code = dex2oat->Setup();
+ if (setup_code != dex2oat::ReturnCode::kNoFailure) {
dex2oat->EraseOutputFiles();
- return EXIT_FAILURE;
+ return setup_code;
}
// Helps debugging on device. Can be used to determine which dalvikvm instance invoked a dex2oat
// instance. Used by tools/bisection_search/bisection_search.py.
VLOG(compiler) << "Running dex2oat (parent PID = " << getppid() << ")";
- bool result;
+ dex2oat::ReturnCode result;
if (dex2oat->IsImage()) {
result = CompileImage(*dex2oat);
} else {
@@ -2945,7 +2926,7 @@
} // namespace art
int main(int argc, char** argv) {
- int result = art::dex2oat(argc, argv);
+ int result = static_cast<int>(art::Dex2oat(argc, argv));
// Everything was done, do an explicit exit here to avoid running Runtime destructors that take
// time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class
// should not destruct the runtime in this case.
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 289b8ab..8c14b50 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -30,6 +30,7 @@
#include "base/macros.h"
#include "dex_file-inl.h"
#include "dex2oat_environment_test.h"
+#include "dex2oat_return_codes.h"
#include "jit/profile_compilation_info.h"
#include "oat.h"
#include "oat_file.h"
@@ -50,12 +51,12 @@
}
protected:
- void GenerateOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter,
- const std::vector<std::string>& extra_args = {},
- bool expect_success = true,
- bool use_fd = false) {
+ int GenerateOdexForTestWithStatus(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter,
+ std::string* error_msg,
+ const std::vector<std::string>& extra_args = {},
+ bool use_fd = false) {
std::unique_ptr<File> oat_file;
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
@@ -73,12 +74,27 @@
args.insert(args.end(), extra_args.begin(), extra_args.end());
- std::string error_msg;
- bool success = Dex2Oat(args, &error_msg);
+ int status = Dex2Oat(args, error_msg);
if (oat_file != nullptr) {
- ASSERT_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file";
+ CHECK_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file";
}
+ return status;
+ }
+ void GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter,
+ const std::vector<std::string>& extra_args = {},
+ bool expect_success = true,
+ bool use_fd = false) {
+ std::string error_msg;
+ int status = GenerateOdexForTestWithStatus(dex_location,
+ odex_location,
+ filter,
+ &error_msg,
+ extra_args,
+ use_fd);
+ bool success = (status == 0);
if (expect_success) {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
@@ -118,7 +134,7 @@
EXPECT_EQ(expected, actual);
}
- bool Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) {
+ int Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) {
Runtime* runtime = Runtime::Current();
const std::vector<gc::space::ImageSpace*>& image_spaces =
@@ -196,6 +212,7 @@
c_args.push_back(nullptr);
execv(c_args[0], const_cast<char* const*>(c_args.data()));
exit(1);
+ UNREACHABLE();
} else {
close(link[1]);
char buffer[128];
@@ -206,12 +223,12 @@
output_ += std::string(buffer, bytes_read);
}
close(link[0]);
- int status = 0;
+ int status = -1;
if (waitpid(pid, &status, 0) != -1) {
success_ = (status == 0);
}
+ return status;
}
- return success_;
}
std::string output_ = "";
@@ -845,4 +862,30 @@
RunTest(false, { "--watchdog-timeout=10" });
}
+class Dex2oatReturnCodeTest : public Dex2oatTest {
+ protected:
+ int RunTest(const std::vector<std::string>& extra_args = {}) {
+ std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar";
+ std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex";
+
+ Copy(GetTestDexFileName(), dex_location);
+
+ std::string error_msg;
+ return GenerateOdexForTestWithStatus(dex_location,
+ odex_location,
+ CompilerFilter::kSpeed,
+ &error_msg,
+ extra_args);
+ }
+
+ std::string GetTestDexFileName() {
+ return GetDexSrc1();
+ }
+};
+
+TEST_F(Dex2oatReturnCodeTest, TestCreateRuntime) {
+ int status = RunTest({ "--boot-image=/this/does/not/exist/yolo.oat" });
+ EXPECT_EQ(static_cast<int>(dex2oat::ReturnCode::kCreateRuntime), WEXITSTATUS(status)) << output_;
+}
+
} // namespace art
diff --git a/test/ti-agent/common_load.h b/dex2oat/include/dex2oat_return_codes.h
similarity index 63%
copy from test/ti-agent/common_load.h
copy to dex2oat/include/dex2oat_return_codes.h
index e79a006..cc5400f 100644
--- a/test/ti-agent/common_load.h
+++ b/dex2oat/include/dex2oat_return_codes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,19 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
-
-#include "jvmti.h"
+#ifndef ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_
+#define ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_
namespace art {
+namespace dex2oat {
-extern jvmtiEnv* jvmti_env;
+enum class ReturnCode : int {
+ kNoFailure = 0,
+ kOther = 1,
+ kCreateRuntime = 2,
+};
+} // namespace dex2oat
} // namespace art
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#endif // ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index cf523ec..e26d051 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -56,3 +56,16 @@
defaults: ["art_gtest_defaults"],
srcs: ["dexlayout_test.cc"],
}
+
+art_cc_binary {
+ name: "dexdiag",
+ host_supported: false,
+ srcs: ["dexdiag.cc"],
+ cflags: ["-Wall"],
+ shared_libs: [
+ "libart",
+ "libart-dexlayout",
+ "libpagemap",
+ ],
+}
+
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 4228503..4905b5c 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -50,10 +50,11 @@
DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
LocalInfoVector& locals = debug_info->GetLocalInfo();
const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
+ const char* descriptor = entry.descriptor_ != nullptr ? entry.descriptor_ : "";
const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
locals.push_back(std::unique_ptr<LocalInfo>(
- new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
- entry.end_address_, entry.reg_)));
+ new LocalInfo(name, descriptor, signature, entry.start_address_, entry.end_address_,
+ entry.reg_)));
}
static uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) {
@@ -763,5 +764,138 @@
return class_data;
}
+static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
+ return 0;
+}
+
+static uint32_t HeaderSize(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
+ // Size is in elements, so there is only one header.
+ return 1;
+}
+
+// The description of each dex file section type.
+struct FileSectionDescriptor {
+ public:
+ std::string name;
+ uint16_t type;
+ // A function that when applied to a collection object, gives the size of the section.
+ std::function<uint32_t(const dex_ir::Collections&)> size_fn;
+ // A function that when applied to a collection object, gives the offset of the section.
+ std::function<uint32_t(const dex_ir::Collections&)> offset_fn;
+};
+
+static const std::vector<FileSectionDescriptor> kFileSectionDescriptors = {
+ {
+ "Header",
+ DexFile::kDexTypeHeaderItem,
+ &HeaderSize,
+ &HeaderOffset,
+ }, {
+ "StringId",
+ DexFile::kDexTypeStringIdItem,
+ &dex_ir::Collections::StringIdsSize,
+ &dex_ir::Collections::StringIdsOffset
+ }, {
+ "TypeId",
+ DexFile::kDexTypeTypeIdItem,
+ &dex_ir::Collections::TypeIdsSize,
+ &dex_ir::Collections::TypeIdsOffset
+ }, {
+ "ProtoId",
+ DexFile::kDexTypeProtoIdItem,
+ &dex_ir::Collections::ProtoIdsSize,
+ &dex_ir::Collections::ProtoIdsOffset
+ }, {
+ "FieldId",
+ DexFile::kDexTypeFieldIdItem,
+ &dex_ir::Collections::FieldIdsSize,
+ &dex_ir::Collections::FieldIdsOffset
+ }, {
+ "MethodId",
+ DexFile::kDexTypeMethodIdItem,
+ &dex_ir::Collections::MethodIdsSize,
+ &dex_ir::Collections::MethodIdsOffset
+ }, {
+ "ClassDef",
+ DexFile::kDexTypeClassDefItem,
+ &dex_ir::Collections::ClassDefsSize,
+ &dex_ir::Collections::ClassDefsOffset
+ }, {
+ "StringData",
+ DexFile::kDexTypeStringDataItem,
+ &dex_ir::Collections::StringDatasSize,
+ &dex_ir::Collections::StringDatasOffset
+ }, {
+ "TypeList",
+ DexFile::kDexTypeTypeList,
+ &dex_ir::Collections::TypeListsSize,
+ &dex_ir::Collections::TypeListsOffset
+ }, {
+ "EncArr",
+ DexFile::kDexTypeEncodedArrayItem,
+ &dex_ir::Collections::EncodedArrayItemsSize,
+ &dex_ir::Collections::EncodedArrayItemsOffset
+ }, {
+ "Annotation",
+ DexFile::kDexTypeAnnotationItem,
+ &dex_ir::Collections::AnnotationItemsSize,
+ &dex_ir::Collections::AnnotationItemsOffset
+ }, {
+ "AnnoSet",
+ DexFile::kDexTypeAnnotationSetItem,
+ &dex_ir::Collections::AnnotationSetItemsSize,
+ &dex_ir::Collections::AnnotationSetItemsOffset
+ }, {
+ "AnnoSetRL",
+ DexFile::kDexTypeAnnotationSetRefList,
+ &dex_ir::Collections::AnnotationSetRefListsSize,
+ &dex_ir::Collections::AnnotationSetRefListsOffset
+ }, {
+ "AnnoDir",
+ DexFile::kDexTypeAnnotationsDirectoryItem,
+ &dex_ir::Collections::AnnotationsDirectoryItemsSize,
+ &dex_ir::Collections::AnnotationsDirectoryItemsOffset
+ }, {
+ "DebugInfo",
+ DexFile::kDexTypeDebugInfoItem,
+ &dex_ir::Collections::DebugInfoItemsSize,
+ &dex_ir::Collections::DebugInfoItemsOffset
+ }, {
+ "CodeItem",
+ DexFile::kDexTypeCodeItem,
+ &dex_ir::Collections::CodeItemsSize,
+ &dex_ir::Collections::CodeItemsOffset
+ }, {
+ "ClassData",
+ DexFile::kDexTypeClassDataItem,
+ &dex_ir::Collections::ClassDatasSize,
+ &dex_ir::Collections::ClassDatasOffset
+ }
+};
+
+std::vector<dex_ir::DexFileSection> GetSortedDexFileSections(dex_ir::Header* header,
+ dex_ir::SortDirection direction) {
+ const dex_ir::Collections& collections = header->GetCollections();
+ std::vector<dex_ir::DexFileSection> sorted_sections;
+ // Materialize each section descriptor into a DexFileSection with its size and offset resolved.
+ for (const FileSectionDescriptor& s : kFileSectionDescriptors) {
+ sorted_sections.push_back(dex_ir::DexFileSection(s.name,
+ s.type,
+ s.size_fn(collections),
+ s.offset_fn(collections)));
+ }
+ // Sort by offset.
+ std::sort(sorted_sections.begin(),
+ sorted_sections.end(),
+ [=](dex_ir::DexFileSection& a, dex_ir::DexFileSection& b) {
+ if (direction == SortDirection::kSortDescending) {
+ return a.offset > b.offset;
+ } else {
+ return a.offset < b.offset;
+ }
+ });
+ return sorted_sections;
+}
+
} // namespace dex_ir
} // namespace art
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 78ddde8..cad0395 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -1104,6 +1104,28 @@
DISALLOW_COPY_AND_ASSIGN(MapItem);
};
+// Interface for building a vector of file sections for use by other clients.
+struct DexFileSection {
+ public:
+ DexFileSection(const std::string& name, uint16_t type, uint32_t size, uint32_t offset)
+ : name(name), type(type), size(size), offset(offset) { }
+ std::string name;
+ // The type (DexFile::MapItemType).
+ uint16_t type;
+ // The size (in elements, not bytes).
+ uint32_t size;
+ // The byte offset from the start of the file.
+ uint32_t offset;
+};
+
+enum class SortDirection {
+ kSortAscending,
+ kSortDescending
+};
+
+std::vector<DexFileSection> GetSortedDexFileSections(dex_ir::Header* header,
+ SortDirection direction);
+
} // namespace dex_ir
} // namespace art
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 8997146..829e9fe 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -35,148 +35,19 @@
namespace art {
-std::string MultidexName(const std::string& prefix,
- size_t dex_file_index,
- const std::string& suffix) {
+static std::string MultidexName(const std::string& prefix,
+ size_t dex_file_index,
+ const std::string& suffix) {
return prefix + ((dex_file_index > 0) ? std::to_string(dex_file_index + 1) : "") + suffix;
}
-struct FileSection {
- public:
- std::string name_;
- uint16_t type_;
- std::function<uint32_t(const dex_ir::Collections&)> size_fn_;
- std::function<uint32_t(const dex_ir::Collections&)> offset_fn_;
-};
-
-static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
- return 0;
-}
-
-static uint32_t HeaderSize(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
- // Size is in elements, so there is only one header.
- return 1;
-}
-
-static const std::vector<FileSection> kFileSections = {
- {
- "Header",
- DexFile::kDexTypeHeaderItem,
- &HeaderSize,
- &HeaderOffset,
- }, {
- "StringId",
- DexFile::kDexTypeStringIdItem,
- &dex_ir::Collections::StringIdsSize,
- &dex_ir::Collections::StringIdsOffset
- }, {
- "TypeId",
- DexFile::kDexTypeTypeIdItem,
- &dex_ir::Collections::TypeIdsSize,
- &dex_ir::Collections::TypeIdsOffset
- }, {
- "ProtoId",
- DexFile::kDexTypeProtoIdItem,
- &dex_ir::Collections::ProtoIdsSize,
- &dex_ir::Collections::ProtoIdsOffset
- }, {
- "FieldId",
- DexFile::kDexTypeFieldIdItem,
- &dex_ir::Collections::FieldIdsSize,
- &dex_ir::Collections::FieldIdsOffset
- }, {
- "MethodId",
- DexFile::kDexTypeMethodIdItem,
- &dex_ir::Collections::MethodIdsSize,
- &dex_ir::Collections::MethodIdsOffset
- }, {
- "ClassDef",
- DexFile::kDexTypeClassDefItem,
- &dex_ir::Collections::ClassDefsSize,
- &dex_ir::Collections::ClassDefsOffset
- }, {
- "StringData",
- DexFile::kDexTypeStringDataItem,
- &dex_ir::Collections::StringDatasSize,
- &dex_ir::Collections::StringDatasOffset
- }, {
- "TypeList",
- DexFile::kDexTypeTypeList,
- &dex_ir::Collections::TypeListsSize,
- &dex_ir::Collections::TypeListsOffset
- }, {
- "EncArr",
- DexFile::kDexTypeEncodedArrayItem,
- &dex_ir::Collections::EncodedArrayItemsSize,
- &dex_ir::Collections::EncodedArrayItemsOffset
- }, {
- "Annotation",
- DexFile::kDexTypeAnnotationItem,
- &dex_ir::Collections::AnnotationItemsSize,
- &dex_ir::Collections::AnnotationItemsOffset
- }, {
- "AnnoSet",
- DexFile::kDexTypeAnnotationSetItem,
- &dex_ir::Collections::AnnotationSetItemsSize,
- &dex_ir::Collections::AnnotationSetItemsOffset
- }, {
- "AnnoSetRL",
- DexFile::kDexTypeAnnotationSetRefList,
- &dex_ir::Collections::AnnotationSetRefListsSize,
- &dex_ir::Collections::AnnotationSetRefListsOffset
- }, {
- "AnnoDir",
- DexFile::kDexTypeAnnotationsDirectoryItem,
- &dex_ir::Collections::AnnotationsDirectoryItemsSize,
- &dex_ir::Collections::AnnotationsDirectoryItemsOffset
- }, {
- "DebugInfo",
- DexFile::kDexTypeDebugInfoItem,
- &dex_ir::Collections::DebugInfoItemsSize,
- &dex_ir::Collections::DebugInfoItemsOffset
- }, {
- "CodeItem",
- DexFile::kDexTypeCodeItem,
- &dex_ir::Collections::CodeItemsSize,
- &dex_ir::Collections::CodeItemsOffset
- }, {
- "ClassData",
- DexFile::kDexTypeClassDataItem,
- &dex_ir::Collections::ClassDatasSize,
- &dex_ir::Collections::ClassDatasOffset
- }
-};
-
-static constexpr bool kSortAscending = false;
-static constexpr bool kSortDescending = true;
-
-static std::vector<const FileSection*> GetSortedSections(
- const dex_ir::Collections& collections,
- bool sort_descending) {
- std::vector<const FileSection*> sorted_sections;
- // Build the table that will map from offset to color
- for (const FileSection& s : kFileSections) {
- sorted_sections.push_back(&s);
- }
- // Sort by offset.
- std::sort(sorted_sections.begin(),
- sorted_sections.end(),
- [&](const FileSection* a, const FileSection* b) {
- if (sort_descending) {
- return a->offset_fn_(collections) > b->offset_fn_(collections);
- } else {
- return a->offset_fn_(collections) < b->offset_fn_(collections);
- }
- });
- return sorted_sections;
-}
-
class Dumper {
public:
// Colors are based on the type of the section in MapList.
- explicit Dumper(const dex_ir::Collections& collections)
- : collections_(collections), out_file_(nullptr),
- sorted_sections_(GetSortedSections(collections, kSortDescending)) { }
+ explicit Dumper(dex_ir::Header* header)
+ : out_file_(nullptr),
+ sorted_sections_(
+ dex_ir::GetSortedDexFileSections(header, dex_ir::SortDirection::kSortDescending)) { }
bool OpenAndPrintHeader(size_t dex_index) {
// Open the file and emit the gnuplot prologue.
@@ -191,12 +62,13 @@
fprintf(out_file_, "set ylabel \"ClassDef index\"\n");
fprintf(out_file_, "set xtics rotate out (");
bool printed_one = false;
- for (const FileSection& s : kFileSections) {
- if (s.size_fn_(collections_) > 0) {
+
+ for (const dex_ir::DexFileSection& s : sorted_sections_) {
+ if (s.size > 0) {
if (printed_one) {
fprintf(out_file_, ", ");
}
- fprintf(out_file_, "\"%s\" %d", s.name_.c_str(), s.offset_fn_(collections_) / kPageSize);
+ fprintf(out_file_, "\"%s\" %d", s.name.c_str(), s.offset / kPageSize);
printed_one = true;
}
}
@@ -209,9 +81,9 @@
int GetColor(uint32_t offset) const {
// The dread linear search to find the right section for the reference.
uint16_t section = 0;
- for (const FileSection* file_section : sorted_sections_) {
- if (file_section->offset_fn_(collections_) < offset) {
- section = file_section->type_;
+ for (const dex_ir::DexFileSection& file_section : sorted_sections_) {
+ if (file_section.offset < offset) {
+ section = file_section.type;
break;
}
}
@@ -362,9 +234,8 @@
{ DexFile::kDexTypeAnnotationsDirectoryItem, 16 }
};
- const dex_ir::Collections& collections_;
FILE* out_file_;
- std::vector<const FileSection*> sorted_sections_;
+ std::vector<dex_ir::DexFileSection> sorted_sections_;
DISALLOW_COPY_AND_ASSIGN(Dumper);
};
@@ -377,7 +248,7 @@
const DexFile* dex_file,
size_t dex_file_index,
ProfileCompilationInfo* profile_info) {
- std::unique_ptr<Dumper> dumper(new Dumper(header->GetCollections()));
+ std::unique_ptr<Dumper> dumper(new Dumper(header));
if (!dumper->OpenAndPrintHeader(dex_file_index)) {
fprintf(stderr, "Could not open output file.\n");
return;
@@ -432,20 +303,43 @@
} // for
}
+static uint32_t FindNextByteAfterSection(dex_ir::Header* header,
+ const std::vector<dex_ir::DexFileSection>& sorted_sections,
+ size_t section_index) {
+ for (size_t i = section_index + 1; i < sorted_sections.size(); ++i) {
+ const dex_ir::DexFileSection& section = sorted_sections.at(i);
+ if (section.size != 0) {
+ return section.offset;
+ }
+ }
+ return header->FileSize();
+}
+
/*
* Dumps the offset and size of sections within the file.
*/
void ShowDexSectionStatistics(dex_ir::Header* header, size_t dex_file_index) {
// Compute the (multidex) class file name).
- fprintf(stdout, "%s\n", MultidexName("classes", dex_file_index, ".dex").c_str());
- fprintf(stdout, "section offset items\n");
- const dex_ir::Collections& collections = header->GetCollections();
- std::vector<const FileSection*> sorted_sections(GetSortedSections(collections, kSortAscending));
- for (const FileSection* file_section : sorted_sections) {
- fprintf(stdout, "%-10s 0x%08x 0x%08x\n",
- file_section->name_.c_str(),
- file_section->offset_fn_(collections),
- file_section->size_fn_(collections));
+ fprintf(stdout, "%s (%d bytes)\n",
+ MultidexName("classes", dex_file_index, ".dex").c_str(),
+ header->FileSize());
+ fprintf(stdout, "section offset items bytes pages pct\n");
+ std::vector<dex_ir::DexFileSection> sorted_sections =
+ GetSortedDexFileSections(header, dex_ir::SortDirection::kSortAscending);
+ for (size_t i = 0; i < sorted_sections.size(); ++i) {
+ const dex_ir::DexFileSection& file_section = sorted_sections[i];
+ uint32_t bytes = 0;
+ if (file_section.size > 0) {
+ bytes = FindNextByteAfterSection(header, sorted_sections, i) - file_section.offset;
+ }
+ fprintf(stdout,
+ "%-10s %8d %8d %8d %8d %%%02d\n",
+ file_section.name.c_str(),
+ file_section.offset,
+ file_section.size,
+ bytes,
+ RoundUp(bytes, kPageSize) / kPageSize,
+ 100 * bytes / header->FileSize());
}
fprintf(stdout, "\n");
}
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
new file mode 100644
index 0000000..3edf051
--- /dev/null
+++ b/dexlayout/dexdiag.cc
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <iostream>
+#include <memory>
+
+#include "android-base/stringprintf.h"
+
+#include "dex_file.h"
+#include "dex_ir.h"
+#include "dex_ir_builder.h"
+#include "pagemap/pagemap.h"
+#include "runtime.h"
+#include "vdex_file.h"
+
+namespace art {
+
+using android::base::StringPrintf;
+
+static constexpr size_t kLineLength = 32;
+
+static bool g_show_key = false;
+static bool g_verbose = false;
+static bool g_show_statistics = false;
+
+struct DexSectionInfo {
+ public:
+ std::string name;
+ char letter;
+};
+
+static const std::map<uint16_t, DexSectionInfo> kDexSectionInfoMap = {
+ { DexFile::kDexTypeHeaderItem, { "Header", 'H' } },
+ { DexFile::kDexTypeStringIdItem, { "StringId", 'S' } },
+ { DexFile::kDexTypeTypeIdItem, { "TypeId", 'T' } },
+ { DexFile::kDexTypeProtoIdItem, { "ProtoId", 'P' } },
+ { DexFile::kDexTypeFieldIdItem, { "FieldId", 'F' } },
+ { DexFile::kDexTypeMethodIdItem, { "MethodId", 'M' } },
+ { DexFile::kDexTypeClassDefItem, { "ClassDef", 'C' } },
+ { DexFile::kDexTypeCallSiteIdItem, { "CallSiteId", 'z' } },
+ { DexFile::kDexTypeMethodHandleItem, { "MethodHandle", 'Z' } },
+ { DexFile::kDexTypeMapList, { "TypeMap", 'L' } },
+ { DexFile::kDexTypeTypeList, { "TypeList", 't' } },
+ { DexFile::kDexTypeAnnotationSetRefList, { "AnnotationSetReferenceItem", '1' } },
+ { DexFile::kDexTypeAnnotationSetItem, { "AnnotationSetItem", '2' } },
+ { DexFile::kDexTypeClassDataItem, { "ClassData", 'c' } },
+ { DexFile::kDexTypeCodeItem, { "CodeItem", 'X' } },
+ { DexFile::kDexTypeStringDataItem, { "StringData", 's' } },
+ { DexFile::kDexTypeDebugInfoItem, { "DebugInfo", 'D' } },
+ { DexFile::kDexTypeAnnotationItem, { "AnnotationItem", '3' } },
+ { DexFile::kDexTypeEncodedArrayItem, { "EncodedArrayItem", 'E' } },
+ { DexFile::kDexTypeAnnotationsDirectoryItem, { "AnnotationsDirectoryItem", '4' } }
+};
+
+class PageCount {
+ public:
+ PageCount() {
+ for (auto it = kDexSectionInfoMap.begin(); it != kDexSectionInfoMap.end(); ++it) {
+ map_[it->first] = 0;
+ }
+ }
+ void Increment(uint16_t type) {
+ map_[type]++;
+ }
+ size_t Get(uint16_t type) const {
+ return map_.at(type);
+ }
+ private:
+ std::map<uint16_t, size_t> map_;
+ DISALLOW_COPY_AND_ASSIGN(PageCount);
+};
+
+static void PrintLetterKey() {
+ std::cout << "letter section_type" << std::endl;
+ for (const auto& p : kDexSectionInfoMap) {
+ const DexSectionInfo& section_info = p.second;
+ std::cout << section_info.letter << " " << section_info.name.c_str() << std::endl;
+ }
+}
+
+static char PageTypeChar(uint16_t type) {
+ if (kDexSectionInfoMap.find(type) == kDexSectionInfoMap.end()) {
+ return '-';
+ }
+ return kDexSectionInfoMap.find(type)->second.letter;
+}
+
+static uint16_t FindSectionTypeForPage(size_t page,
+ const std::vector<dex_ir::DexFileSection>& sections) {
+ for (const auto& section : sections) {
+ size_t first_page_of_section = section.offset / kPageSize;
+ // Only consider non-empty sections.
+ if (section.size == 0) {
+ continue;
+ }
+ // Attribute the page to the highest-offset section that starts before the page.
+ if (first_page_of_section <= page) {
+ return section.type;
+ }
+ }
+ // If there's no non-zero sized section with an offset below offset we're looking for, it
+ // must be the header.
+ return DexFile::kDexTypeHeaderItem;
+}
+
+static void ProcessPageMap(uint64_t* pagemap,
+ size_t start,
+ size_t end,
+ const std::vector<dex_ir::DexFileSection>& sections,
+ PageCount* page_counts) {
+ for (size_t page = start; page < end; ++page) {
+ char type_char = '.';
+ if (PM_PAGEMAP_PRESENT(pagemap[page])) {
+ uint16_t type = FindSectionTypeForPage(page, sections);
+ page_counts->Increment(type);
+ type_char = PageTypeChar(type);
+ }
+ if (g_verbose) {
+ std::cout << type_char;
+ if ((page - start) % kLineLength == kLineLength - 1) {
+ std::cout << std::endl;
+ }
+ }
+ }
+ if (g_verbose) {
+ if ((end - start) % kLineLength != 0) {
+ std::cout << std::endl;
+ }
+ }
+}
+
+static void DisplayDexStatistics(size_t start,
+ size_t end,
+ const PageCount& resident_pages,
+ const std::vector<dex_ir::DexFileSection>& sections) {
+ // Compute the total possible sizes for sections.
+ PageCount mapped_pages;
+ DCHECK_GE(end, start);
+ size_t total_mapped_pages = end - start;
+ if (total_mapped_pages == 0) {
+ return;
+ }
+ for (size_t page = start; page < end; ++page) {
+ mapped_pages.Increment(FindSectionTypeForPage(page, sections));
+ }
+ size_t total_resident_pages = 0;
+ // Compute the width of the section header column in the table (for fixed formatting).
+ int section_header_width = 0;
+ for (const auto& section_info : kDexSectionInfoMap) {
+ section_header_width = std::max(section_header_width,
+ static_cast<int>(section_info.second.name.length()));
+ }
+ // The width needed to print a file page offset (32-bit).
+ static constexpr int kPageCountWidth =
+ static_cast<int>(std::numeric_limits<uint32_t>::digits10);
+ // Display the sections.
+ static constexpr char kSectionHeader[] = "Section name";
+ std::cout << StringPrintf("%-*s %*s %*s %% of %% of",
+ section_header_width,
+ kSectionHeader,
+ kPageCountWidth,
+ "resident",
+ kPageCountWidth,
+ "total"
+ )
+ << std::endl;
+ std::cout << StringPrintf("%-*s %*s %*s sect. total",
+ section_header_width,
+ "",
+ kPageCountWidth,
+ "pages",
+ kPageCountWidth,
+ "pages")
+ << std::endl;
+ for (size_t i = sections.size(); i > 0; --i) {
+ const dex_ir::DexFileSection& section = sections[i - 1];
+ const uint16_t type = section.type;
+ const DexSectionInfo& section_info = kDexSectionInfoMap.find(type)->second;
+ size_t pages_resident = resident_pages.Get(type);
+ double percent_resident = 0;
+ if (mapped_pages.Get(type) > 0) {
+ percent_resident = 100.0 * pages_resident / mapped_pages.Get(type);
+ }
+ // 6.2 is sufficient to print 0-100% with two decimal places of accuracy.
+ std::cout << StringPrintf("%-*s %*zd %*zd %6.2f %6.2f",
+ section_header_width,
+ section_info.name.c_str(),
+ kPageCountWidth,
+ pages_resident,
+ kPageCountWidth,
+ mapped_pages.Get(type),
+ percent_resident,
+ 100.0 * pages_resident / total_mapped_pages)
+ << std::endl;
+ total_resident_pages += pages_resident;
+ }
+ std::cout << StringPrintf("%-*s %*zd %*zd %6.2f",
+ section_header_width,
+ "GRAND TOTAL",
+ kPageCountWidth,
+ total_resident_pages,
+ kPageCountWidth,
+ total_mapped_pages,
+ 100.0 * total_resident_pages / total_mapped_pages)
+ << std::endl
+ << std::endl;
+}
+
+static void ProcessOneDexMapping(uint64_t* pagemap,
+ uint64_t map_start,
+ const DexFile* dex_file,
+ uint64_t vdex_start) {
+ uint64_t dex_file_start = reinterpret_cast<uint64_t>(dex_file->Begin());
+ size_t dex_file_size = dex_file->Size();
+ if (dex_file_start < vdex_start) {
+ std::cerr << "Dex file start offset for "
+ << dex_file->GetLocation().c_str()
+ << " is incorrect: map start "
+ << StringPrintf("%zx > dex start %zx\n", map_start, dex_file_start)
+ << std::endl;
+ return;
+ }
+ uint64_t start_page = (dex_file_start - vdex_start) / kPageSize;
+ uint64_t start_address = start_page * kPageSize;
+ uint64_t end_page = RoundUp(start_address + dex_file_size, kPageSize) / kPageSize;
+ std::cout << "DEX "
+ << dex_file->GetLocation().c_str()
+ << StringPrintf(": %zx-%zx",
+ map_start + start_page * kPageSize,
+ map_start + end_page * kPageSize)
+ << std::endl;
+ // Build a list of the dex file section types, sorted from highest offset to lowest.
+ std::vector<dex_ir::DexFileSection> sections;
+ {
+ std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file));
+ sections = dex_ir::GetSortedDexFileSections(header.get(),
+ dex_ir::SortDirection::kSortDescending);
+ }
+ PageCount section_resident_pages;
+  ProcessPageMap(pagemap, start_page, end_page, sections, &section_resident_pages);
+ if (g_show_statistics) {
+ DisplayDexStatistics(start_page, end_page, section_resident_pages, sections);
+ }
+}
+
+static bool DisplayMappingIfFromVdexFile(pm_map_t* map) {
+ // Confirm that the map is from a vdex file.
+ static const char* suffixes[] = { ".vdex" };
+ std::string vdex_name;
+ bool found = false;
+ for (size_t j = 0; j < sizeof(suffixes) / sizeof(suffixes[0]); ++j) {
+ if (strstr(pm_map_name(map), suffixes[j]) != nullptr) {
+ vdex_name = pm_map_name(map);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return true;
+ }
+ // Extract all the dex files from the vdex file.
+ std::string error_msg;
+ std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_name,
+ false /*writeable*/,
+ false /*low_4gb*/,
+ &error_msg /*out*/));
+ if (vdex == nullptr) {
+ std::cerr << "Could not open vdex file "
+ << vdex_name.c_str()
+ << ": error "
+ << error_msg.c_str()
+ << std::endl;
+ return false;
+ }
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ if (!vdex->OpenAllDexFiles(&dex_files, &error_msg)) {
+ std::cerr << "Dex files could not be opened for "
+ << vdex_name.c_str()
+ << ": error "
+ << error_msg.c_str()
+ << std::endl;
+ }
+ // Open the page mapping (one uint64_t per page) for the entire vdex mapping.
+ uint64_t* pagemap;
+ size_t len;
+ if (pm_map_pagemap(map, &pagemap, &len) != 0) {
+ std::cerr << "Error creating pagemap." << std::endl;
+ return false;
+ }
+ // Process the dex files.
+ std::cout << "MAPPING "
+ << pm_map_name(map)
+ << StringPrintf(": %zx-%zx", pm_map_start(map), pm_map_end(map))
+ << std::endl;
+ for (const auto& dex_file : dex_files) {
+ ProcessOneDexMapping(pagemap,
+ pm_map_start(map),
+ dex_file.get(),
+ reinterpret_cast<uint64_t>(vdex->Begin()));
+ }
+ free(pagemap);
+ return true;
+}
+
+
+static void Usage(const char* cmd) {
+ std::cerr << "Usage: " << cmd << " [-k] [-s] [-v] pid" << std::endl
+ << " -k Shows a key to verbose display characters." << std::endl
+ << " -s Shows section statistics for individual dex files." << std::endl
+ << " -v Verbosely displays resident pages for dex files." << std::endl;
+}
+
+static int DexDiagMain(int argc, char* argv[]) {
+ if (argc < 2) {
+ Usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ // TODO: add option to track usage by class name, etc.
+ for (int i = 1; i < argc - 1; ++i) {
+ if (strcmp(argv[i], "-k") == 0) {
+ g_show_key = true;
+ } else if (strcmp(argv[i], "-s") == 0) {
+ g_show_statistics = true;
+ } else if (strcmp(argv[i], "-v") == 0) {
+ g_verbose = true;
+ } else {
+ Usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+ }
+
+ // Art specific set up.
+ InitLogging(argv, Runtime::Aborter);
+ MemMap::Init();
+
+ pid_t pid;
+ char* endptr;
+ pid = (pid_t)strtol(argv[argc - 1], &endptr, 10);
+ if (*endptr != '\0' || kill(pid, 0) != 0) {
+ std::cerr << StringPrintf("Invalid PID \"%s\".\n", argv[argc - 1]) << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // get libpagemap kernel information.
+ pm_kernel_t* ker;
+ if (pm_kernel_create(&ker) != 0) {
+ std::cerr << "Error creating kernel interface -- does this kernel have pagemap?" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // get libpagemap process information.
+ pm_process_t* proc;
+ if (pm_process_create(ker, pid, &proc) != 0) {
+ std::cerr << "Error creating process interface -- does process "
+ << pid
+ << " really exist?"
+ << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Get the set of mappings by the specified process.
+ pm_map_t** maps;
+ size_t num_maps;
+ if (pm_process_maps(proc, &maps, &num_maps) != 0) {
+ std::cerr << "Error listing maps." << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Process the mappings that are due to DEX files.
+ for (size_t i = 0; i < num_maps; ++i) {
+ if (!DisplayMappingIfFromVdexFile(maps[i])) {
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (g_show_key) {
+ PrintLetterKey();
+ }
+ return 0;
+}
+
+} // namespace art
+
+int main(int argc, char* argv[]) {
+ return art::DexDiagMain(argc, argv);
+}
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 615bcf9..0536f322 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1528,6 +1528,111 @@
return new_class_data_order;
}
+void DexLayout::LayoutStringData(const DexFile* dex_file) {
+ const size_t num_strings = header_->GetCollections().StringIds().size();
+ std::vector<bool> is_shorty(num_strings, false);
+ std::vector<bool> from_hot_method(num_strings, false);
+ for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ // A name of a profile class is probably going to get looked up by ClassTable::Lookup, mark it
+ // as hot.
+ const bool is_profile_class =
+ info_->ContainsClass(*dex_file, dex::TypeIndex(class_def->ClassType()->GetIndex()));
+ if (is_profile_class) {
+ from_hot_method[class_def->ClassType()->GetStringId()->GetIndex()] = true;
+ }
+ dex_ir::ClassData* data = class_def->GetClassData();
+ if (data == nullptr) {
+ continue;
+ }
+ for (size_t i = 0; i < 2; ++i) {
+ for (auto& method : *(i == 0 ? data->DirectMethods() : data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item == nullptr) {
+ continue;
+ }
+ const bool is_clinit = is_profile_class &&
+ (method->GetAccessFlags() & kAccConstructor) != 0 &&
+ (method->GetAccessFlags() & kAccStatic) != 0;
+ const bool method_executed = is_clinit ||
+ info_->ContainsMethod(MethodReference(dex_file, method_id->GetIndex()));
+ if (!method_executed) {
+ continue;
+ }
+ is_shorty[method_id->Proto()->Shorty()->GetIndex()] = true;
+ dex_ir::CodeFixups* fixups = code_item->GetCodeFixups();
+ if (fixups == nullptr) {
+ continue;
+ }
+ if (fixups->StringIds() != nullptr) {
+ // Add const-strings.
+ for (dex_ir::StringId* id : *fixups->StringIds()) {
+ from_hot_method[id->GetIndex()] = true;
+ }
+ }
+ // TODO: Only visit field ids from static getters and setters.
+ for (dex_ir::FieldId* id : *fixups->FieldIds()) {
+ // Add the field names and types from getters and setters.
+ from_hot_method[id->Name()->GetIndex()] = true;
+ from_hot_method[id->Type()->GetStringId()->GetIndex()] = true;
+ }
+ }
+ }
+ }
+ // Sort string data by specified order.
+ std::vector<dex_ir::StringId*> string_ids;
+ size_t min_offset = std::numeric_limits<size_t>::max();
+ size_t max_offset = 0;
+ size_t hot_bytes = 0;
+ for (auto& string_id : header_->GetCollections().StringIds()) {
+ string_ids.push_back(string_id.get());
+ const size_t cur_offset = string_id->DataItem()->GetOffset();
+ CHECK_NE(cur_offset, 0u);
+ min_offset = std::min(min_offset, cur_offset);
+ dex_ir::StringData* data = string_id->DataItem();
+ const size_t element_size = data->GetSize() + 1; // Add one extra for null.
+ size_t end_offset = cur_offset + element_size;
+ if (is_shorty[string_id->GetIndex()] || from_hot_method[string_id->GetIndex()]) {
+ hot_bytes += element_size;
+ }
+ max_offset = std::max(max_offset, end_offset);
+ }
+ VLOG(compiler) << "Hot string data bytes " << hot_bytes << "/" << max_offset - min_offset;
+ std::sort(string_ids.begin(),
+ string_ids.end(),
+ [&is_shorty, &from_hot_method](const dex_ir::StringId* a,
+ const dex_ir::StringId* b) {
+ const bool a_is_hot = from_hot_method[a->GetIndex()];
+ const bool b_is_hot = from_hot_method[b->GetIndex()];
+ if (a_is_hot != b_is_hot) {
+ return a_is_hot < b_is_hot;
+ }
+ // After hot methods are partitioned, subpartition shorties.
+ const bool a_is_shorty = is_shorty[a->GetIndex()];
+ const bool b_is_shorty = is_shorty[b->GetIndex()];
+ if (a_is_shorty != b_is_shorty) {
+ return a_is_shorty < b_is_shorty;
+ }
+ // Preserve order.
+ return a->DataItem()->GetOffset() < b->DataItem()->GetOffset();
+ });
+ // Now we know what order we want the string data, reorder the offsets.
+ size_t offset = min_offset;
+ for (dex_ir::StringId* string_id : string_ids) {
+ dex_ir::StringData* data = string_id->DataItem();
+ data->SetOffset(offset);
+ offset += data->GetSize() + 1; // Add one extra for null.
+ }
+ if (offset > max_offset) {
+ const uint32_t diff = offset - max_offset;
+ // If we expanded the string data section, we need to update the offsets or else we will
+ // corrupt the next section when writing out.
+ FixupSections(header_->GetCollections().StringDatasOffset(), diff);
+ // Update file size.
+ header_->SetFileSize(header_->FileSize() + diff);
+ }
+}
+
// Orders code items according to specified class data ordering.
// NOTE: If the section following the code items is byte aligned, the last code item is left in
// place to preserve alignment. Layout needs an overhaul to handle movement of other sections.
@@ -1686,6 +1791,7 @@
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
+ LayoutStringData(dex_file);
std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
int32_t diff = LayoutCodeItems(new_class_data_order);
// Move sections after ClassData by diff bytes.
@@ -1700,7 +1806,7 @@
std::unique_ptr<File> new_file;
if (!options_.output_to_memmap_) {
std::string output_location(options_.output_dex_directory_);
- size_t last_slash = dex_file_location.rfind("/");
+ size_t last_slash = dex_file_location.rfind('/');
std::string dex_file_directory = dex_file_location.substr(0, last_slash + 1);
if (output_location == dex_file_directory) {
output_location = dex_file_location + ".new";
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index f26b423..69117ad 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -109,6 +109,7 @@
std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
int32_t LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order);
+ void LayoutStringData(const DexFile* dex_file);
bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
void FixupSections(uint32_t offset, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index bd6548e..e988aac 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -41,19 +41,7 @@
"AAAAdQEAAAAQAAABAAAAjAEAAA==";
static const char kDexFileLayoutInputProfile[] =
- "cHJvADAwNAABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
-
-static const char kDexFileLayoutExpectedOutputDex[] =
- "ZGV4CjAzNQD1KW3+B8NAB0f2A/ZVIBJ0aHrGIqcpVTAUAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAH"
- "AAAAcAAAAAQAAACMAAAAAQAAAJwAAAAAAAAAAAAAAAMAAACoAAAAAgAAAMAAAAAUAQAAAAEAADAB"
- "AAA4AQAAQAEAAEgBAABNAQAAUgEAAGYBAAADAAAABAAAAAUAAAAGAAAABgAAAAMAAAAAAAAAAAAA"
- "AAAAAAABAAAAAAAAAAIAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAIAAAAAAAAAdQEAAAAAAAAAAAAA"
- "AAAAAAIAAAAAAAAAAQAAAAAAAAB/AQAAAAAAAAEAAQABAAAAbwEAAAQAAABwEAIAAAAOAAEAAQAB"
- "AAAAaQEAAAQAAABwEAIAAAAOAAY8aW5pdD4ABkEuamF2YQAGQi5qYXZhAANMQTsAA0xCOwASTGph"
- "dmEvbGFuZy9PYmplY3Q7AAFWAAQABw48AAQABw48AAAAAQABgIAEgAIAAAEAAICABJgCAAAACwAA"
- "AAAAAAABAAAAAAAAAAEAAAAHAAAAcAAAAAIAAAAEAAAAjAAAAAMAAAABAAAAnAAAAAUAAAADAAAA"
- "qAAAAAYAAAACAAAAwAAAAAEgAAACAAAAAAEAAAIgAAAHAAAAMAEAAAMgAAACAAAAaQEAAAAgAAAC"
- "AAAAdQEAAAAQAAABAAAAjAEAAA==";
+ "cHJvADAwNQABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
// Dex file with catch handler unreferenced by try blocks.
// Constructed by building a dex file with try/catch blocks and hex editing.
@@ -203,6 +191,20 @@
"QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A"
"BgAHDgAJAAcOAA==";
+// Dex file with local info containing a null type descriptor.
+// Constructed a dex file with debug info sequence containing DBG_RESTART_LOCAL without any
+// DBG_START_LOCAL to give it a declared type.
+static const char kUnknownTypeDebugInfoInputDex[] =
+ "ZGV4CjAzNQBtKqZfzjHLNSNwW2A6Bz9FuCEX0sL+FF38AQAAcAAAAHhWNBIAAAAAAAAAAHQBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAMAQAA8AAAABwB"
+ "AAAkAQAALAEAAC8BAAA0AQAASAEAAEsBAABOAQAAAgAAAAMAAAAEAAAABQAAAAIAAAAAAAAAAAAA"
+ "AAUAAAADAAAAAAAAAAEAAQAAAAAAAQAAAAYAAAACAAEAAAAAAAEAAAABAAAAAgAAAAAAAAABAAAA"
+ "AAAAAGMBAAAAAAAAAQABAAEAAABUAQAABAAAAHAQAgAAAA4AAgABAAAAAABZAQAAAgAAABIQDwAG"
+ "PGluaXQ+AAZBLmphdmEAAUkAA0xBOwASTGphdmEvbGFuZy9PYmplY3Q7AAFWAAFhAAR0aGlzAAEA"
+ "Bw4AAwAHDh4GAAYAAAAAAQEAgYAE8AEBAYgCAAAACwAAAAAAAAABAAAAAAAAAAEAAAAIAAAAcAAA"
+ "AAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAAuAAAAAYAAAABAAAA0AAAAAEgAAACAAAA"
+ "8AAAAAIgAAAIAAAAHAEAAAMgAAACAAAAVAEAAAAgAAABAAAAYwEAAAAQAAABAAAAdAEAAA==";
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -302,7 +304,7 @@
return true;
}
- // Runs DexFileOutput test.
+ // Runs DexFileLayout test.
bool DexFileLayoutExec(std::string* error_msg) {
ScratchFile tmp_file;
std::string tmp_name = tmp_file.GetFilename();
@@ -314,26 +316,21 @@
WriteFileBase64(kDexFileLayoutInputDex, dex_file.c_str());
std::string profile_file = tmp_dir + "primary.prof";
WriteFileBase64(kDexFileLayoutInputProfile, profile_file.c_str());
- std::string expected_output = tmp_dir + "expected.dex";
- WriteFileBase64(kDexFileLayoutExpectedOutputDex, expected_output.c_str());
std::string output_dex = tmp_dir + "classes.dex.new";
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
+ { dexlayout, "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
return false;
}
- std::vector<std::string> diff_exec_argv =
- { "/usr/bin/diff", expected_output, output_dex };
- if (!::art::Exec(diff_exec_argv, error_msg)) {
- return false;
- }
+
+ // -v makes sure that the layout did not corrupt the dex file.
std::vector<std::string> rm_exec_argv =
- { "/bin/rm", dex_file, profile_file, expected_output, output_dex };
+ { "/bin/rm", dex_file, profile_file, output_dex };
if (!::art::Exec(rm_exec_argv, error_msg)) {
return false;
}
@@ -373,6 +370,26 @@
}
return true;
}
+
+ bool DexLayoutExec(ScratchFile* dex_file,
+ const char* dex_filename,
+ ScratchFile* profile_file,
+ const char* profile_filename,
+ std::vector<std::string>& dexlayout_exec_argv) {
+ WriteBase64ToFile(dex_filename, dex_file->GetFile());
+ EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ if (profile_file != nullptr) {
+ WriteBase64ToFile(profile_filename, profile_file->GetFile());
+ EXPECT_EQ(profile_file->GetFile()->Flush(), 0);
+ }
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ if (!result) {
+ LOG(ERROR) << "Error: " << error_msg;
+ return false;
+ }
+ return true;
+ }
};
@@ -422,94 +439,84 @@
}
TEST_F(DexLayoutTest, DuplicateOffset) {
- ScratchFile temp;
- WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
- std::vector<std::string> dexlayout_exec_argv = {
- dexlayout,
- "-a",
- "-i",
- "-o",
- "/dev/null",
- temp.GetFilename()};
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-a", "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kDexFileDuplicateOffset,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, NullSetRefListElement) {
- ScratchFile temp;
- WriteBase64ToFile(kNullSetRefListElementInputDex, temp.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kNullSetRefListElementInputDex,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, MultiClassData) {
- ScratchFile temp;
- WriteBase64ToFile(kMultiClassDataInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kMultiClassDataInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, UnalignedCodeInfo) {
- ScratchFile temp;
- WriteBase64ToFile(kUnalignedCodeInfoInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kUnalignedCodeInfoInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, ClassDataBeforeCode) {
- ScratchFile temp;
- WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kClassDataBeforeCodeInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
+}
+
+TEST_F(DexLayoutTest, UnknownTypeDebugInfo) {
+ ScratchFile temp_dex;
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kUnknownTypeDebugInfoInputDex,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
} // namespace art
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 1f6b874..eb57d33 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -43,6 +43,7 @@
static const uint32_t kOpcodeShift = 26;
static const uint32_t kCop1 = (17 << kOpcodeShift);
+static const uint32_t kMsa = (30 << kOpcodeShift); // MSA major opcode.
static const uint32_t kITypeMask = (0x3f << kOpcodeShift);
static const uint32_t kJTypeMask = (0x3f << kOpcodeShift);
@@ -51,6 +52,8 @@
static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
static const uint32_t kSpecial3Mask = (0x3f << kOpcodeShift);
static const uint32_t kFpMask = kRTypeMask;
+static const uint32_t kMsaMask = kRTypeMask;
+static const uint32_t kMsaSpecialMask = (0x3f << kOpcodeShift);
static const MipsInstruction gMipsInstructions[] = {
// "sll r0, r0, 0" is the canonical "nop", used in delay slots.
@@ -417,6 +420,37 @@
{ kFpMask, kCop1 | 0x10, "sel", "fadt" },
{ kFpMask, kCop1 | 0x1e, "max", "fadt" },
{ kFpMask, kCop1 | 0x1c, "min", "fadt" },
+
+ // MSA instructions.
+ { kMsaMask | (0x1f << 21), kMsa | (0x0 << 21) | 0x1e, "and.v", "kmn" },
+ { kMsaMask | (0x1f << 21), kMsa | (0x1 << 21) | 0x1e, "or.v", "kmn" },
+ { kMsaMask | (0x1f << 21), kMsa | (0x2 << 21) | 0x1e, "nor.v", "kmn" },
+ { kMsaMask | (0x1f << 21), kMsa | (0x3 << 21) | 0x1e, "xor.v", "kmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xe, "addv", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xe, "subv", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x12, "mulv", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x12, "div_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x12, "div_u", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x12, "mod_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x12, "mod_u", "Vkmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x0 << 22) | 0x1b, "fadd", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x1b, "fsub", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x1b, "fmul", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x1b, "fdiv", "Ukmn" },
+ { kMsaMask | (0x1ff << 17), kMsa | (0x19e << 17) | 0x1e, "ffint_s", "ukm" },
+ { kMsaMask | (0x1ff << 17), kMsa | (0x19c << 17) | 0x1e, "ftint_s", "ukm" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xd, "sll", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xd, "sra", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0xd, "srl", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x9, "slli", "kmW" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x9, "srai", "kmW" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x9, "srli", "kmW" },
+ { kMsaMask | (0x3ff << 16), kMsa | (0xbe << 16) | 0x19, "move.v", "km" },
+ { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x19, "splati", "kX" },
+ { kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x7, "ldi", "kx" },
+ { kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
+ { kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -559,6 +593,125 @@
case 't': args << 'f' << rt; break;
case 'Z': args << (rd + 1); break; // sz ([d]ext size).
case 'z': args << (rd - sa + 1); break; // sz ([d]ins, dinsu size).
+ case 'k': args << 'w' << sa; break;
+ case 'm': args << 'w' << rd; break;
+ case 'n': args << 'w' << rt; break;
+ case 'U': // MSA 1-bit df (word/doubleword), position 21.
+ {
+ int32_t df = (instruction >> 21) & 0x1;
+ switch (df) {
+ case 0: opcode += ".w"; break;
+ case 1: opcode += ".d"; break;
+ }
+ continue; // No ", ".
+ }
+ case 'u': // MSA 1-bit df (word/doubleword), position 16.
+ {
+ int32_t df = (instruction >> 16) & 0x1;
+ switch (df) {
+ case 0: opcode += ".w"; break;
+ case 1: opcode += ".d"; break;
+ }
+ continue; // No ", ".
+ }
+ case 'V': // MSA 2-bit df, position 21.
+ {
+ int32_t df = (instruction >> 21) & 0x3;
+ switch (df) {
+ case 0: opcode += ".b"; break;
+ case 1: opcode += ".h"; break;
+ case 2: opcode += ".w"; break;
+ case 3: opcode += ".d"; break;
+ }
+ continue; // No ", ".
+ }
+ case 'v': // MSA 2-bit df, position 16.
+ {
+ int32_t df = (instruction >> 16) & 0x3;
+ switch (df) {
+ case 0: opcode += ".b"; break;
+ case 1: opcode += ".h"; break;
+ case 2: opcode += ".w"; break;
+ case 3: opcode += ".d"; break;
+ }
+ continue; // No ", ".
+ }
+ case 'W': // MSA df/m.
+ {
+ int32_t df_m = (instruction >> 16) & 0x7f;
+ if ((df_m & (0x1 << 6)) == 0) {
+ opcode += ".d";
+ args << (df_m & 0x3f);
+ break;
+ }
+ if ((df_m & (0x1 << 5)) == 0) {
+ opcode += ".w";
+ args << (df_m & 0x1f);
+ break;
+ }
+ if ((df_m & (0x1 << 4)) == 0) {
+ opcode += ".h";
+ args << (df_m & 0xf);
+ break;
+ }
+ if ((df_m & (0x1 << 3)) == 0) {
+ opcode += ".b";
+ args << (df_m & 0x7);
+ }
+ break;
+ }
+ case 'w': // MSA +x(rs).
+ {
+ int32_t df = instruction & 0x3;
+ int32_t s10 = (instruction >> 16) & 0x3ff;
+ s10 -= (s10 & 0x200) << 1; // Sign-extend s10.
+ switch (df) {
+ case 0: opcode += ".b"; break;
+ case 1: opcode += ".h"; break;
+ case 2: opcode += ".w"; break;
+ case 3: opcode += ".d"; break;
+ }
+ args << StringPrintf("%+d(r%d)", s10 << df, rd);
+ break;
+ }
+ case 'X': // MSA df/n - ws[x].
+ {
+ int32_t df_n = (instruction >> 16) & 0x3f;
+ if ((df_n & (0x3 << 4)) == 0) {
+ opcode += ".b";
+ args << 'w' << rd << '[' << (df_n & 0xf) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 3)) == 0) {
+ opcode += ".h";
+ args << 'w' << rd << '[' << (df_n & 0x7) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 2)) == 0) {
+ opcode += ".w";
+ args << 'w' << rd << '[' << (df_n & 0x3) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 1)) == 0) {
+ opcode += ".d";
+ args << 'w' << rd << '[' << (df_n & 0x1) << ']';
+ }
+ break;
+ }
+ case 'x': // MSA i10.
+ {
+ int32_t df = (instruction >> 21) & 0x3;
+ int32_t i10 = (instruction >> 11) & 0x3ff;
+ i10 -= (i10 & 0x200) << 1; // Sign-extend i10.
+ switch (df) {
+ case 0: opcode += ".b"; break;
+ case 1: opcode += ".h"; break;
+ case 2: opcode += ".w"; break;
+ case 3: opcode += ".d"; break;
+ }
+ args << i10;
+ break;
+ }
}
if (*(args_fmt + 1)) {
args << ", ";
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index a289433..e12bcec 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -574,6 +574,20 @@
load = true;
src_reg_file = dst_reg_file = SSE;
break;
+ case 0x29:
+ opcode1 = "pcmpeqq";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x39:
+ opcode1 = "pcmpgtq";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
case 0x40:
opcode1 = "pmulld";
prefix[2] = 0;
@@ -737,6 +751,24 @@
load = true;
has_modrm = true;
break;
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ if (prefix[2] == 0x66) {
+ src_reg_file = dst_reg_file = SSE;
+ prefix[2] = 0;  // Clear the prefix now that it has served its purpose as part of the opcode.
+ } else {
+ src_reg_file = dst_reg_file = MMX;
+ }
+ switch (*instr) {
+ case 0x64: opcode1 = "pcmpgtb"; break;
+ case 0x65: opcode1 = "pcmpgtw"; break;
+ case 0x66: opcode1 = "pcmpgtd"; break;
+ }
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ break;
case 0x6E:
if (prefix[2] == 0x66) {
dst_reg_file = SSE;
@@ -832,6 +864,24 @@
store = true;
immediate_bytes = 1;
break;
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ if (prefix[2] == 0x66) {
+ src_reg_file = dst_reg_file = SSE;
+ prefix[2] = 0;  // Clear the prefix now that it has served its purpose as part of the opcode.
+ } else {
+ src_reg_file = dst_reg_file = MMX;
+ }
+ switch (*instr) {
+ case 0x74: opcode1 = "pcmpeqb"; break;
+ case 0x75: opcode1 = "pcmpeqw"; break;
+ case 0x76: opcode1 = "pcmpeqd"; break;
+ }
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ break;
case 0x7C:
if (prefix[0] == 0xF2) {
opcode1 = "haddps";
@@ -1083,6 +1133,22 @@
opcode1 = opcode_tmp.c_str();
}
break;
+ case 0xE0:
+ case 0xE3:
+ if (prefix[2] == 0x66) {
+ src_reg_file = dst_reg_file = SSE;
+ prefix[2] = 0;  // Clear the prefix now that it has served its purpose as part of the opcode.
+ } else {
+ src_reg_file = dst_reg_file = MMX;
+ }
+ switch (*instr) {
+ case 0xE0: opcode1 = "pavgb"; break;
+ case 0xE3: opcode1 = "pavgw"; break;
+ }
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ break;
case 0xEB:
if (prefix[2] == 0x66) {
src_reg_file = dst_reg_file = SSE;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index dfaae7d..0c2717f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -485,7 +485,7 @@
explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
template <typename T>
- T* operator()(T* ptr) const {
+ T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = 0) const {
return patch_oat_->RelocatedAddressOfPointer(ptr);
}
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index a25460e..b9a85bc 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -44,10 +44,15 @@
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
- if (!info.Load(profile_files[i].GetFile()->Fd())) {
+ ProfileCompilationInfo cur_info;
+ if (!cur_info.Load(profile_files[i].GetFile()->Fd())) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
+ if (!info.MergeWith(cur_info)) {
+ LOG(WARNING) << "Could not merge profile file at index " << i;
+ return kErrorBadProfiles;
+ }
}
// Check if there is enough new information added by the current profiles.
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 52f3b52..38254e2 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -22,6 +22,7 @@
#include "exec_utils.h"
#include "jit/profile_compilation_info.h"
#include "mirror/class-inl.h"
+#include "obj_ptr-inl.h"
#include "profile_assistant.h"
#include "scoped_thread_state_change-inl.h"
#include "utils.h"
@@ -36,14 +37,25 @@
uint16_t number_of_classes,
const ScratchFile& profile,
ProfileCompilationInfo* info,
- uint16_t start_method_index = 0) {
+ uint16_t start_method_index = 0,
+ bool reverse_dex_write_order = false) {
std::string dex_location1 = "location1" + id;
uint32_t dex_location_checksum1 = checksum;
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
- ASSERT_TRUE(info->AddMethodIndex(dex_location1, dex_location_checksum1, i));
- ASSERT_TRUE(info->AddMethodIndex(dex_location2, dex_location_checksum2, i));
+ // reverse_dex_write_order controls the order in which the dex files will be added to
+ // the profile and thus written to disk.
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi =
+ GetOfflineProfileMethodInfo(dex_location1, dex_location_checksum1,
+ dex_location2, dex_location_checksum2);
+ if (reverse_dex_write_order) {
+ ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, pmi));
+ } else {
+ ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, pmi));
+ }
}
for (uint16_t i = 0; i < number_of_classes; i++) {
ASSERT_TRUE(info->AddClassIndex(dex_location1, dex_location_checksum1, dex::TypeIndex(i)));
@@ -54,6 +66,43 @@
ASSERT_TRUE(profile.GetFile()->ResetOffset());
}
+ ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo(
+ const std::string& dex_location1, uint32_t dex_checksum1,
+ const std::string& dex_location2, uint32_t dex_checksum2) {
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ pmi.dex_references.emplace_back(dex_location1, dex_checksum1);
+ pmi.dex_references.emplace_back(dex_location2, dex_checksum2);
+
+ // Monomorphic
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.AddClass(0, dex::TypeIndex(0));
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Polymorphic
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.AddClass(0, dex::TypeIndex(0));
+ dex_pc_data.AddClass(1, dex::TypeIndex(1));
+
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Megamorphic
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMegamorphic();
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMissingTypes();
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+
+ return pmi;
+ }
+
int GetFd(const ScratchFile& file) const {
return static_cast<int>(file.GetFd());
}
@@ -98,6 +147,18 @@
return ExecAndReturnCode(argv_str, &error);
}
+ bool GenerateTestProfileWithInputDex(const std::string& filename) {
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--generate-test-profile=" + filename);
+ argv_str.push_back("--generate-test-profile-seed=0");
+ argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
+ argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
+ std::string error;
+ return ExecAndReturnCode(argv_str, &error);
+ }
+
bool CreateProfile(std::string profile_file_contents,
const std::string& filename,
const std::string& dex_location) {
@@ -118,12 +179,12 @@
return true;
}
- bool DumpClasses(const std::string& filename, std::string* file_contents) {
+ bool DumpClassesAndMethods(const std::string& filename, std::string* file_contents) {
ScratchFile class_names_file;
std::string profman_cmd = GetProfmanCmd();
std::vector<std::string> argv_str;
argv_str.push_back(profman_cmd);
- argv_str.push_back("--dump-classes");
+ argv_str.push_back("--dump-classes-and-methods");
argv_str.push_back("--profile-file=" + filename);
argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
@@ -140,13 +201,14 @@
return true;
}
- bool CreateAndDump(const std::string& input_file_contents, std::string* output_file_contents) {
+ bool CreateAndDump(const std::string& input_file_contents,
+ std::string* output_file_contents) {
ScratchFile profile_file;
EXPECT_TRUE(CreateProfile(input_file_contents,
profile_file.GetFilename(),
GetLibCoreDexFileNames()[0]));
profile_file.GetFile()->ResetOffset();
- EXPECT_TRUE(DumpClasses(profile_file.GetFilename(), output_file_contents));
+ EXPECT_TRUE(DumpClassesAndMethods(profile_file.GetFilename(), output_file_contents));
return true;
}
@@ -156,7 +218,7 @@
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> h_loader(
- hs.NewHandle(self->DecodeJObject(class_loader)->AsClassLoader()));
+ hs.NewHandle(ObjPtr<mirror::ClassLoader>::DownCast(self->DecodeJObject(class_loader))));
return class_linker->FindClass(self, clazz.c_str(), h_loader);
}
@@ -423,12 +485,38 @@
ASSERT_TRUE(info.Load(GetFd(profile)));
}
+TEST_F(ProfileAssistantTest, TestProfileGenerationWithIndexDex) {
+ ScratchFile profile;
+ // Generate a test profile passing in a dex file as reference.
+ GenerateTestProfileWithInputDex(profile.GetFilename());
+
+ // Verify that the generated profile is valid and can be loaded.
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ProfileCompilationInfo info;
+ ASSERT_TRUE(info.Load(GetFd(profile)));
+}
+
TEST_F(ProfileAssistantTest, TestProfileCreationAllMatch) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
"Ljava/lang/Comparable;",
"Ljava/lang/Math;",
- "Ljava/lang/Object;"
+ "Ljava/lang/Object;",
+ "Ljava/lang/Object;-><init>()V"
+ };
+ std::string file_contents;
+ for (std::string& class_name : class_names) {
+ file_contents += class_name + std::string("\n");
+ }
+ std::string output_file_contents;
+ ASSERT_TRUE(CreateAndDump(file_contents, &output_file_contents));
+ ASSERT_EQ(output_file_contents, file_contents);
+}
+
+TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) {
+ // Class names put here need to be in sorted order.
+ std::vector<std::string> class_names = {
+ "Ljava/lang/Math;->*",
};
std::string input_file_contents;
std::string expected_contents;
@@ -438,8 +526,29 @@
std::string("\n");
}
std::string output_file_contents;
- ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
- ASSERT_EQ(output_file_contents, expected_contents);
+ ScratchFile profile_file;
+ EXPECT_TRUE(CreateProfile(input_file_contents,
+ profile_file.GetFilename(),
+ GetLibCoreDexFileNames()[0]));
+ ProfileCompilationInfo info;
+ profile_file.GetFile()->ResetOffset();
+ ASSERT_TRUE(info.Load(GetFd(profile_file)));
+ // Verify that the profile has matching methods.
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> klass = GetClass(nullptr, "Ljava/lang/Math;");
+ ASSERT_TRUE(klass != nullptr);
+ size_t method_count = 0;
+ for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
+ if (!method.IsCopied() && method.GetCodeItem() != nullptr) {
+ ++method_count;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ASSERT_TRUE(info.GetMethod(method.GetDexFile()->GetLocation(),
+ method.GetDexFile()->GetLocationChecksum(),
+ method.GetDexMethodIndex(),
+ &pmi));
+ }
+ }
+ EXPECT_GT(method_count, 0u);
}
TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
@@ -456,8 +565,8 @@
std::string output_file_contents;
ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
std::string expected_contents =
- DescriptorToDot(class_names[1].c_str()) + std::string("\n") +
- DescriptorToDot(class_names[2].c_str()) + std::string("\n");
+ class_names[1] + std::string("\n") +
+ class_names[2] + std::string("\n");
ASSERT_EQ(output_file_contents, expected_contents);
}
@@ -589,4 +698,44 @@
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithDifferentDexOrder) {
+ ScratchFile profile1;
+ ScratchFile reference_profile;
+
+ std::vector<int> profile_fds({GetFd(profile1)});
+ int reference_profile_fd = GetFd(reference_profile);
+
+ // The new profile info will contain the methods with indices 0-100.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ ProfileCompilationInfo info1;
+ SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1,
+ /*start_method_index*/0, /*reverse_dex_write_order*/false);
+
+ // The reference profile info will contain the methods with indices 50-150.
+ // When setting up the profile reverse the order in which the dex files
+ // are added to the profile. This will verify that profman merges profiles
+ // with a different dex order correctly.
+ const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
+ ProfileCompilationInfo reference_info;
+ SetupProfile("p1", 1, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order*/true);
+
+ // We should advise compilation.
+ ASSERT_EQ(ProfileAssistant::kCompile,
+ ProcessProfiles(profile_fds, reference_profile_fd));
+
+ // The resulting compilation info must be equal to the merge of the inputs.
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+
+ ProfileCompilationInfo expected;
+ ASSERT_TRUE(expected.MergeWith(reference_info));
+ ASSERT_TRUE(expected.MergeWith(info1));
+ ASSERT_TRUE(expected.Equals(result));
+
+ // The information from profile must remain the same.
+ CheckProfileInfo(profile1, info1);
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index f7316cc..cf1ffe1 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -90,8 +90,9 @@
UsageError("");
UsageError(" --dump-output-to-fd=<number>: redirects --dump-only output to a file descriptor.");
UsageError("");
- UsageError(" --dump-classes: dumps a sorted list of classes that are in the specified profile");
- UsageError(" file to standard output (default) in a human readable form.");
+ UsageError(" --dump-classes-and-methods: dumps a sorted list of classes and methods that are");
+ UsageError(" in the specified profile file to standard output (default) in a human");
+ UsageError(" readable form. The output is valid input for --create-profile-from");
UsageError("");
UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation.");
UsageError(" Can be specified multiple time, in which case the data from the different");
@@ -117,9 +118,11 @@
UsageError(" number of methods that should be generated. Defaults to 5.");
UsageError(" --generate-test-profile-class-ratio=<number>: the percentage from the maximum");
UsageError(" number of classes that should be generated. Defaults to 5.");
+ UsageError(" --generate-test-profile-seed=<number>: seed for random number generator used when");
+ UsageError(" generating random test profiles. Defaults to using NanoTime.");
UsageError("");
- UsageError(" --create-profile-from=<filename>: creates a profile from a list of classes.");
- UsageError("");
+ UsageError(" --create-profile-from=<filename>: creates a profile from a list of classes and");
+ UsageError(" methods.");
UsageError("");
UsageError(" --dex-location=<string>: location string to use with corresponding");
UsageError(" apk-fd to find dex files");
@@ -140,6 +143,7 @@
// Separators used when parsing human friendly representation of profiles.
static const std::string kMethodSep = "->";
static const std::string kMissingTypesMarker = "missing_types";
+static const std::string kClassAllMethods = "*";
static constexpr char kProfileParsingInlineChacheSep = '+';
static constexpr char kProfileParsingTypeSep = ',';
static constexpr char kProfileParsingFirstCharInSignature = '(';
@@ -151,11 +155,12 @@
ProfMan() :
reference_profile_file_fd_(kInvalidFd),
dump_only_(false),
- dump_classes_(false),
+ dump_classes_and_methods_(false),
dump_output_to_fd_(kInvalidFd),
test_profile_num_dex_(kDefaultTestProfileNumDex),
test_profile_method_ratio_(kDefaultTestProfileMethodRatio),
test_profile_class_ratio_(kDefaultTestProfileClassRatio),
+ test_profile_seed_(NanoTime()),
start_ns_(NanoTime()) {}
~ProfMan() {
@@ -184,8 +189,8 @@
}
if (option == "--dump-only") {
dump_only_ = true;
- } else if (option == "--dump-classes") {
- dump_classes_ = true;
+ } else if (option == "--dump-classes-and-methods") {
+ dump_classes_and_methods_ = true;
} else if (option.starts_with("--create-profile-from=")) {
create_profile_from_file_ = option.substr(strlen("--create-profile-from=")).ToString();
} else if (option.starts_with("--dump-output-to-fd=")) {
@@ -221,6 +226,8 @@
"--generate-test-profile-class-ratio",
&test_profile_class_ratio_,
Usage);
+ } else if (option.starts_with("--generate-test-profile-seed=")) {
+ ParseUintOption(option, "--generate-test-profile-seed", &test_profile_seed_, Usage);
} else {
Usage("Unknown argument '%s'", option.data());
}
@@ -406,27 +413,45 @@
return dump_only_;
}
- bool GetClassNames(int fd,
- std::vector<std::unique_ptr<const DexFile>>* dex_files,
- std::set<std::string>* class_names) {
+ bool GetClassNamesAndMethods(int fd,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* out_lines) {
ProfileCompilationInfo profile_info;
if (!profile_info.Load(fd)) {
LOG(ERROR) << "Cannot load profile info";
return false;
}
- profile_info.GetClassNames(dex_files, class_names);
+ for (const std::unique_ptr<const DexFile>& dex_file : *dex_files) {
+ std::set<dex::TypeIndex> class_types;
+ ProfileCompilationInfo::MethodMap methods;
+ if (profile_info.GetClassesAndMethods(dex_file.get(), &class_types, &methods)) {
+ for (const dex::TypeIndex& type_index : class_types) {
+ const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
+ out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
+ }
+ for (const auto& pair : methods) {
+ // TODO: Process inline caches.
+ const uint16_t dex_method_idx = pair.first;
+ const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
+ std::string signature_string(dex_file->GetMethodSignature(id).ToString());
+ std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
+ std::string method_name(dex_file->GetMethodName(id));
+ out_lines->insert(type_string + kMethodSep + method_name + signature_string);
+ }
+ }
+ }
return true;
}
- bool GetClassNames(std::string profile_file,
- std::vector<std::unique_ptr<const DexFile>>* dex_files,
- std::set<std::string>* class_names) {
+ bool GetClassNamesAndMethods(const std::string& profile_file,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::set<std::string>* out_lines) {
int fd = open(profile_file.c_str(), O_RDONLY);
if (!FdIsValid(fd)) {
LOG(ERROR) << "Cannot open " << profile_file << strerror(errno);
return false;
}
- if (!GetClassNames(fd, dex_files, class_names)) {
+ if (!GetClassNamesAndMethods(fd, dex_files, out_lines)) {
return false;
}
if (close(fd) < 0) {
@@ -450,26 +475,26 @@
std::set<std::string> class_names;
if (!profile_files_fd_.empty()) {
for (int profile_file_fd : profile_files_fd_) {
- if (!GetClassNames(profile_file_fd, &dex_files, &class_names)) {
+ if (!GetClassNamesAndMethods(profile_file_fd, &dex_files, &class_names)) {
return -1;
}
}
}
if (!profile_files_.empty()) {
for (const std::string& profile_file : profile_files_) {
- if (!GetClassNames(profile_file, &dex_files, &class_names)) {
+ if (!GetClassNamesAndMethods(profile_file, &dex_files, &class_names)) {
return -1;
}
}
}
// Concatenate class names from reference profile file.
if (FdIsValid(reference_profile_file_fd_)) {
- if (!GetClassNames(reference_profile_file_fd_, &dex_files, &class_names)) {
+ if (!GetClassNamesAndMethods(reference_profile_file_fd_, &dex_files, &class_names)) {
return -1;
}
}
if (!reference_profile_file_.empty()) {
- if (!GetClassNames(reference_profile_file_, &dex_files, &class_names)) {
+ if (!GetClassNamesAndMethods(reference_profile_file_, &dex_files, &class_names)) {
return -1;
}
}
@@ -489,8 +514,8 @@
return 0;
}
- bool ShouldOnlyDumpClasses() {
- return dump_classes_;
+ bool ShouldOnlyDumpClassesAndMethods() {
+ return dump_classes_and_methods_;
}
// Read lines from the given file, dropping comments and empty lines. Post-process each line with
@@ -556,17 +581,9 @@
return false;
}
- // Find the method specified by method_spec in the class class_ref. The method
- // must have a single INVOKE_VIRTUAL in its byte code.
- // Upon success it returns true and stores the method index and the invoke dex pc
- // in the output parameters.
- // The format of the method spec is "inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
- //
- // TODO(calin): support INVOKE_INTERFACE and the range variants.
- bool FindMethodWithSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
- const std::string& method_spec,
- /*out*/uint16_t* method_index,
- /*out*/uint32_t* dex_pc) {
+ // Find the method specified by method_spec in the class class_ref.
+ uint32_t FindMethodIndex(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ const std::string& method_spec) {
std::vector<std::string> name_and_signature;
Split(method_spec, kProfileParsingFirstCharInSignature, &name_and_signature);
if (name_and_signature.size() != 2) {
@@ -579,38 +596,50 @@
const DexFile::StringId* name_id = dex_file->FindStringId(name.c_str());
if (name_id == nullptr) {
LOG(ERROR) << "Could not find name: " << name;
- return false;
+ return DexFile::kDexNoIndex;
}
dex::TypeIndex return_type_idx;
std::vector<dex::TypeIndex> param_type_idxs;
if (!dex_file->CreateTypeList(signature, &return_type_idx, ¶m_type_idxs)) {
LOG(ERROR) << "Could not create type list" << signature;
- return false;
+ return DexFile::kDexNoIndex;
}
const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
if (proto_id == nullptr) {
LOG(ERROR) << "Could not find proto_id: " << name;
- return false;
+ return DexFile::kDexNoIndex;
}
const DexFile::MethodId* method_id = dex_file->FindMethodId(
dex_file->GetTypeId(class_ref.type_index), *name_id, *proto_id);
if (method_id == nullptr) {
LOG(ERROR) << "Could not find method_id: " << name;
- return false;
+ return DexFile::kDexNoIndex;
}
- *method_index = dex_file->GetIndexForMethodId(*method_id);
+ return dex_file->GetIndexForMethodId(*method_id);
+ }
+ // Given a method, return true if the method has a single INVOKE_VIRTUAL in its byte code.
+ // Upon success it returns true and stores the method index and the invoke dex pc
+ // in the output parameters.
+ // The format of the method spec is "inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
+ //
+ // TODO(calin): support INVOKE_INTERFACE and the range variants.
+ bool HasSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ uint16_t method_index,
+ /*out*/uint32_t* dex_pc) {
+ const DexFile* dex_file = class_ref.dex_file;
uint32_t offset = dex_file->FindCodeItemOffset(
*dex_file->FindClassDef(class_ref.type_index),
- *method_index);
+ method_index);
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
bool found_invoke = false;
for (CodeItemIterator it(*code_item); !it.Done(); it.Advance()) {
if (it.CurrentInstruction().Opcode() == Instruction::INVOKE_VIRTUAL) {
if (found_invoke) {
- LOG(ERROR) << "Multiple invoke INVOKE_VIRTUAL found: " << name;
+ LOG(ERROR) << "Multiple invoke INVOKE_VIRTUAL found: "
+ << dex_file->PrettyMethod(method_index);
return false;
}
found_invoke = true;
@@ -618,7 +647,7 @@
}
}
if (!found_invoke) {
- LOG(ERROR) << "Could not find any INVOKE_VIRTUAL: " << name;
+ LOG(ERROR) << "Could not find any INVOKE_VIRTUAL: " << dex_file->PrettyMethod(method_index);
}
return found_invoke;
}
@@ -630,6 +659,7 @@
// "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
// "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types".
// "LTestInline;->inlineNoInlineCaches(LSuper;)I".
+ // "LTestInline;->*".
// The method and classes are searched only in the given dex files.
bool ProcessLine(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
const std::string& line,
@@ -650,8 +680,8 @@
return false;
}
- if (method_str.empty()) {
- // No method to add. Just add the class.
+ if (method_str.empty() || method_str == kClassAllMethods) {
+ // Start by adding the class.
std::set<DexCacheResolvedClasses> resolved_class_set;
const DexFile* dex_file = class_ref.dex_file;
const auto& dex_resolved_classes = resolved_class_set.emplace(
@@ -659,7 +689,27 @@
dex_file->GetBaseLocation(),
dex_file->GetLocationChecksum());
dex_resolved_classes.first->AddClass(class_ref.type_index);
- profile->AddMethodsAndClasses(std::vector<ProfileMethodInfo>(), resolved_class_set);
+ std::vector<ProfileMethodInfo> methods;
+ if (method_str == kClassAllMethods) {
+ // Add all of the methods.
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(class_ref.type_index);
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ if (class_data != nullptr) {
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ if (it.GetMethodCodeItemOffset() != 0) {
+ // Add all of the methods that have code to the profile.
+ const uint32_t method_idx = it.GetMemberIndex();
+ methods.push_back(ProfileMethodInfo(dex_file, method_idx));
+ }
+ it.Next();
+ }
+ }
+ }
+ profile->AddMethodsAndClasses(methods, resolved_class_set);
return true;
}
@@ -683,24 +733,29 @@
return false;
}
- uint16_t method_index;
- uint32_t dex_pc;
- if (!FindMethodWithSingleInvoke(class_ref, method_spec, &method_index, &dex_pc)) {
+ const uint32_t method_index = FindMethodIndex(class_ref, method_spec);
+ if (method_index == DexFile::kDexNoIndex) {
return false;
}
- std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
- size_t class_it = 0;
- for (const std::string ic_class : inline_cache_elems) {
- if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
- LOG(ERROR) << "Could not find class: " << ic_class;
+
+ std::vector<ProfileMethodInfo> pmi;
+ std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
+ if (is_missing_types || !inline_cache_elems.empty()) {
+ uint32_t dex_pc;
+ if (!HasSingleInvoke(class_ref, method_index, &dex_pc)) {
return false;
}
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
+ size_t class_it = 0;
+ for (const std::string& ic_class : inline_cache_elems) {
+ if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
+ LOG(ERROR) << "Could not find class: " << ic_class;
+ return false;
+ }
+ }
+ inline_caches.emplace_back(dex_pc, is_missing_types, classes);
}
- std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
- inline_caches.emplace_back(dex_pc, is_missing_types, classes);
- std::vector<ProfileMethodInfo> pmi;
pmi.emplace_back(class_ref.dex_file, method_index, inline_caches);
-
profile->AddMethodsAndClasses(pmi, std::set<DexCacheResolvedClasses>());
return true;
}
@@ -777,17 +832,39 @@
if (test_profile_class_ratio_ > 100) {
Usage("Invalid ratio for --generate-test-profile-class-ratio");
}
+ // If given APK files or DEX locations, check that they're ok.
+ if (!apk_files_.empty() || !apks_fd_.empty() || !dex_locations_.empty()) {
+ if (apk_files_.empty() && apks_fd_.empty()) {
+ Usage("APK files must be specified when passing DEX locations to --generate-test-profile");
+ }
+ if (dex_locations_.empty()) {
+ Usage("DEX locations must be specified when passing APK files to --generate-test-profile");
+ }
+ }
// ShouldGenerateTestProfile confirms !test_profile_.empty().
int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (profile_test_fd < 0) {
LOG(ERROR) << "Cannot open " << test_profile_ << strerror(errno);
return -1;
}
-
- bool result = ProfileCompilationInfo::GenerateTestProfile(profile_test_fd,
- test_profile_num_dex_,
- test_profile_method_ratio_,
- test_profile_class_ratio_);
+ bool result;
+ if (apk_files_.empty() && apks_fd_.empty() && dex_locations_.empty()) {
+ result = ProfileCompilationInfo::GenerateTestProfile(profile_test_fd,
+ test_profile_num_dex_,
+ test_profile_method_ratio_,
+ test_profile_class_ratio_,
+ test_profile_seed_);
+ } else {
+ // Initialize MemMap for ZipArchive::OpenFromFd.
+ MemMap::Init();
+ // Open the dex files to look up classes and methods.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ OpenApkFilesFromLocations(&dex_files);
+ // Create a random profile file based on the set of dex files.
+ result = ProfileCompilationInfo::GenerateTestProfile(profile_test_fd,
+ dex_files,
+ test_profile_seed_);
+ }
close(profile_test_fd); // ignore close result.
return result ? 0 : -1;
}
@@ -829,13 +906,14 @@
std::string reference_profile_file_;
int reference_profile_file_fd_;
bool dump_only_;
- bool dump_classes_;
+ bool dump_classes_and_methods_;
int dump_output_to_fd_;
std::string test_profile_;
std::string create_profile_from_file_;
uint16_t test_profile_num_dex_;
uint16_t test_profile_method_ratio_;
uint16_t test_profile_class_ratio_;
+ uint32_t test_profile_seed_;
uint64_t start_ns_;
};
@@ -852,7 +930,7 @@
if (profman.ShouldOnlyDumpProfile()) {
return profman.DumpProfileInfo();
}
- if (profman.ShouldOnlyDumpClasses()) {
+ if (profman.ShouldOnlyDumpClassesAndMethods()) {
return profman.DumpClasses();
}
if (profman.ShouldCreateProfile()) {
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 6d5dd6d..3582351 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -34,6 +34,18 @@
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM kryo processor.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ InstructionSetFeatures::FromVariant(kArm, "kryo", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 7U);
+
// Build features for a 32-bit ARM denver processor.
std::unique_ptr<const InstructionSetFeatures> denver_features(
InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
@@ -86,6 +98,18 @@
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM with LPAE and div processor.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(krait_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 3U);
+
// Build features for a 32-bit ARM processor with LPAE and div flipped.
std::unique_ptr<const InstructionSetFeatures> denver_features(
base_features->AddFeaturesFromString("div,atomic_ldrd_strd,armv8a", &error_msg));
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 72aa785..029de46 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1614,6 +1614,11 @@
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+/*
+ * Called to attempt to execute an obsolete method.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
/*
* Routine that intercepts method calls and returns.
*/
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 5b5d2ef..b2bbd0d 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2152,6 +2152,11 @@
RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+/*
+ * Called to attempt to execute an obsolete method.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
//
// Instrumentation-related stubs
@@ -2379,9 +2384,8 @@
ret
.Lnot_marked_rb_\name:
// Check if the top two bits are one, if this is the case it is a forwarding address.
- mvn wIP0, wIP0
- cmp wzr, wIP0, lsr #30
- beq .Lret_forwarding_address\name
+ tst wIP0, wIP0, lsl #1
+ bmi .Lret_forwarding_address\name
.Lslow_rb_\name:
/*
* Allocate 44 stack slots * 8 = 352 bytes:
@@ -2452,10 +2456,9 @@
DECREASE_FRAME 352
ret
.Lret_forwarding_address\name:
- mvn wIP0, wIP0
// Shift left by the forwarding address shift. This clears out the state bits since they are
// in the top 2 bits of the lock word.
- lsl \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+ lsl \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
ret
END \name
.endm
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 36f9ea7..2349620 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -32,6 +32,33 @@
// Cast entrypoints.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
+// Read barrier entrypoints.
+// art_quick_read_barrier_mark_regXX uses a non-standard calling
+// convention: it expects its input in register XX+1 and returns its
+// result in that same register, and saves and restores all
+// caller-save registers.
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg14(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -59,9 +86,71 @@
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
- bool is_marking ATTRIBUTE_UNUSED) {}
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+ qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg14 = is_marking ? art_quick_read_barrier_mark_reg14 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg17 = is_marking ? art_quick_read_barrier_mark_reg17 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg18 = is_marking ? art_quick_read_barrier_mark_reg18 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg19 = is_marking ? art_quick_read_barrier_mark_reg19 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg20 = is_marking ? art_quick_read_barrier_mark_reg20 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg21 = is_marking ? art_quick_read_barrier_mark_reg21 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg22 = is_marking ? art_quick_read_barrier_mark_reg22 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg29 = is_marking ? art_quick_read_barrier_mark_reg29 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
+ "Non-direct C stub marked direct.");
+}
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Note: MIPS has asserts checking for the type of entrypoint. Don't move it
@@ -287,77 +376,19 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
- // Read barriers (and these entry points in particular) are not
- // supported in the compiler on MIPS32.
+ UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
+ // Cannot use the following registers to pass arguments:
+ // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
+ // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
qpoints->pReadBarrierMarkReg00 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg01 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg02 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg03 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg04 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg05 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg06 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg07 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg08 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg09 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg10 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg11 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg12 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg13 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg14 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg15 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg15),
"Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg16 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg16),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg17 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg18 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg19 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg20 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg21 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg22 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg23 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg23),
"Non-direct C stub marked direct.");
@@ -376,9 +407,6 @@
qpoints->pReadBarrierMarkReg28 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg28),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg29 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierSlow = artReadBarrierSlow;
static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 5d61539..722a679 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1877,6 +1877,14 @@
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+ .extern artInvokeObsoleteMethod
+ENTRY art_invoke_obsolete_method_stub
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ la $t9, artInvokeObsoleteMethod
+ jalr $t9 # (Method* method, Thread* self)
+ move $a1, rSELF # pass Thread::Current
+END art_invoke_obsolete_method_stub
+
/*
* Routine that intercepts method calls and returns.
*/
@@ -2198,6 +2206,151 @@
subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
END art_quick_string_compareto
+ /*
+ * Create a function `name` calling the ReadBarrier::Mark routine,
+ * getting its argument and returning its result through register
+ * `reg`, saving and restoring all caller-save registers.
+ */
+.macro READ_BARRIER_MARK_REG name, reg
+ENTRY \name
+ /* TODO: optimizations: mark bit, forwarding. */
+ addiu $sp, $sp, -160 # includes 16 bytes of space for argument registers a0-a3
+ .cfi_adjust_cfa_offset 160
+
+ sw $ra, 156($sp)
+ .cfi_rel_offset 31, 156
+ sw $t8, 152($sp)
+ .cfi_rel_offset 24, 152
+ sw $t7, 148($sp)
+ .cfi_rel_offset 15, 148
+ sw $t6, 144($sp)
+ .cfi_rel_offset 14, 144
+ sw $t5, 140($sp)
+ .cfi_rel_offset 13, 140
+ sw $t4, 136($sp)
+ .cfi_rel_offset 12, 136
+ sw $t3, 132($sp)
+ .cfi_rel_offset 11, 132
+ sw $t2, 128($sp)
+ .cfi_rel_offset 10, 128
+ sw $t1, 124($sp)
+ .cfi_rel_offset 9, 124
+ sw $t0, 120($sp)
+ .cfi_rel_offset 8, 120
+ sw $a3, 116($sp)
+ .cfi_rel_offset 7, 116
+ sw $a2, 112($sp)
+ .cfi_rel_offset 6, 112
+ sw $a1, 108($sp)
+ .cfi_rel_offset 5, 108
+ sw $a0, 104($sp)
+ .cfi_rel_offset 4, 104
+ sw $v1, 100($sp)
+ .cfi_rel_offset 3, 100
+ sw $v0, 96($sp)
+ .cfi_rel_offset 2, 96
+
+ la $t9, artReadBarrierMark
+
+ sdc1 $f18, 88($sp)
+ sdc1 $f16, 80($sp)
+ sdc1 $f14, 72($sp)
+ sdc1 $f12, 64($sp)
+ sdc1 $f10, 56($sp)
+ sdc1 $f8, 48($sp)
+ sdc1 $f6, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f2, 24($sp)
+
+ .ifnc \reg, $a0
+ move $a0, \reg # pass obj from `reg` in a0
+ .endif
+ jalr $t9 # v0 <- artReadBarrierMark(obj)
+ sdc1 $f0, 16($sp) # in delay slot
+
+ lw $ra, 156($sp)
+ .cfi_restore 31
+ lw $t8, 152($sp)
+ .cfi_restore 24
+ lw $t7, 148($sp)
+ .cfi_restore 15
+ lw $t6, 144($sp)
+ .cfi_restore 14
+ lw $t5, 140($sp)
+ .cfi_restore 13
+ lw $t4, 136($sp)
+ .cfi_restore 12
+ lw $t3, 132($sp)
+ .cfi_restore 11
+ lw $t2, 128($sp)
+ .cfi_restore 10
+ lw $t1, 124($sp)
+ .cfi_restore 9
+ lw $t0, 120($sp)
+ .cfi_restore 8
+ lw $a3, 116($sp)
+ .cfi_restore 7
+ lw $a2, 112($sp)
+ .cfi_restore 6
+ lw $a1, 108($sp)
+ .cfi_restore 5
+ lw $a0, 104($sp)
+ .cfi_restore 4
+ lw $v1, 100($sp)
+ .cfi_restore 3
+
+ .ifnc \reg, $v0
+ move \reg, $v0 # `reg` <- v0
+ lw $v0, 96($sp)
+ .cfi_restore 2
+ .endif
+
+ ldc1 $f18, 88($sp)
+ ldc1 $f16, 80($sp)
+ ldc1 $f14, 72($sp)
+ ldc1 $f12, 64($sp)
+ ldc1 $f10, 56($sp)
+ ldc1 $f8, 48($sp)
+ ldc1 $f6, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f2, 24($sp)
+ ldc1 $f0, 16($sp)
+
+ jalr $zero, $ra
+ addiu $sp, $sp, 160
+ .cfi_adjust_cfa_offset -160
+END \name
+.endm
+
+// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
+// ZERO (register 0) is reserved.
+// AT (register 1) is reserved as a temporary/scratch register.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
+// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
+// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
+// K0, K1, GP, SP (registers 26 - 29) are reserved.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
+// RA (register 31) is reserved.
+
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 135cdc1..f8242ae 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -32,6 +32,32 @@
// Cast entrypoints.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
+// Read barrier entrypoints.
+// art_quick_read_barrier_mark_regXX uses a non-standard calling
+// convention: it expects its input in register XX+1 and returns its
+// result in that same register, and saves and restores all
+// caller-save registers.
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -60,8 +86,28 @@
extern "C" int64_t __moddi3(int64_t, int64_t);
// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
- bool is_marking ATTRIBUTE_UNUSED) {}
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+ qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+ qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+ qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+ qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+ qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+ qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+ qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+ qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+ qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+ qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+ qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+ qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+ qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+ qpoints->pReadBarrierMarkReg17 = is_marking ? art_quick_read_barrier_mark_reg17 : nullptr;
+ qpoints->pReadBarrierMarkReg18 = is_marking ? art_quick_read_barrier_mark_reg18 : nullptr;
+ qpoints->pReadBarrierMarkReg19 = is_marking ? art_quick_read_barrier_mark_reg19 : nullptr;
+ qpoints->pReadBarrierMarkReg20 = is_marking ? art_quick_read_barrier_mark_reg20 : nullptr;
+ qpoints->pReadBarrierMarkReg21 = is_marking ? art_quick_read_barrier_mark_reg21 : nullptr;
+ qpoints->pReadBarrierMarkReg22 = is_marking ? art_quick_read_barrier_mark_reg22 : nullptr;
+ qpoints->pReadBarrierMarkReg29 = is_marking ? art_quick_read_barrier_mark_reg29 : nullptr;
+}
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
DefaultInitEntryPoints(jpoints, qpoints);
@@ -122,38 +168,20 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- // Read barriers (and these entry points in particular) are not
- // supported in the compiler on MIPS64.
+ UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
+ // Cannot use the following registers to pass arguments:
+ // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
+ // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
qpoints->pReadBarrierMarkReg00 = nullptr;
- qpoints->pReadBarrierMarkReg01 = nullptr;
- qpoints->pReadBarrierMarkReg02 = nullptr;
- qpoints->pReadBarrierMarkReg03 = nullptr;
- qpoints->pReadBarrierMarkReg04 = nullptr;
- qpoints->pReadBarrierMarkReg05 = nullptr;
- qpoints->pReadBarrierMarkReg06 = nullptr;
- qpoints->pReadBarrierMarkReg07 = nullptr;
- qpoints->pReadBarrierMarkReg08 = nullptr;
- qpoints->pReadBarrierMarkReg09 = nullptr;
- qpoints->pReadBarrierMarkReg10 = nullptr;
- qpoints->pReadBarrierMarkReg11 = nullptr;
- qpoints->pReadBarrierMarkReg12 = nullptr;
- qpoints->pReadBarrierMarkReg13 = nullptr;
qpoints->pReadBarrierMarkReg14 = nullptr;
qpoints->pReadBarrierMarkReg15 = nullptr;
qpoints->pReadBarrierMarkReg16 = nullptr;
- qpoints->pReadBarrierMarkReg17 = nullptr;
- qpoints->pReadBarrierMarkReg18 = nullptr;
- qpoints->pReadBarrierMarkReg19 = nullptr;
- qpoints->pReadBarrierMarkReg20 = nullptr;
- qpoints->pReadBarrierMarkReg21 = nullptr;
- qpoints->pReadBarrierMarkReg22 = nullptr;
qpoints->pReadBarrierMarkReg23 = nullptr;
qpoints->pReadBarrierMarkReg24 = nullptr;
qpoints->pReadBarrierMarkReg25 = nullptr;
qpoints->pReadBarrierMarkReg26 = nullptr;
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
- qpoints->pReadBarrierMarkReg29 = nullptr;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 563200f..0ba0bd4 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -20,7 +20,7 @@
namespace art {
-TEST(Mips64InstructionSetFeaturesTest, Mips64Features) {
+TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips64_features(
InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
@@ -31,4 +31,20 @@
EXPECT_EQ(mips64_features->AsBitmap(), 1U);
}
+TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
+ InstructionSetFeatures::FromVariant(kMips64, "mips64r6", &error_msg));
+ ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips64r6_features->GetInstructionSet(), kMips64);
+ EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
+ EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
+ InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+ ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
+ EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
+}
+
} // namespace art
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3ee9c4a..9402232 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1818,6 +1818,13 @@
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+ .extern artInvokeObsoleteMethod
+ENTRY art_invoke_obsolete_method_stub
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ jal artInvokeObsoleteMethod # (Method* method, Thread* self)
+ move $a1, rSELF # pass Thread::Current
+END art_invoke_obsolete_method_stub
+
/*
* Routine that intercepts method calls and returns.
*/
@@ -2053,6 +2060,180 @@
#endif
END art_quick_indexof
+ /*
+ * Create a function `name` calling the ReadBarrier::Mark routine,
+ * getting its argument and returning its result through register
+ * `reg`, saving and restoring all caller-save registers.
+ */
+.macro READ_BARRIER_MARK_REG name, reg
+ENTRY \name
+ /* TODO: optimizations: mark bit, forwarding. */
+ daddiu $sp, $sp, -320
+ .cfi_adjust_cfa_offset 320
+
+ sd $ra, 312($sp)
+ .cfi_rel_offset 31, 312
+ sd $t8, 304($sp) # save t8 holding caller's gp
+ .cfi_rel_offset 24, 304
+ sd $t3, 296($sp)
+ .cfi_rel_offset 15, 296
+ sd $t2, 288($sp)
+ .cfi_rel_offset 14, 288
+ sd $t1, 280($sp)
+ .cfi_rel_offset 13, 280
+ sd $t0, 272($sp)
+ .cfi_rel_offset 12, 272
+ sd $a7, 264($sp)
+ .cfi_rel_offset 11, 264
+ sd $a6, 256($sp)
+ .cfi_rel_offset 10, 256
+ sd $a5, 248($sp)
+ .cfi_rel_offset 9, 248
+ sd $a4, 240($sp)
+ .cfi_rel_offset 8, 240
+ sd $a3, 232($sp)
+ .cfi_rel_offset 7, 232
+ sd $a2, 224($sp)
+ .cfi_rel_offset 6, 224
+ sd $a1, 216($sp)
+ .cfi_rel_offset 5, 216
+ sd $a0, 208($sp)
+ .cfi_rel_offset 4, 208
+ sd $v1, 200($sp)
+ .cfi_rel_offset 3, 200
+ sd $v0, 192($sp)
+ .cfi_rel_offset 2, 192
+
+ dla $t9, artReadBarrierMark
+
+ sdc1 $f23, 184($sp)
+ sdc1 $f22, 176($sp)
+ sdc1 $f21, 168($sp)
+ sdc1 $f20, 160($sp)
+ sdc1 $f19, 152($sp)
+ sdc1 $f18, 144($sp)
+ sdc1 $f17, 136($sp)
+ sdc1 $f16, 128($sp)
+ sdc1 $f15, 120($sp)
+ sdc1 $f14, 112($sp)
+ sdc1 $f13, 104($sp)
+ sdc1 $f12, 96($sp)
+ sdc1 $f11, 88($sp)
+ sdc1 $f10, 80($sp)
+ sdc1 $f9, 72($sp)
+ sdc1 $f8, 64($sp)
+ sdc1 $f7, 56($sp)
+ sdc1 $f6, 48($sp)
+ sdc1 $f5, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f3, 24($sp)
+ sdc1 $f2, 16($sp)
+ sdc1 $f1, 8($sp)
+
+ .ifnc \reg, $a0
+ move $a0, \reg # pass obj from `reg` in a0
+ .endif
+ jalr $t9 # v0 <- artReadBarrierMark(obj)
+ sdc1 $f0, 0($sp) # in delay slot
+
+ ld $ra, 312($sp)
+ .cfi_restore 31
+ ld $t8, 304($sp) # restore t8 holding caller's gp
+ .cfi_restore 24
+ ld $t3, 296($sp)
+ .cfi_restore 15
+ ld $t2, 288($sp)
+ .cfi_restore 14
+ ld $t1, 280($sp)
+ .cfi_restore 13
+ ld $t0, 272($sp)
+ .cfi_restore 12
+ ld $a7, 264($sp)
+ .cfi_restore 11
+ ld $a6, 256($sp)
+ .cfi_restore 10
+ ld $a5, 248($sp)
+ .cfi_restore 9
+ ld $a4, 240($sp)
+ .cfi_restore 8
+ ld $a3, 232($sp)
+ .cfi_restore 7
+ ld $a2, 224($sp)
+ .cfi_restore 6
+ ld $a1, 216($sp)
+ .cfi_restore 5
+ ld $a0, 208($sp)
+ .cfi_restore 4
+ ld $v1, 200($sp)
+ .cfi_restore 3
+
+ .ifnc \reg, $v0
+ move \reg, $v0 # `reg` <- v0
+ ld $v0, 192($sp)
+ .cfi_restore 2
+ .endif
+
+ ldc1 $f23, 184($sp)
+ ldc1 $f22, 176($sp)
+ ldc1 $f21, 168($sp)
+ ldc1 $f20, 160($sp)
+ ldc1 $f19, 152($sp)
+ ldc1 $f18, 144($sp)
+ ldc1 $f17, 136($sp)
+ ldc1 $f16, 128($sp)
+ ldc1 $f15, 120($sp)
+ ldc1 $f14, 112($sp)
+ ldc1 $f13, 104($sp)
+ ldc1 $f12, 96($sp)
+ ldc1 $f11, 88($sp)
+ ldc1 $f10, 80($sp)
+ ldc1 $f9, 72($sp)
+ ldc1 $f8, 64($sp)
+ ldc1 $f7, 56($sp)
+ ldc1 $f6, 48($sp)
+ ldc1 $f5, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f3, 24($sp)
+ ldc1 $f2, 16($sp)
+ ldc1 $f1, 8($sp)
+ ldc1 $f0, 0($sp)
+
+ .cpreturn # restore caller's gp from t8
+ jalr $zero, $ra
+ daddiu $sp, $sp, 320
+ .cfi_adjust_cfa_offset -320
+END \name
+.endm
+
+// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
+// ZERO (register 0) is reserved.
+// AT (register 1) is reserved as a temporary/scratch register.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $a4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $a5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $a6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $a7
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t2
+// T3 (register 15) is reserved as a temporary/scratch register.
+// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
+// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
+// K0, K1, GP, SP (registers 26 - 29) are reserved.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
+// RA (register 31) is reserved.
+
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/mips64/registers_mips64.cc b/runtime/arch/mips64/registers_mips64.cc
index 4959208..1ee2cdd 100644
--- a/runtime/arch/mips64/registers_mips64.cc
+++ b/runtime/arch/mips64/registers_mips64.cc
@@ -46,5 +46,14 @@
return os;
}
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
+ if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
+ os << "w" << static_cast<int>(rhs);
+ } else {
+ os << "VectorRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
} // namespace mips64
} // namespace art
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index 81fae72..30de2cc 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -107,6 +107,45 @@
};
std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs);
+// Values for vector registers.
+enum VectorRegister {
+ W0 = 0,
+ W1 = 1,
+ W2 = 2,
+ W3 = 3,
+ W4 = 4,
+ W5 = 5,
+ W6 = 6,
+ W7 = 7,
+ W8 = 8,
+ W9 = 9,
+ W10 = 10,
+ W11 = 11,
+ W12 = 12,
+ W13 = 13,
+ W14 = 14,
+ W15 = 15,
+ W16 = 16,
+ W17 = 17,
+ W18 = 18,
+ W19 = 19,
+ W20 = 20,
+ W21 = 21,
+ W22 = 22,
+ W23 = 23,
+ W24 = 24,
+ W25 = 25,
+ W26 = 26,
+ W27 = 27,
+ W28 = 28,
+ W29 = 29,
+ W30 = 30,
+ W31 = 31,
+ kNumberOfVectorRegisters = 32,
+ kNoVectorRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
+
} // namespace mips64
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 5f38dc8..6c0bcc9 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1938,6 +1938,11 @@
END_FUNCTION art_quick_to_interpreter_bridge
/*
+ * Called by managed code; saves the callee-save registers and then calls artInvokeObsoleteMethod.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
+ /*
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index e87b165..8e2acab 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1902,6 +1902,12 @@
END_FUNCTION art_quick_to_interpreter_bridge
/*
+ * Called to catch an attempt to invoke an obsolete method.
+ * RDI = method being called.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
+ /*
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 75dd981..666ed8a 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -47,6 +47,10 @@
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
+ return declaring_class_.AddressWithoutBarrier();
+ }
+
uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index b47f8f0..5cf0e0f 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -32,6 +32,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
+#include "mirror/string.h"
#include "oat.h"
#include "obj_ptr-inl.h"
#include "quick/quick_method_frame_info.h"
@@ -56,8 +57,10 @@
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
if (kCheckDeclaringClassState) {
- CHECK(result->IsIdxLoaded() || result->IsErroneous())
- << result->GetStatus() << " " << result->PrettyClass();
+ if (!(result->IsIdxLoaded() || result->IsErroneous())) {
+ LOG(FATAL_WITHOUT_ABORT) << "Class status: " << result->GetStatus();
+ LOG(FATAL) << result->PrettyClass();
+ }
}
} else {
CHECK(result == nullptr) << this;
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 80a8773..5a71be6 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -337,7 +337,8 @@
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
CHECK(!runtime->UseJitCompilation());
- const void* oat_quick_code = (IsNative() || !IsInvokable() || IsProxyMethod())
+ const void* oat_quick_code =
+ (IsNative() || !IsInvokable() || IsProxyMethod() || IsObsolete())
? nullptr
: GetOatMethodQuickCode(runtime->GetClassLinker()->GetImagePointerSize());
CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 2248c3b..51b6576 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -73,6 +73,10 @@
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
+ return declaring_class_.AddressWithoutBarrier();
+ }
+
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -691,7 +695,7 @@
// Pointer to JNI function registered to this method, or a function to resolve the JNI function,
// or the profiling data for non-native methods, or an ImtConflictTable, or the
- // single-implementation of an abstract method.
+ // single-implementation of an abstract/interface method.
void* data_;
// Method dispatch from quick compiled code invokes this pointer which may cause bridging into
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
new file mode 100644
index 0000000..ace118c
--- /dev/null
+++ b/runtime/backtrace_helper.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BACKTRACE_HELPER_H_
+#define ART_RUNTIME_BACKTRACE_HELPER_H_
+
+#include <unwind.h>
+
+namespace art {
+
+// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+class BacktraceCollector {
+ public:
+ BacktraceCollector(uintptr_t* out_frames, size_t max_depth, size_t skip_count)
+ : out_frames_(out_frames), max_depth_(max_depth), skip_count_(skip_count) {}
+
+ size_t NumFrames() const {
+ return num_frames_;
+ }
+
+ // Collect the backtrace, do not call more than once.
+ void Collect() {
+ _Unwind_Backtrace(&Callback, this);
+ }
+
+ private:
+ static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
+ auto* const state = reinterpret_cast<BacktraceCollector*>(arg);
+ const uintptr_t ip = _Unwind_GetIP(context);
+ // Skip the first skip_count_ frames (e.g. the frames for Collect() itself).
+ if (ip != 0 && state->skip_count_ > 0) {
+ --state->skip_count_;
+ return _URC_NO_REASON;
+ }
+ // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
+ state->out_frames_[state->num_frames_] = ip;
+ state->num_frames_++;
+ return state->num_frames_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
+ }
+
+ uintptr_t* const out_frames_ = nullptr;
+ size_t num_frames_ = 0u;
+ const size_t max_depth_ = 0u;
+ size_t skip_count_ = 0u;
+};
+
+// A bounded sized backtrace.
+template <size_t kMaxFrames>
+class FixedSizeBacktrace {
+ public:
+ void Collect(size_t skip_count) {
+ BacktraceCollector collector(frames_, kMaxFrames, skip_count);
+ collector.Collect();
+ num_frames_ = collector.NumFrames();
+ }
+
+ uint64_t Hash() const {
+ uint64_t hash = 9314237;
+ for (size_t i = 0; i < num_frames_; ++i) {
+ hash = hash * 2654435761 + frames_[i];
+ hash += (hash >> 13) ^ (hash << 6);
+ }
+ return hash;
+ }
+
+ private:
+ uintptr_t frames_[kMaxFrames];
+ size_t num_frames_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BACKTRACE_HELPER_H_
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 5aede38..e763e43 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -166,7 +166,7 @@
MEMORY_TOOL_MAKE_NOACCESS(ptr, size);
}
-Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
+Arena::Arena() : bytes_allocated_(0), memory_(nullptr), size_(0), next_(nullptr) {
}
class MallocArena FINAL : public Arena {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index ca9a694..b28eb72 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -48,7 +48,8 @@
: kAdjust(0),
kInitialBucketCount(0),
name_(name),
- max_buckets_(0) {
+ max_buckets_(0),
+ sample_size_(0) {
}
template <class Value>
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index a173ac2..7a9184e 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -56,6 +56,7 @@
bool image;
bool systrace_lock_logging; // Enabled with "-verbose:sys-locks".
bool agents;
+ bool dex; // Some dex access output etc.
};
// Global log verbosity setting, initialized by InitLogging.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 038aeb3..2414b5f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -62,10 +62,11 @@
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
+ kMarkSweepMarkStackLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
- kMarkSweepMarkStackLock,
+ kTaggingLockLevel,
kTransactionLogLock,
kJniFunctionTableLock,
kJniWeakGlobalsLock,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index d4bb56b..5394e53 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -116,7 +116,10 @@
ScopedFlock::~ScopedFlock() {
if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
- CHECK_EQ(0, flock_result);
+ if (flock_result != 0) {
+ PLOG(FATAL) << "Unable to unlock file " << file_->GetPath();
+ UNREACHABLE();
+ }
int close_result = -1;
if (file_->ReadOnlyMode()) {
close_result = file_->Close();
diff --git a/runtime/cha.cc b/runtime/cha.cc
index eaba01b..7948c29 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -210,7 +210,7 @@
}
}
-void ClassHierarchyAnalysis::CheckSingleImplementationInfo(
+void ClassHierarchyAnalysis::CheckVirtualMethodSingleImplementationInfo(
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
@@ -290,8 +290,9 @@
// A non-abstract method overrides an abstract method.
if (method_in_super->GetSingleImplementation(pointer_size) == nullptr) {
// Abstract method_in_super has no implementation yet.
- // We need to grab cha_lock_ for further checking/updating due to possible
- // races.
+ // We need to grab cha_lock_ since there may be multiple class linking
+ // operations going on concurrently that can check/modify the
+ // single-implementation flag/method of method_in_super.
MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
if (!method_in_super->HasSingleImplementation()) {
return;
@@ -362,6 +363,55 @@
}
}
+void ClassHierarchyAnalysis::CheckInterfaceMethodSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* interface_method,
+ ArtMethod* implementation_method,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size) {
+ DCHECK(klass->IsInstantiable());
+ DCHECK(interface_method->IsAbstract() || interface_method->IsDefault());
+
+ if (!interface_method->HasSingleImplementation()) {
+ return;
+ }
+
+ if (implementation_method->IsAbstract()) {
+ // An instantiable class doesn't supply an implementation for
+ // interface_method. Invoking the interface method on the class will throw
+ // AbstractMethodError. This is an uncommon case, so we simply treat
+ // interface_method as not having single-implementation.
+ invalidated_single_impl_methods.insert(interface_method);
+ return;
+ }
+
+ // We need to grab cha_lock_ since there may be multiple class linking
+ // operations going on concurrently that can check/modify the
+ // single-implementation flag/method of interface_method.
+ MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
+ // Do this check again after we grab cha_lock_.
+ if (!interface_method->HasSingleImplementation()) {
+ return;
+ }
+
+ ArtMethod* single_impl = interface_method->GetSingleImplementation(pointer_size);
+ if (single_impl == nullptr) {
+ // implementation_method becomes the first implementation for
+ // interface_method.
+ interface_method->SetSingleImplementation(implementation_method, pointer_size);
+ // Keep interface_method's single-implementation status.
+ return;
+ }
+ DCHECK(!single_impl->IsAbstract());
+ if (single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) {
+ // Same implementation. Since implementation_method may be a copy of a default
+ // method, we need to check the declaring class for equality.
+ return;
+ }
+ // Another implementation for interface_method.
+ invalidated_single_impl_methods.insert(interface_method);
+}
+
void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class> klass,
ArtMethod* method,
PointerSize pointer_size) {
@@ -382,6 +432,7 @@
// Rare case, but we do accept it (such as 800-smali/smali/b_26143249.smali).
// Do not attempt to devirtualize it.
method->SetHasSingleImplementation(false);
+ DCHECK(method->GetSingleImplementation(pointer_size) == nullptr);
} else {
// Abstract method starts with single-implementation flag set and null
// implementation method.
@@ -396,9 +447,15 @@
}
void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
+ PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if (klass->IsInterface()) {
+ for (ArtMethod& method : klass->GetDeclaredVirtualMethods(image_pointer_size)) {
+ DCHECK(method.IsAbstract() || method.IsDefault());
+ InitSingleImplementationFlag(klass, &method, image_pointer_size);
+ }
return;
}
+
mirror::Class* super_class = klass->GetSuperClass();
if (super_class == nullptr) {
return;
@@ -408,7 +465,6 @@
// is invalidated by linking `klass`.
std::unordered_set<ArtMethod*> invalidated_single_impl_methods;
- PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
// Do an entry-by-entry comparison of vtable contents with super's vtable.
for (int32_t i = 0; i < super_class->GetVTableLength(); ++i) {
ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
@@ -418,33 +474,59 @@
if (method->IsAbstract() && klass->IsInstantiable()) {
// An instantiable class that inherits an abstract method is treated as
// supplying an implementation that throws AbstractMethodError.
- CheckSingleImplementationInfo(klass,
- method,
- method_in_super,
- invalidated_single_impl_methods,
- image_pointer_size);
+ CheckVirtualMethodSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods,
+ image_pointer_size);
}
continue;
}
InitSingleImplementationFlag(klass, method, image_pointer_size);
- CheckSingleImplementationInfo(klass,
- method,
- method_in_super,
- invalidated_single_impl_methods,
- image_pointer_size);
+ CheckVirtualMethodSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods,
+ image_pointer_size);
}
-
// For new virtual methods that don't override.
for (int32_t i = super_class->GetVTableLength(); i < klass->GetVTableLength(); ++i) {
ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
InitSingleImplementationFlag(klass, method, image_pointer_size);
}
- Runtime* const runtime = Runtime::Current();
+ if (klass->IsInstantiable()) {
+ auto* iftable = klass->GetIfTable();
+ const size_t ifcount = klass->GetIfTableCount();
+ for (size_t i = 0; i < ifcount; ++i) {
+ mirror::Class* interface = iftable->GetInterface(i);
+ for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size);
+ mirror::PointerArray* method_array = iftable->GetMethodArray(i);
+ ArtMethod* implementation_method =
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size);
+ DCHECK(implementation_method != nullptr) << klass->PrettyClass();
+ CheckInterfaceMethodSingleImplementationInfo(klass,
+ interface_method,
+ implementation_method,
+ invalidated_single_impl_methods,
+ image_pointer_size);
+ }
+ }
+ }
+
+ InvalidateSingleImplementationMethods(invalidated_single_impl_methods);
+}
+
+void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods) {
if (!invalidated_single_impl_methods.empty()) {
+ Runtime* const runtime = Runtime::Current();
Thread *self = Thread::Current();
// Method headers for compiled code to be invalidated.
std::unordered_set<OatQuickMethodHeader*> dependent_method_headers;
+ PointerSize image_pointer_size =
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize();
{
// We do this under cha_lock_. Committing code also grabs this lock to
diff --git a/runtime/cha.h b/runtime/cha.h
index a56a752..99c49d2 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -117,11 +117,13 @@
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Check/update single-implementation info when one virtual method
+ // overrides another.
// `virtual_method` in `klass` overrides `method_in_super`.
- // This will invalidate some assumptions on single-implementation.
+ // This may invalidate some assumptions on single-implementation.
// Append methods that should have their single-implementation flag invalidated
// to `invalidated_single_impl_methods`.
- void CheckSingleImplementationInfo(
+ void CheckVirtualMethodSingleImplementationInfo(
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
@@ -129,6 +131,23 @@
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Check/update single-implementation info when one method
+ // implements an interface method.
+ // `implementation_method` in `klass` implements `interface_method`.
+ // Append `interface_method` to `invalidated_single_impl_methods`
+ // if `interface_method` gets a new implementation.
+ void CheckInterfaceMethodSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* interface_method,
+ ArtMethod* implementation_method,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void InvalidateSingleImplementationMethods(
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// For all methods in vtable slot at `verify_index` of `verify_class` and its
// superclasses, single-implementation status should be false, except if the
// method is `excluded_method`.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 746cace..4bc8e8e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2782,7 +2782,7 @@
}
CHECK(klass->IsLoaded());
- // At this point the class is loaded. Publish a ClassLoad even.
+ // At this point the class is loaded. Publish a ClassLoad event.
// Note: this may be a temporary class. It is a listener's responsibility to handle this.
Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(klass);
@@ -3505,13 +3505,12 @@
return dex_cache;
}
// Failure, dump diagnostic and abort.
- std::string location(dex_file.GetLocation());
for (const DexCacheData& data : dex_caches_) {
if (DecodeDexCache(self, data) != nullptr) {
- LOG(ERROR) << "Registered dex file " << data.dex_file->GetLocation();
+ LOG(FATAL_WITHOUT_ABORT) << "Registered dex file " << data.dex_file->GetLocation();
}
}
- LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
+ LOG(FATAL) << "Failed to find DexCache for DexFile " << dex_file.GetLocation();
UNREACHABLE();
}
@@ -3733,6 +3732,12 @@
ObjPtr<mirror::Class> existing = InsertClass(descriptor, new_class.Get(), hash);
if (existing == nullptr) {
+ // We postpone ClassLoad and ClassPrepare events to this point in time to avoid
+ // duplicate events in case of races. Array classes don't really follow dedicated
+ // load and prepare, anyway.
+ Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(new_class);
+ Runtime::Current()->GetRuntimeCallbacks()->ClassPrepare(new_class, new_class);
+
jit::Jit::NewTypeLoadedIfUsingJit(new_class.Get());
return new_class.Get();
}
@@ -4268,53 +4273,53 @@
jobjectArray throws) {
Thread* self = soa.Self();
StackHandleScope<10> hs(self);
- MutableHandle<mirror::Class> klass(hs.NewHandle(
+ MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class))));
- if (klass == nullptr) {
+ if (temp_klass == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
return nullptr;
}
- DCHECK(klass->GetClass() != nullptr);
- klass->SetObjectSize(sizeof(mirror::Proxy));
+ DCHECK(temp_klass->GetClass() != nullptr);
+ temp_klass->SetObjectSize(sizeof(mirror::Proxy));
// Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
// the methods.
- klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
- klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
- DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetName(soa.Decode<mirror::String>(name));
- klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
+ temp_klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
+ temp_klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
+ DCHECK_EQ(temp_klass->GetPrimitiveType(), Primitive::kPrimNot);
+ temp_klass->SetName(soa.Decode<mirror::String>(name));
+ temp_klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
// Object has an empty iftable, copy it for that reason.
- klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
- mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
- std::string descriptor(GetDescriptorForProxy(klass.Get()));
+ temp_klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusIdx, self);
+ std::string descriptor(GetDescriptorForProxy(temp_klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
// Needs to be before we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(temp_klass->GetClassLoader());
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
// class and setting the field arrays below.
- ObjPtr<mirror::Class> existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
+ ObjPtr<mirror::Class> existing = InsertClass(descriptor.c_str(), temp_klass.Get(), hash);
CHECK(existing == nullptr);
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
- klass->SetSFieldsPtr(sfields);
+ temp_klass->SetSFieldsPtr(sfields);
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
ArtField& interfaces_sfield = sfields->At(0);
interfaces_sfield.SetDexFieldIndex(0);
- interfaces_sfield.SetDeclaringClass(klass.Get());
+ interfaces_sfield.SetDeclaringClass(temp_klass.Get());
interfaces_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
ArtField& throws_sfield = sfields->At(1);
throws_sfield.SetDexFieldIndex(1);
- throws_sfield.SetDeclaringClass(klass.Get());
+ throws_sfield.SetDeclaringClass(temp_klass.Get());
throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
@@ -4335,43 +4340,46 @@
self->AssertPendingOOMException();
return nullptr;
}
- klass->SetMethodsPtr(proxy_class_methods, num_direct_methods, num_virtual_methods);
+ temp_klass->SetMethodsPtr(proxy_class_methods, num_direct_methods, num_virtual_methods);
// Create the single direct method.
- CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
+ CreateProxyConstructor(temp_klass, temp_klass->GetDirectMethodUnchecked(0, image_pointer_size_));
// Create virtual method using specified prototypes.
// TODO These should really use the iterators.
for (size_t i = 0; i < num_virtual_methods; ++i) {
- auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
+ auto* virtual_method = temp_klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
auto* prototype = h_methods->Get(i)->GetArtMethod();
- CreateProxyMethod(klass, prototype, virtual_method);
+ CreateProxyMethod(temp_klass, prototype, virtual_method);
DCHECK(virtual_method->GetDeclaringClass() != nullptr);
DCHECK(prototype->GetDeclaringClass() != nullptr);
}
// The super class is java.lang.reflect.Proxy
- klass->SetSuperClass(GetClassRoot(kJavaLangReflectProxy));
+ temp_klass->SetSuperClass(GetClassRoot(kJavaLangReflectProxy));
// Now effectively in the loaded state.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, self);
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusLoaded, self);
self->AssertNoPendingException();
- MutableHandle<mirror::Class> new_class = hs.NewHandle<mirror::Class>(nullptr);
+ // At this point the class is loaded. Publish a ClassLoad event.
+ // Note: this may be a temporary class. It is a listener's responsibility to handle this.
+ Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(temp_klass);
+
+ MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
{
// Must hold lock on object when resolved.
- ObjectLock<mirror::Class> resolution_lock(self, klass);
+ ObjectLock<mirror::Class> resolution_lock(self, temp_klass);
// Link the fields and virtual methods, creating vtable and iftables.
// The new class will replace the old one in the class table.
Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces)));
- if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
+ if (!LinkClass(self, descriptor.c_str(), temp_klass, h_interfaces, &klass)) {
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusErrorUnresolved, self);
return nullptr;
}
}
- CHECK(klass->IsRetired());
- CHECK_NE(klass.Get(), new_class.Get());
- klass.Assign(new_class.Get());
+ CHECK(temp_klass->IsRetired());
+ CHECK_NE(temp_klass.Get(), klass.Get());
CHECK_EQ(interfaces_sfield.GetDeclaringClass(), klass.Get());
interfaces_sfield.SetObject<false>(
@@ -4382,6 +4390,8 @@
klass.Get(),
soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws));
+ Runtime::Current()->GetRuntimeCallbacks()->ClassPrepare(temp_klass, klass);
+
{
// Lock on klass is released. Lock new class object.
ObjectLock<mirror::Class> initialization_lock(self, klass);
@@ -6724,18 +6734,20 @@
ArtMethod* m = check_vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
CHECK(m != nullptr);
- CHECK_EQ(m->GetMethodIndexDuringLinking(), i)
- << m->PrettyMethod()
- << " has an unexpected method index for its spot in the vtable for class"
- << klass->PrettyClass();
+ if (m->GetMethodIndexDuringLinking() != i) {
+ LOG(WARNING) << m->PrettyMethod()
+ << " has an unexpected method index for its spot in the vtable for class"
+ << klass->PrettyClass();
+ }
ArraySlice<ArtMethod> virtuals = klass->GetVirtualMethodsSliceUnchecked(pointer_size);
auto is_same_method = [m] (const ArtMethod& meth) {
return &meth == m;
};
- CHECK((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) ||
- std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())
- << m->PrettyMethod() << " does not seem to be owned by current class "
- << klass->PrettyClass() << " or any of its superclasses!";
+ if (!((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) ||
+ std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())) {
+ LOG(WARNING) << m->PrettyMethod() << " does not seem to be owned by current class "
+ << klass->PrettyClass() << " or any of its superclasses!";
+ }
}
}
@@ -6763,14 +6775,15 @@
other_entry->GetAccessFlags())) {
continue;
}
- CHECK(vtable_entry != other_entry &&
- !name_comparator.HasSameNameAndSignature(
- other_entry->GetInterfaceMethodIfProxy(pointer_size)))
- << "vtable entries " << i << " and " << j << " are identical for "
- << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod() << " (0x"
- << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and "
- << other_entry->PrettyMethod() << " (0x" << std::hex
- << reinterpret_cast<uintptr_t>(other_entry) << ")";
+ if (vtable_entry == other_entry ||
+ name_comparator.HasSameNameAndSignature(
+ other_entry->GetInterfaceMethodIfProxy(pointer_size))) {
+ LOG(WARNING) << "vtable entries " << i << " and " << j << " are identical for "
+ << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod()
+ << " (0x" << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and "
+ << other_entry->PrettyMethod() << " (0x" << std::hex
+ << reinterpret_cast<uintptr_t>(other_entry) << ")";
+ }
}
}
}
@@ -7819,7 +7832,7 @@
mirror::String* ClassLinker::LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache) {
+ ObjPtr<mirror::DexCache> dex_cache) {
DCHECK(dex_cache != nullptr);
ObjPtr<mirror::String> resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != nullptr) {
@@ -8505,6 +8518,15 @@
}
}
+void ClassLinker::SetEntryPointsForObsoleteMethod(ArtMethod* method) const {
+ DCHECK(method->IsObsolete());
+ // We cannot mess with the entrypoints of native methods because they are used to determine how
+ // large the method's quick stack frame is. Without this information we cannot walk the stacks.
+ if (!method->IsNative()) {
+ method->SetEntryPointFromQuickCompiledCode(GetInvokeObsoleteMethodStub());
+ }
+}
+
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6254acb..a26e63b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -247,7 +247,7 @@
// result in the DexCache if found. Return null if not found.
mirror::String* LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache)
+ ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
@@ -544,6 +544,10 @@
void SetEntryPointsToInterpreter(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Set the entrypoints up for an obsolete method.
+ void SetEntryPointsForObsoleteMethod(ArtMethod* method) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 9f04e59..b421810 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -618,7 +618,7 @@
ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_dex_caches_), "obsoleteDexCaches");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_methods_), "obsoleteMethods");
- addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_bytes_), "originalDexFile");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_), "originalDexFile");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
}
};
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 79f5aea..430edbb 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -256,6 +256,7 @@
}
private:
+ // Only copies classes.
void CopyWithoutLocks(const ClassTable& source_table) NO_THREAD_SAFETY_ANALYSIS;
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 78ba6e7..15724a1 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -68,6 +68,74 @@
using android::base::StringPrintf;
+static const uint8_t kBase64Map[256] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
+ 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
+ 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
+ 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
+ 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255
+};
+
+uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ CHECK(dst_size != nullptr);
+ std::vector<uint8_t> tmp;
+ uint32_t t = 0, y = 0;
+ int g = 3;
+ for (size_t i = 0; src[i] != '\0'; ++i) {
+ uint8_t c = kBase64Map[src[i] & 0xFF];
+ if (c == 255) continue;
+ // The final '=' symbols are read and used to trim the remaining bytes.
+ if (c == 254) {
+ c = 0;
+ // prevent g < 0 which would potentially allow an overflow later
+ if (--g < 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ } else if (g != 3) {
+ // we only allow = to be at the end
+ *dst_size = 0;
+ return nullptr;
+ }
+ t = (t << 6) | c;
+ if (++y == 4) {
+ tmp.push_back((t >> 16) & 255);
+ if (g > 1) {
+ tmp.push_back((t >> 8) & 255);
+ }
+ if (g > 2) {
+ tmp.push_back(t & 255);
+ }
+ y = t = 0;
+ }
+ }
+ if (y != 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
+ *dst_size = tmp.size();
+ std::copy(tmp.begin(), tmp.end(), dst.get());
+ return dst.release();
+}
+
ScratchFile::ScratchFile() {
// ANDROID_DATA needs to be set
CHECK_NE(static_cast<char*>(nullptr), getenv("ANDROID_DATA")) <<
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index d7abe2a..bfa273d 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -44,6 +44,8 @@
class Runtime;
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+uint8_t* DecodeBase64(const char* src, size_t* dst_size);
+
class ScratchFile {
public:
ScratchFile();
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 4f4bed0..6758d75 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -313,6 +313,14 @@
ArtMethod::PrettyMethod(method).c_str()).c_str());
}
+// InternalError
+
+void ThrowInternalError(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ThrowException("Ljava/lang/InternalError;", nullptr, fmt, &args);
+ va_end(args);
+}
// IOException
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 55a8938..4afef79 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -151,6 +151,12 @@
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+// InternalError
+
+void ThrowInternalError(const char* fmt, ...)
+ __attribute__((__format__(__printf__, 1, 2)))
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b2fba67..868d8df 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1396,7 +1396,8 @@
mirror::Class* c = m->GetDeclaringClass();
location->type_tag = GetTypeTag(c);
location->class_id = gRegistry->AddRefType(c);
- location->method_id = ToMethodId(m);
+ // The RI seems to return 0 for all obsolete methods. For compatibility we shall do the same.
+ location->method_id = m->IsObsolete() ? 0 : ToMethodId(m);
location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
}
}
@@ -1409,6 +1410,15 @@
return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}
+bool Dbg::IsMethodObsolete(JDWP::MethodId method_id) {
+ ArtMethod* m = FromMethodId(method_id);
+ if (m == nullptr) {
+ // NB: Since we return 0 as the MID for obsolete methods, we want to default to true here.
+ return true;
+ }
+ return m->IsObsolete();
+}
+
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
@@ -3717,10 +3727,9 @@
if (!m->IsRuntimeMethod()) {
++stack_depth;
if (method == nullptr) {
- mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
+ const DexFile* dex_file = m->GetDexFile();
method = m;
- if (dex_cache != nullptr) {
- const DexFile* dex_file = dex_cache->GetDexFile();
+ if (dex_file != nullptr) {
line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc());
}
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index a7fd160..27124e1 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -370,6 +370,8 @@
//
static std::string GetMethodName(JDWP::MethodId method_id)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static bool IsMethodObsolete(JDWP::MethodId method_id)
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 35e9d5d..85100ae 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -23,6 +23,7 @@
#include <string.h>
#include <sys/file.h>
#include <sys/stat.h>
+#include <zlib.h>
#include <memory>
#include <sstream>
@@ -67,6 +68,12 @@
{'0', '3', '8', '\0'}
};
+uint32_t DexFile::CalculateChecksum() const {
+ const uint32_t non_sum = OFFSETOF_MEMBER(DexFile::Header, signature_);
+ const uint8_t* non_sum_ptr = Begin() + non_sum;
+ return adler32(adler32(0L, Z_NULL, 0), non_sum_ptr, Size() - non_sum);
+}
+
struct DexFile::AnnotationValue {
JValue value_;
uint8_t type_;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 58b8e79..1b18d21 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1088,6 +1088,9 @@
static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth);
static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right);
+ // Recalculates the checksum of the dex file. Does not use the current value in the header.
+ uint32_t CalculateChecksum() const;
+
// Returns a human-readable form of the method at an index.
std::string PrettyMethod(uint32_t method_idx, bool with_signature = true) const;
// Returns a human-readable form of the field at an index.
@@ -1320,6 +1323,9 @@
uint32_t NumVirtualMethods() const {
return header_.virtual_methods_size_;
}
+ bool IsAtMethod() const {
+ return pos_ >= EndOfInstanceFieldsPos();
+ }
bool HasNextStaticField() const {
return pos_ < EndOfStaticFieldsPos();
}
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 0b3f16a..11b3cd0 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -17,7 +17,6 @@
#include "dex_file_verifier.h"
#include <inttypes.h>
-#include <zlib.h>
#include <limits>
#include <memory>
@@ -368,11 +367,8 @@
return false;
}
+ uint32_t adler_checksum = dex_file_->CalculateChecksum();
// Compute and verify the checksum in the header.
- uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
- const uint32_t non_sum = sizeof(header_->magic_) + sizeof(header_->checksum_);
- const uint8_t* non_sum_ptr = reinterpret_cast<const uint8_t*>(header_) + non_sum;
- adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
if (adler_checksum != header_->checksum_) {
if (verify_checksum_) {
ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 1520e13..565b4ed 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -29,6 +29,15 @@
self->QuickDeliverException();
}
+extern "C" NO_RETURN uint64_t artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(method->IsObsolete());
+ ScopedQuickEntrypointChecks sqec(self);
+ ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
+ method->PrettyMethod().c_str());
+ self->QuickDeliverException();
+}
+
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 25073a8..354ae20 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1075,7 +1075,7 @@
<< called_method.dex_file->PrettyMethod(called_method.dex_method_index);
}
} else {
- VLOG(oat) << "Accessed dex file for invoke " << invoke_type << " "
+ VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
<< called_method.dex_method_index;
}
} else {
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index 2842c5a..4ca52de 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -40,6 +40,12 @@
return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
}
+// Return the address of stub code for attempting to invoke an obsolete method.
+extern "C" void art_invoke_obsolete_method_stub(ArtMethod*);
+static inline const void* GetInvokeObsoleteMethodStub() {
+ return reinterpret_cast<const void*>(art_invoke_obsolete_method_stub);
+}
+
// Return the address of quick stub code for handling JNI calls.
extern "C" void art_quick_generic_jni_trampoline(ArtMethod*);
static inline const void* GetQuickGenericJniStub() {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 64128cc..4220250 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -38,8 +38,8 @@
}
// Signal handler called on SIGSEGV.
-static void art_fault_handler(int sig, siginfo_t* info, void* context) {
- fault_manager.HandleFault(sig, info, context);
+static bool art_fault_handler(int sig, siginfo_t* info, void* context) {
+ return fault_manager.HandleFault(sig, info, context);
}
FaultManager::FaultManager() : initialized_(false) {
@@ -49,43 +49,15 @@
FaultManager::~FaultManager() {
}
-static void SetUpArtAction(struct sigaction* action) {
- action->sa_sigaction = art_fault_handler;
- sigemptyset(&action->sa_mask);
- action->sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
- action->sa_restorer = nullptr;
-#endif
-}
-
-void FaultManager::EnsureArtActionInFrontOfSignalChain() {
- if (initialized_) {
- struct sigaction action;
- SetUpArtAction(&action);
- EnsureFrontOfChain(SIGSEGV, &action);
- } else {
- LOG(WARNING) << "Can't call " << __FUNCTION__ << " due to unitialized fault manager";
- }
-}
-
void FaultManager::Init() {
CHECK(!initialized_);
- struct sigaction action;
- SetUpArtAction(&action);
-
- // Set our signal handler now.
- int e = sigaction(SIGSEGV, &action, &oldaction_);
- if (e != 0) {
- VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
- }
- // Make sure our signal handler is called before any user handlers.
- ClaimSignalChain(SIGSEGV, &oldaction_);
+ AddSpecialSignalHandlerFn(SIGSEGV, art_fault_handler);
initialized_ = true;
}
void FaultManager::Release() {
if (initialized_) {
- UnclaimSignalChain(SIGSEGV);
+ RemoveSpecialSignalHandlerFn(SIGSEGV, art_fault_handler);
initialized_ = false;
}
}
@@ -118,93 +90,36 @@
return false;
}
-class ScopedSignalUnblocker {
- public:
- explicit ScopedSignalUnblocker(const std::initializer_list<int>& signals) {
- sigset_t new_mask;
- sigemptyset(&new_mask);
- for (int signal : signals) {
- sigaddset(&new_mask, signal);
- }
- if (sigprocmask(SIG_UNBLOCK, &new_mask, &previous_mask_) != 0) {
- PLOG(FATAL) << "failed to unblock signals";
- }
- }
-
- ~ScopedSignalUnblocker() {
- if (sigprocmask(SIG_SETMASK, &previous_mask_, nullptr) != 0) {
- PLOG(FATAL) << "failed to unblock signals";
- }
- }
-
- private:
- sigset_t previous_mask_;
-};
-
-class ScopedHandlingSignalSetter {
- public:
- explicit ScopedHandlingSignalSetter(Thread* thread) : thread_(thread) {
- CHECK(!thread->HandlingSignal());
- thread_->SetHandlingSignal(true);
- }
-
- ~ScopedHandlingSignalSetter() {
- CHECK(thread_->HandlingSignal());
- thread_->SetHandlingSignal(false);
- }
-
- private:
- Thread* thread_;
-};
-
-void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- // BE CAREFUL ALLOCATING HERE INCLUDING USING LOG(...)
- //
- // If malloc calls abort, it will be holding its lock.
- // If the handler tries to call malloc, it will deadlock.
-
- // Use a thread local field to track whether we're recursing, and fall back.
- // (e.g.. if one of our handlers crashed)
- Thread* thread = Thread::Current();
-
- if (thread != nullptr && !thread->HandlingSignal()) {
- // Unblock some signals and set thread->handling_signal_ to true,
- // so that we can catch crashes in our signal handler.
- ScopedHandlingSignalSetter setter(thread);
- ScopedSignalUnblocker unblocker { SIGABRT, SIGBUS, SIGSEGV }; // NOLINT
-
- VLOG(signals) << "Handling fault";
+bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
+ VLOG(signals) << "Handling fault";
#ifdef TEST_NESTED_SIGNAL
- // Simulate a crash in a handler.
- raise(SIGSEGV);
+ // Simulate a crash in a handler.
+ raise(SIGSEGV);
#endif
- if (IsInGeneratedCode(info, context, true)) {
- VLOG(signals) << "in generated code, looking for handler";
- for (const auto& handler : generated_code_handlers_) {
- VLOG(signals) << "invoking Action on handler " << handler;
- if (handler->Action(sig, info, context)) {
- // We have handled a signal so it's time to return from the
- // signal handler to the appropriate place.
- return;
- }
+ if (IsInGeneratedCode(info, context, true)) {
+ VLOG(signals) << "in generated code, looking for handler";
+ for (const auto& handler : generated_code_handlers_) {
+ VLOG(signals) << "invoking Action on handler " << handler;
+ if (handler->Action(sig, info, context)) {
+ // We have handled a signal so it's time to return from the
+ // signal handler to the appropriate place.
+ return true;
}
+ }
- // We hit a signal we didn't handle. This might be something for which
- // we can give more information about so call all registered handlers to
- // see if it is.
- if (HandleFaultByOtherHandlers(sig, info, context)) {
- return;
- }
+ // We hit a signal we didn't handle. This might be something for which
+ // we can give more information about so call all registered handlers to
+ // see if it is.
+ if (HandleFaultByOtherHandlers(sig, info, context)) {
+ return true;
}
}
// Set a breakpoint in this function to catch unhandled signals.
art_sigsegv_fault();
-
- // Pass this on to the next handler in the chain, or the default if none.
- InvokeUserSignalHandler(sig, info, context);
+ return false;
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index ce59ba7..d56cf17 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -42,9 +42,9 @@
// Unclaim signals and delete registered handlers.
void Shutdown();
- void EnsureArtActionInFrontOfSignalChain();
- void HandleFault(int sig, siginfo_t* info, void* context);
+ // Try to handle a fault, returns true if successful.
+ bool HandleFault(int sig, siginfo_t* info, void* context);
// Added handlers are owned by the fault handler and will be freed on Shutdown().
void AddHandler(FaultHandler* handler, bool generated_code);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 1fa2d1a..562fc75 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -141,7 +141,7 @@
template<bool kUseTail = true>
class SlotFreeList {
public:
- SlotFreeList() : head_(0U), tail_(0), size_(0) {}
+ SlotFreeList() : head_(0U), tail_(0), size_(0), padding_(0) {}
Slot* Head() const {
return reinterpret_cast<Slot*>(head_);
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7136f10..24ba52f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -72,12 +72,19 @@
rb_mark_bit_stack_full_(false),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
- is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
+ is_marking_(false),
+ is_active_(false),
+ is_asserting_to_space_invariant_(false),
region_space_bitmap_(nullptr),
- heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
+ heap_mark_bitmap_(nullptr),
+ live_stack_freeze_size_(0),
+ from_space_num_objects_at_first_pause_(0),
+ from_space_num_bytes_at_first_pause_(0),
+ mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
+ mark_from_read_barrier_measurements_(false),
rb_slow_path_ns_(0),
rb_slow_path_count_(0),
rb_slow_path_count_gc_(0),
@@ -87,6 +94,7 @@
rb_slow_path_count_gc_total_(0),
rb_table_(heap_->GetReadBarrierTable()),
force_evacuate_all_(false),
+ gc_grays_immune_objects_(false),
immune_gray_stack_lock_("concurrent copying immune gray stack lock",
kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
@@ -2171,9 +2179,12 @@
fall_back_to_non_moving = true;
to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
&non_moving_space_bytes_allocated, nullptr, &dummy);
- CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed for a "
- << obj_size << " byte object in region type "
- << region_space_->GetRegionType(from_ref);
+ if (UNLIKELY(to_ref == nullptr)) {
+ LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
+ << obj_size << " byte object in region type "
+ << region_space_->GetRegionType(from_ref);
+ LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
+ }
bytes_allocated = non_moving_space_bytes_allocated;
// Mark it in the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 0039388..c61f69d 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -52,8 +52,12 @@
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+ mark_stack_(nullptr),
space_(nullptr),
+ mark_bitmap_(nullptr),
collector_name_(name_),
+ bump_pointer_(nullptr),
+ live_objects_in_space_(0),
updating_references_(false) {}
void MarkCompact::RunPhases() {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a769748..357541f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -18,13 +18,13 @@
#include <limits>
#include <memory>
-#include <unwind.h> // For GC verification.
#include <vector>
#include "android-base/stringprintf.h"
#include "allocation_listener.h"
#include "art_field-inl.h"
+#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -2542,6 +2542,13 @@
// to large objects.
mod_union_table->SetCards();
} else {
+ // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
+ // may be dirty cards from the zygote compaction or reference processing. These cards are not
+ // necessary to have marked since the zygote space may not refer to any objects not in the
+ // zygote or image spaces at this point.
+ mod_union_table->ProcessCards();
+ mod_union_table->ClearTable();
+
// For CC we never collect zygote large objects. This means we do not need to set the cards for
// the zygote mod-union table and we can also clear all of the existing image mod-union tables.
// The existing mod-union tables are only for image spaces and may only reference zygote and
@@ -4065,42 +4072,6 @@
}
}
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
-class StackCrawlState {
- public:
- StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
- : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
- }
- size_t GetFrameCount() const {
- return frame_count_;
- }
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<StackCrawlState*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->frames_[state->frame_count_] = ip;
- state->frame_count_++;
- return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
- private:
- uintptr_t* const frames_;
- size_t frame_count_;
- const size_t max_depth_;
- size_t skip_count_;
-};
-
-static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
- StackCrawlState state(frames, max_depth, 0u);
- _Unwind_Backtrace(&StackCrawlState::Callback, &state);
- return state.GetFrameCount();
-}
-
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
@@ -4109,13 +4080,9 @@
bool new_backtrace = false;
{
static constexpr size_t kMaxFrames = 16u;
- uintptr_t backtrace[kMaxFrames];
- const size_t frames = get_backtrace(backtrace, kMaxFrames);
- uint64_t hash = 0;
- for (size_t i = 0; i < frames; ++i) {
- hash = hash * 2654435761 + backtrace[i];
- hash += (hash >> 13) ^ (hash << 6);
- }
+ FixedSizeBacktrace<kMaxFrames> backtrace;
+ backtrace.Collect(/* skip_frames */ 2);
+ uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
if (new_backtrace) {
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 515a6fd..6d426c2 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -72,6 +72,11 @@
bitmap->Set(fake_end_of_heap_object);
}
+TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
+}
+
class ZygoteHeapTest : public CommonRuntimeTest {
void SetUpRuntimeOptions(RuntimeOptions* options) {
CommonRuntimeTest::SetUpRuntimeOptions(options);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 568f8d6..662efe2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -886,7 +886,7 @@
explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
template <typename T>
- T* operator()(T* obj) const {
+ T* operator()(T* obj, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
return ForwardObject(obj);
}
};
@@ -976,7 +976,8 @@
ForwardObject(obj));
}
- void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj) const
+ NO_THREAD_SAFETY_ANALYSIS {
if (visited_->Test(obj)) {
// Already visited.
return;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 4c6b5bf..3988073 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,13 +16,12 @@
#include "large_object_space.h"
-#include <valgrind.h>
#include <memory>
-#include <memcheck/memcheck.h>
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
+#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
diff --git a/runtime/imt_conflict_table.h b/runtime/imt_conflict_table.h
index fdd10fe..3586864 100644
--- a/runtime/imt_conflict_table.h
+++ b/runtime/imt_conflict_table.h
@@ -81,6 +81,14 @@
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
+ void** AddressOfInterfaceMethod(size_t index, PointerSize pointer_size) {
+ return AddressOfMethod(index * kMethodCount + kMethodInterface, pointer_size);
+ }
+
+ void** AddressOfImplementationMethod(size_t index, PointerSize pointer_size) {
+ return AddressOfMethod(index * kMethodCount + kMethodImplementation, pointer_size);
+ }
+
// Return true if two conflict tables are the same.
bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
size_t num = NumEntries(pointer_size);
@@ -169,6 +177,14 @@
}
private:
+ void** AddressOfMethod(size_t index, PointerSize pointer_size) {
+ if (pointer_size == PointerSize::k64) {
+ return reinterpret_cast<void**>(&data64_[index]);
+ } else {
+ return reinterpret_cast<void**>(&data32_[index]);
+ }
+ }
+
ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
if (pointer_size == PointerSize::k64) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
diff --git a/runtime/imtable.h b/runtime/imtable.h
index b7066bd..aa0a504 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -37,9 +37,13 @@
// (non-marker) interfaces.
static constexpr size_t kSize = IMT_SIZE;
+ uint8_t* AddressOfElement(size_t index, PointerSize pointer_size) {
+ return reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ }
+
ArtMethod* Get(size_t index, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
return reinterpret_cast<ArtMethod*>(value);
@@ -51,7 +55,7 @@
void Set(size_t index, ArtMethod* method, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
uintptr_t value = reinterpret_cast<uintptr_t>(method);
DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we dont lose any non 0 bits.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 1b3d339..bf49e84 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -232,12 +232,6 @@
kSwitchImplKind, // Switch-based interpreter implementation.
kMterpImplKind // Assembly interpreter
};
-static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
- os << ((rhs == kSwitchImplKind)
- ? "Switch-based interpreter"
- : "Asm interpreter");
- return os;
-}
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
@@ -366,6 +360,14 @@
return;
}
+ // This can happen if we are in forced interpreter mode and an obsolete method is called using
+ // reflection.
+ if (UNLIKELY(method->IsObsolete())) {
+ ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
+ method->PrettyMethod().c_str());
+ return;
+ }
+
const char* old_cause = self->StartAssertNoThreadSuspension("EnterInterpreterFromInvoke");
const DexFile::CodeItem* code_item = method->GetCodeItem();
uint16_t num_regs;
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 0ae7307..869d430 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -14,9 +14,12 @@
* limitations under the License.
*/
-#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
+#include "compiler/intrinsics_enum.h"
+#include "dex_instruction.h"
+#include "interpreter/interpreter_common.h"
+
namespace art {
namespace interpreter {
diff --git a/runtime/interpreter/interpreter_intrinsics.h b/runtime/interpreter/interpreter_intrinsics.h
index ae45679..2a23002 100644
--- a/runtime/interpreter/interpreter_intrinsics.h
+++ b/runtime/interpreter/interpreter_intrinsics.h
@@ -17,10 +17,14 @@
#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
#define ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
-#include "compiler/intrinsics_enum.h"
-#include "dex_instruction.h"
+#include "jvalue.h"
namespace art {
+
+class ArtMethod;
+class Instruction;
+class ShadowFrame;
+
namespace interpreter {
// Invokes to methods identified as intrinics are routed here. If there is
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index a341cdb..b93b8f2 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -39,6 +39,7 @@
#include "runtime_options.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
+#include "sigchain.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -900,7 +901,8 @@
int version = (*jni_on_load)(this, nullptr);
if (runtime_->GetTargetSdkVersion() != 0 && runtime_->GetTargetSdkVersion() <= 21) {
- fault_manager.EnsureArtActionInFrontOfSignalChain();
+ // Make sure that sigchain owns SIGSEGV.
+ EnsureFrontOfChain(SIGSEGV);
}
self->SetClassLoaderOverride(old_class_loader.get());
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index b13d565..0aa04c1 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -227,7 +227,7 @@
const int sleep_max_ms = 2*1000;
char buff[5];
- int sock = socket(PF_UNIX, SOCK_STREAM, 0);
+ int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
if (sock < 0) {
PLOG(ERROR) << "Could not create ADB control socket";
return false;
@@ -264,7 +264,7 @@
* up after a few minutes in case somebody ships an app with
* the debuggable flag set.
*/
- int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
+ int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
if (!ret) {
int control_sock = ControlSock();
#ifdef ART_TARGET_ANDROID
@@ -278,7 +278,7 @@
/* now try to send our pid to the ADB daemon */
ret = TEMP_FAILURE_RETRY(send(control_sock, buff, 4, 0));
- if (ret >= 0) {
+ if (ret == 4) {
VLOG(jdwp) << StringPrintf("PID sent as '%.*s' to ADB", 4, buff);
break;
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 964c567..971d039 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -761,12 +761,11 @@
return ERR_NONE;
}
-// Default implementation for IDEs relying on this command.
static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadRefTypeId(); // unused reference type ID
- request->ReadMethodId(); // unused method ID
- expandBufAdd1(reply, false); // a method is never obsolete.
+ MethodId id = request->ReadMethodId();
+ expandBufAdd1(reply, Dbg::IsMethodObsolete(id));
return ERR_NONE;
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 5da1ea1..4f5bebf 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -279,6 +279,10 @@
code_cache_initial_capacity_(0),
code_cache_max_capacity_(0),
compile_threshold_(0),
+ warmup_threshold_(0),
+ osr_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0),
dump_info_on_shutdown_(false) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fc41f94..e9a5ae5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1366,7 +1366,10 @@
MutexLock mu(self, lock_);
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
- info->IncrementInlineUse();
+ if (!info->IncrementInlineUse()) {
+ // Overflow of inlining uses, just bail.
+ return nullptr;
+ }
}
return info;
}
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index b23a863..2d1601e 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -37,8 +37,9 @@
namespace art {
const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-// Last profile version: fix the order of dex files in the profile.
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '4', '\0' };
+// Last profile version: fix profman merges. Update profile version to force
+// regeneration of possibly faulty profiles.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '5', '\0' };
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -57,6 +58,14 @@
static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
+ProfileCompilationInfo::ProfileCompilationInfo(const ProfileCompilationInfo& pci) {
+ MergeWith(pci);
+}
+
+ProfileCompilationInfo::~ProfileCompilationInfo() {
+ ClearProfile();
+}
+
void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
const dex::TypeIndex& type_idx) {
if (is_megamorphic || is_missing_types) {
@@ -227,28 +236,21 @@
DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
AddUintToBuffer(&buffer, static_cast<uint8_t>(info_.size()));
- // Make sure we write the dex files in order of their profile index. This
+ // Dex files must be written in the order of their profile index. This
// avoids writing the index in the output file and simplifies the parsing logic.
- std::vector<const std::string*> ordered_info_location(info_.size());
- std::vector<const DexFileData*> ordered_info_data(info_.size());
- for (const auto& it : info_) {
- ordered_info_location[it.second.profile_index] = &(it.first);
- ordered_info_data[it.second.profile_index] = &(it.second);
- }
- for (size_t i = 0; i < info_.size(); i++) {
+ for (const DexFileData* dex_data_ptr : info_) {
+ const DexFileData& dex_data = *dex_data_ptr;
if (buffer.size() > kMaxSizeToKeepBeforeWriting) {
if (!WriteBuffer(fd, buffer.data(), buffer.size())) {
return false;
}
buffer.clear();
}
- const std::string& dex_location = *ordered_info_location[i];
- const DexFileData& dex_data = *ordered_info_data[i];
// Note that we allow dex files without any methods or classes, so that
// inline caches can refer valid dex files.
- if (dex_location.size() >= kMaxDexFileKeyLength) {
+ if (dex_data.profile_key.size() >= kMaxDexFileKeyLength) {
LOG(WARNING) << "DexFileKey exceeds allocated limit";
return false;
}
@@ -258,19 +260,19 @@
uint32_t methods_region_size = GetMethodsRegionSize(dex_data);
size_t required_capacity = buffer.size() +
kLineHeaderSize +
- dex_location.size() +
+ dex_data.profile_key.size() +
sizeof(uint16_t) * dex_data.class_set.size() +
methods_region_size;
buffer.reserve(required_capacity);
- DCHECK_LE(dex_location.size(), std::numeric_limits<uint16_t>::max());
+ DCHECK_LE(dex_data.profile_key.size(), std::numeric_limits<uint16_t>::max());
DCHECK_LE(dex_data.class_set.size(), std::numeric_limits<uint16_t>::max());
- AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_location.size()));
+ AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.profile_key.size()));
AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
AddUintToBuffer(&buffer, methods_region_size); // uint32_t
AddUintToBuffer(&buffer, dex_data.checksum); // uint32_t
- AddStringToBuffer(&buffer, dex_location);
+ AddStringToBuffer(&buffer, dex_data.profile_key);
for (const auto& method_it : dex_data.method_map) {
AddUintToBuffer(&buffer, method_it.first);
@@ -375,23 +377,52 @@
}
ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
- const std::string& dex_location,
+ const std::string& profile_key,
uint32_t checksum) {
- auto info_it = info_.FindOrAdd(dex_location, DexFileData(checksum, info_.size()));
- if (info_.size() > std::numeric_limits<uint8_t>::max()) {
+ const auto& profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
+ if (profile_key_map_.size() > std::numeric_limits<uint8_t>::max()) {
// Allow only 255 dex files to be profiled. This allows us to save bytes
// when encoding. The number is well above what we expect for normal applications.
if (kIsDebugBuild) {
- LOG(WARNING) << "Exceeded the maximum number of dex files (255). Something went wrong";
+ LOG(ERROR) << "Exceeded the maximum number of dex files (255). Something went wrong";
}
- info_.erase(dex_location);
+ profile_key_map_.erase(profile_key);
return nullptr;
}
- if (info_it->second.checksum != checksum) {
- LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+
+ uint8_t profile_index = profile_index_it->second;
+ if (info_.size() <= profile_index) {
+ // This is a new addition. Add it to the info_ array.
+ info_.emplace_back(new DexFileData(profile_key, checksum, profile_index));
+ }
+ DexFileData* result = info_[profile_index];
+ // DCHECK that profile info map key is consistent with the one stored in the dex file data.
+ // This should always be the case since the cache map is managed by ProfileCompilationInfo.
+ DCHECK_EQ(profile_key, result->profile_key);
+ DCHECK_EQ(profile_index, result->profile_index);
+
+ // Check that the checksum matches.
+ // This may differ if, for example, the dex file was updated and
+ // we had a record of the old one.
+ if (result->checksum != checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << profile_key;
return nullptr;
}
- return &info_it->second;
+ return result;
+}
+
+const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
+ const std::string& profile_key) const {
+ const auto& profile_index_it = profile_key_map_.find(profile_key);
+ if (profile_index_it == profile_key_map_.end()) {
+ return nullptr;
+ }
+
+ uint8_t profile_index = profile_index_it->second;
+ const DexFileData* result = info_[profile_index];
+ DCHECK_EQ(profile_key, result->profile_key);
+ DCHECK_EQ(profile_index, result->profile_index);
+ return result;
}
bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
@@ -415,9 +446,7 @@
uint32_t dex_checksum,
uint16_t method_index,
const OfflineProfileMethodInfo& pmi) {
- DexFileData* const data = GetOrAddDexFileData(
- GetProfileDexFileKey(dex_location),
- dex_checksum);
+ DexFileData* const data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location), dex_checksum);
if (data == nullptr) { // checksum mismatch
return false;
}
@@ -498,13 +527,13 @@
return true;
}
-#define READ_UINT(type, buffer, dest, error) \
- do { \
- if (!buffer.ReadUintAndAdvance<type>(&dest)) { \
- *error = "Could not read "#dest; \
- return false; \
- } \
- } \
+#define READ_UINT(type, buffer, dest, error) \
+ do { \
+ if (!(buffer).ReadUintAndAdvance<type>(&(dest))) { \
+ *(error) = "Could not read "#dest; \
+ return false; \
+ } \
+ } \
while (false)
bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
@@ -753,6 +782,8 @@
return kProfileLoadSuccess;
}
+// TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
+// return a unique pointer to a ProfileCompilationInfo upon success.
bool ProfileCompilationInfo::Load(int fd) {
std::string error;
ProfileLoadSatus status = LoadInternal(fd, &error);
@@ -770,6 +801,10 @@
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK_GE(fd, 0);
+ if (!IsEmpty()) {
+ return kProfileLoadWouldOverwiteData;
+ }
+
struct stat stat_buffer;
if (fstat(fd, &stat_buffer) != 0) {
return kProfileLoadIOError;
@@ -820,10 +855,10 @@
// the current profile info.
// Note that the number of elements should be very small, so this should not
// be a performance issue.
- for (const auto& other_it : other.info_) {
- auto info_it = info_.find(other_it.first);
- if ((info_it != info_.end()) && (info_it->second.checksum != other_it.second.checksum)) {
- LOG(WARNING) << "Checksum mismatch for dex " << other_it.first;
+ for (const DexFileData* other_dex_data : other.info_) {
+ const DexFileData* dex_data = FindDexData(other_dex_data->profile_key);
+ if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
+ LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
return false;
}
}
@@ -840,32 +875,28 @@
// First, build a mapping from other_dex_profile_index to this_dex_profile_index.
// This will make sure that the ClassReferences will point to the correct dex file.
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
- for (const auto& other_it : other.info_) {
- const std::string& other_dex_location = other_it.first;
- uint32_t other_checksum = other_it.second.checksum;
- const DexFileData& other_dex_data = other_it.second;
- const DexFileData* dex_data = GetOrAddDexFileData(other_dex_location, other_checksum);
+ for (const DexFileData* other_dex_data : other.info_) {
+ const DexFileData* dex_data = GetOrAddDexFileData(other_dex_data->profile_key,
+ other_dex_data->checksum);
if (dex_data == nullptr) {
return false; // Could happen if we exceed the number of allowed dex files.
}
- dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data->profile_index);
+ dex_profile_index_remap.Put(other_dex_data->profile_index, dex_data->profile_index);
}
// Merge the actual profile data.
- for (const auto& other_it : other.info_) {
- const std::string& other_dex_location = other_it.first;
- const DexFileData& other_dex_data = other_it.second;
- auto info_it = info_.find(other_dex_location);
- DCHECK(info_it != info_.end());
+ for (const DexFileData* other_dex_data : other.info_) {
+ DexFileData* dex_data = const_cast<DexFileData*>(FindDexData(other_dex_data->profile_key));
+ DCHECK(dex_data != nullptr);
// Merge the classes.
- info_it->second.class_set.insert(other_dex_data.class_set.begin(),
- other_dex_data.class_set.end());
+ dex_data->class_set.insert(other_dex_data->class_set.begin(),
+ other_dex_data->class_set.end());
// Merge the methods and the inline caches.
- for (const auto& other_method_it : other_dex_data.method_map) {
+ for (const auto& other_method_it : other_dex_data->method_map) {
uint16_t other_method_index = other_method_it.first;
- auto method_it = info_it->second.method_map.FindOrAdd(other_method_index);
+ auto method_it = dex_data->method_map.FindOrAdd(other_method_index);
const auto& other_inline_cache = other_method_it.second;
for (const auto& other_ic_it : other_inline_cache) {
uint16_t other_dex_pc = other_ic_it.first;
@@ -905,28 +936,18 @@
ProfileCompilationInfo::FindMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t dex_method_index) const {
- auto info_it = info_.find(GetProfileDexFileKey(dex_location));
- if (info_it != info_.end()) {
- if (!ChecksumMatch(dex_checksum, info_it->second.checksum)) {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location));
+ if (dex_data != nullptr) {
+ if (!ChecksumMatch(dex_checksum, dex_data->checksum)) {
return nullptr;
}
- const MethodMap& methods = info_it->second.method_map;
+ const MethodMap& methods = dex_data->method_map;
const auto method_it = methods.find(dex_method_index);
return method_it == methods.end() ? nullptr : &(method_it->second);
}
return nullptr;
}
-void ProfileCompilationInfo::DexFileToProfileIndex(
- /*out*/std::vector<DexReference>* dex_references) const {
- dex_references->resize(info_.size());
- for (const auto& info_it : info_) {
- DexReference& dex_ref = (*dex_references)[info_it.second.profile_index];
- dex_ref.dex_location = info_it.first;
- dex_ref.dex_checksum = info_it.second.checksum;
- }
-}
-
bool ProfileCompilationInfo::GetMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t dex_method_index,
@@ -936,7 +957,12 @@
return false;
}
- DexFileToProfileIndex(&pmi->dex_references);
+ pmi->dex_references.resize(info_.size());
+ for (const DexFileData* dex_data : info_) {
+ pmi->dex_references[dex_data->profile_index].dex_location = dex_data->profile_key;
+ pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
+ }
+
// TODO(calin): maybe expose a direct pointer to avoid copying
pmi->inline_caches = *inline_caches;
return true;
@@ -944,12 +970,12 @@
bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
- auto info_it = info_.find(GetProfileDexFileKey(dex_file.GetLocation()));
- if (info_it != info_.end()) {
- if (!ChecksumMatch(dex_file, info_it->second.checksum)) {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_file.GetLocation()));
+ if (dex_data != nullptr) {
+ if (!ChecksumMatch(dex_file, dex_data->checksum)) {
return false;
}
- const std::set<dex::TypeIndex>& classes = info_it->second.class_set;
+ const std::set<dex::TypeIndex>& classes = dex_data->class_set;
return classes.find(type_idx) != classes.end();
}
return false;
@@ -957,16 +983,16 @@
uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
uint32_t total = 0;
- for (const auto& it : info_) {
- total += it.second.method_map.size();
+ for (const DexFileData* dex_data : info_) {
+ total += dex_data->method_map.size();
}
return total;
}
uint32_t ProfileCompilationInfo::GetNumberOfResolvedClasses() const {
uint32_t total = 0;
- for (const auto& it : info_) {
- total += it.second.class_set.size();
+ for (const DexFileData* dex_data : info_) {
+ total += dex_data->class_set.size();
}
return total;
}
@@ -999,35 +1025,27 @@
os << "ProfileInfo:";
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
- // Write the entries in profile index order.
- std::vector<const std::string*> ordered_info_location(info_.size());
- std::vector<const DexFileData*> ordered_info_data(info_.size());
- for (const auto& it : info_) {
- ordered_info_location[it.second.profile_index] = &(it.first);
- ordered_info_data[it.second.profile_index] = &(it.second);
- }
- for (size_t profile_index = 0; profile_index < info_.size(); profile_index++) {
+
+ for (const DexFileData* dex_data : info_) {
os << "\n";
- const std::string& location = *ordered_info_location[profile_index];
- const DexFileData& dex_data = *ordered_info_data[profile_index];
if (print_full_dex_location) {
- os << location;
+ os << dex_data->profile_key;
} else {
// Replace the (empty) multidex suffix of the first key with a substitute for easier reading.
- std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
+ std::string multidex_suffix = DexFile::GetMultiDexSuffix(dex_data->profile_key);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
- os << " [index=" << static_cast<uint32_t>(dex_data.profile_index) << "]";
+ os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == (*dex_files)[i]->GetLocation()) {
+ if (dex_data->profile_key == (*dex_files)[i]->GetLocation()) {
dex_file = (*dex_files)[i];
}
}
}
os << "\n\tmethods: ";
- for (const auto method_it : dex_data.method_map) {
+ for (const auto& method_it : dex_data->method_map) {
if (dex_file != nullptr) {
os << "\n\t\t" << dex_file->PrettyMethod(method_it.first, true);
} else {
@@ -1052,7 +1070,7 @@
os << "], ";
}
os << "\n\tclasses: ";
- for (const auto class_it : dex_data.class_set) {
+ for (const auto class_it : dex_data->class_set) {
if (dex_file != nullptr) {
os << "\n\t\t" << dex_file->PrettyType(class_it);
} else {
@@ -1063,41 +1081,34 @@
return os.str();
}
-void ProfileCompilationInfo::GetClassNames(
- const std::vector<std::unique_ptr<const DexFile>>* dex_files,
- std::set<std::string>* class_names) const {
- std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
- MakeNonOwningVector(dex_files));
- GetClassNames(non_owning_dex_files.get(), class_names);
-}
-
-void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* dex_files,
- std::set<std::string>* class_names) const {
- if (info_.empty()) {
- return;
+bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile* dex_file,
+ std::set<dex::TypeIndex>* class_set,
+ MethodMap* method_map) const {
+ std::set<std::string> ret;
+ std::string profile_key = GetProfileDexFileKey(dex_file->GetLocation());
+ const DexFileData* dex_data = FindDexData(profile_key);
+ if (dex_data == nullptr || dex_data->checksum != dex_file->GetLocationChecksum()) {
+ return false;
}
- for (const auto& it : info_) {
- const std::string& location = it.first;
- const DexFileData& dex_data = it.second;
- const DexFile* dex_file = nullptr;
- if (dex_files != nullptr) {
- for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == GetProfileDexFileKey((*dex_files)[i]->GetLocation()) &&
- dex_data.checksum == (*dex_files)[i]->GetLocationChecksum()) {
- dex_file = (*dex_files)[i];
- }
- }
- }
- for (const auto class_it : dex_data.class_set) {
- if (dex_file != nullptr) {
- class_names->insert(std::string(dex_file->PrettyType(class_it)));
- }
- }
- }
+ *method_map = dex_data->method_map;
+ *class_set = dex_data->class_set;
+ return true;
}
bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
- return info_.Equals(other.info_);
+ // No need to compare profile_key_map_. That's only a cache for fast search.
+ // All the information is already in the info_ vector.
+ if (info_.size() != other.info_.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < info_.size(); i++) {
+ const DexFileData& dex_data = *info_[i];
+ const DexFileData& other_dex_data = *other.info_[i];
+ if (!(dex_data == other_dex_data)) {
+ return false;
+ }
+ }
+ return true;
}
std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
@@ -1107,13 +1118,11 @@
key_to_location_map.emplace(GetProfileDexFileKey(location), location);
}
std::set<DexCacheResolvedClasses> ret;
- for (auto&& pair : info_) {
- const std::string& profile_key = pair.first;
- auto it = key_to_location_map.find(profile_key);
+ for (const DexFileData* dex_data : info_) {
+ const auto& it = key_to_location_map.find(dex_data->profile_key);
if (it != key_to_location_map.end()) {
- const DexFileData& data = pair.second;
- DexCacheResolvedClasses classes(it->second, it->second, data.checksum);
- classes.AddClasses(data.class_set.begin(), data.class_set.end());
+ DexCacheResolvedClasses classes(it->second, it->second, dex_data->checksum);
+ classes.AddClasses(dex_data->class_set.begin(), dex_data->class_set.end());
ret.insert(classes);
}
}
@@ -1121,8 +1130,8 @@
}
void ProfileCompilationInfo::ClearResolvedClasses() {
- for (auto& pair : info_) {
- pair.second.class_set.clear();
+ for (DexFileData* dex_data : info_) {
+ dex_data->class_set.clear();
}
}
@@ -1130,7 +1139,8 @@
bool ProfileCompilationInfo::GenerateTestProfile(int fd,
uint16_t number_of_dex_files,
uint16_t method_ratio,
- uint16_t class_ratio) {
+ uint16_t class_ratio,
+ uint32_t random_seed) {
const std::string base_dex_location = "base.apk";
ProfileCompilationInfo info;
// The limits are defined by the dex specification.
@@ -1139,7 +1149,7 @@
uint16_t number_of_methods = max_method * method_ratio / 100;
uint16_t number_of_classes = max_classes * class_ratio / 100;
- srand(MicroTime());
+ std::srand(random_seed);
// Make sure we generate more samples with a low index value.
// This makes it more likely to hit valid method/class indices in small apps.
@@ -1169,6 +1179,32 @@
return info.Save(fd);
}
+// Naive implementation to generate a random profile file suitable for testing.
+bool ProfileCompilationInfo::GenerateTestProfile(
+ int fd,
+ std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ uint32_t random_seed) {
+ std::srand(random_seed);
+ ProfileCompilationInfo info;
+ for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ const std::string& location = dex_file->GetLocation();
+ uint32_t checksum = dex_file->GetLocationChecksum();
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ // Randomly add a class from the dex file (with 50% chance).
+ if (std::rand() % 2 != 0) {
+ info.AddClassIndex(location, checksum, dex::TypeIndex(dex_file->GetClassDef(i).class_idx_));
+ }
+ }
+ for (uint32_t i = 0; i < dex_file->NumMethodIds(); ++i) {
+ // Randomly add a method from the dex file (with 50% chance).
+ if (std::rand() % 2 != 0) {
+ info.AddMethodIndex(location, checksum, i);
+ }
+ }
+ }
+ return info.Save(fd);
+}
+
bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
const OfflineProfileMethodInfo& other) const {
if (inline_caches.size() != other.inline_caches.size()) {
@@ -1210,4 +1246,17 @@
return true;
}
+void ProfileCompilationInfo::ClearProfile() {
+ for (DexFileData* dex_data : info_) {
+ delete dex_data;
+ }
+ info_.clear();
+ profile_key_map_.clear();
+}
+
+bool ProfileCompilationInfo::IsEmpty() const {
+ DCHECK_EQ(info_.empty(), profile_key_map_.empty());
+ return info_.empty();
+}
+
} // namespace art
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 6ad528c..f68ed5d 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -86,7 +86,7 @@
// A dex location together with its checksum.
struct DexReference {
- DexReference() {}
+ DexReference() : dex_checksum(0) {}
DexReference(const std::string& location, uint32_t checksum)
: dex_location(location), dex_checksum(checksum) {}
@@ -168,6 +168,9 @@
// The inline cache map: DexPc -> DexPcData.
using InlineCacheMap = SafeMap<uint16_t, DexPcData>;
+ // Maps a method dex index to its inline cache.
+ using MethodMap = SafeMap<uint16_t, InlineCacheMap>;
+
// Encodes the full set of inline caches for a given method.
// The dex_references vector is indexed according to the ClassReference::dex_profile_index.
// i.e. the dex file of any ClassReference present in the inline caches can be found at
@@ -181,11 +184,16 @@
// Public methods to create, extend or query the profile.
+ ProfileCompilationInfo() {}
+ ProfileCompilationInfo(const ProfileCompilationInfo& pci);
+ ~ProfileCompilationInfo();
+
// Add the given methods and classes to the current profile object.
bool AddMethodsAndClasses(const std::vector<ProfileMethodInfo>& methods,
const std::set<DexCacheResolvedClasses>& resolved_classes);
// Load profile information from the given file descriptor.
+ // If the current profile is non-empty the load will fail.
bool Load(int fd);
// Merge the data from another ProfileCompilationInfo into the current object.
@@ -229,11 +237,12 @@
std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location = true) const;
- void GetClassNames(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
- std::set<std::string>* class_names) const;
-
- void GetClassNames(const std::vector<const DexFile*>* dex_files,
- std::set<std::string>* class_names) const;
+ // Return the classes and methods for a given dex file through out args. The out args are the set
+ // of classes as well as the methods and their associated inline caches. Returns true if the dex
+ // file is registered and has a matching checksum, false otherwise.
+ bool GetClassesAndMethods(const DexFile* dex_file,
+ std::set<dex::TypeIndex>* class_set,
+ MethodMap* method_map) const;
// Perform an equality test with the `other` profile information.
bool Equals(const ProfileCompilationInfo& other);
@@ -253,7 +262,14 @@
static bool GenerateTestProfile(int fd,
uint16_t number_of_dex_files,
uint16_t method_ratio,
- uint16_t class_ratio);
+ uint16_t class_ratio,
+ uint32_t random_seed);
+
+ // Generate a test profile which will randomly contain classes and methods from
+ // the provided list of dex files.
+ static bool GenerateTestProfile(int fd,
+ std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ uint32_t random_seed);
// Check that the given profile method info contain the same data.
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
@@ -261,24 +277,29 @@
private:
enum ProfileLoadSatus {
+ kProfileLoadWouldOverwiteData,
kProfileLoadIOError,
kProfileLoadVersionMismatch,
kProfileLoadBadData,
kProfileLoadSuccess
};
- // Maps a method dex index to its inline cache.
- using MethodMap = SafeMap<uint16_t, InlineCacheMap>;
-
// Internal representation of the profile information belonging to a dex file.
+ // Note that we could do without profile_key (the key used to encode the dex
+ // file in the profile) and profile_index (the index of the dex file in the
+ // profile) fields in this struct because we can infer them from
+ // profile_key_map_ and info_. However, it makes the profiles logic much
+ // simpler if we have references here as well.
struct DexFileData {
- DexFileData(uint32_t location_checksum, uint16_t index)
- : profile_index(index), checksum(location_checksum) {}
- // The profile index of this dex file (matches ClassReference#dex_profile_index)
+ DexFileData(const std::string& key, uint32_t location_checksum, uint16_t index)
+ : profile_key(key), profile_index(index), checksum(location_checksum) {}
+ // The profile key this data belongs to.
+ std::string profile_key;
+ // The profile index of this dex file (matches ClassReference#dex_profile_index).
uint8_t profile_index;
- // The dex checksum
+ // The dex checksum.
uint32_t checksum;
- // The methonds' profile information
+    // The methods' profile information.
MethodMap method_map;
// The classes which have been profiled. Note that these don't necessarily include
// all the classes that can be found in the inline caches reference.
@@ -289,12 +310,9 @@
}
};
- // Maps dex file to their profile information.
- using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
-
- // Return the profile data for the given dex location or null if the dex location
+ // Return the profile data for the given profile key or null if the dex location
// already exists but has a different checksum
- DexFileData* GetOrAddDexFileData(const std::string& dex_location, uint32_t checksum);
+ DexFileData* GetOrAddDexFileData(const std::string& profile_key, uint32_t checksum);
// Add a method index to the profile (without inline caches).
bool AddMethodIndex(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
@@ -325,6 +343,16 @@
// be the same as the profile index of the dex file (used to encode the ClassReferences).
void DexFileToProfileIndex(/*out*/std::vector<DexReference>* dex_references) const;
+ // Return the dex data associated with the given profile key or null if the profile
+ // doesn't contain the key.
+ const DexFileData* FindDexData(const std::string& profile_key) const;
+
+ // Clear all the profile data.
+ void ClearProfile();
+
+ // Checks if the profile is empty.
+ bool IsEmpty() const;
+
// Parsing functionality.
// The information present in the header of each profile line.
@@ -431,7 +459,15 @@
friend class ProfileAssistantTest;
friend class Dex2oatLayoutTest;
- DexFileToProfileInfoMap info_;
+ // Vector containing the actual profile info.
+ // The vector index is the profile index of the dex data and
+ // matched DexFileData::profile_index.
+ std::vector<DexFileData*> info_;
+
+ // Cache mapping profile keys to profile index.
+ // This is used to speed up searches since it avoids iterating
+ // over the info_ vector when searching by profile key.
+ SafeMap<const std::string, uint8_t> profile_key_map_;
};
} // namespace art
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 5cd8e8f..c9f2d0e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -195,7 +195,7 @@
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
@@ -787,4 +787,26 @@
ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
}
+TEST_F(ProfileCompilationInfoTest, LoadShouldClearExistingDataFromProfiles) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ // Save a few methods.
+ for (uint16_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+ }
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Add a bunch of methods to test_info.
+ ProfileCompilationInfo test_info;
+ for (uint16_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &test_info));
+ }
+
+ // Attempt to load the saved profile into test_info.
+ // This should fail since the test_info already contains data and the load would overwrite it.
+ ASSERT_FALSE(test_info.Load(GetFd(profile)));
+}
} // namespace art
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index e2bd1cb..9d6cd95 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -451,9 +451,6 @@
return;
}
instance_->shutting_down_ = true;
- if (dump_info) {
- instance_->DumpInfo(LOG_STREAM(INFO));
- }
}
{
@@ -470,6 +467,9 @@
{
MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
+ if (dump_info) {
+ instance_->DumpInfo(LOG_STREAM(INFO));
+ }
instance_ = nullptr;
profiler_pthread_ = 0U;
}
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 4dd8e60..be2bffc 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -52,8 +52,10 @@
REQUIRES(!Locks::profiler_lock_, !wait_lock_)
NO_THREAD_SAFETY_ANALYSIS;
- // Just for testing purpose.
+ // For testing or manual purposes (SIGUSR1).
static void ForceProcessProfiles();
+
+ // Just for testing purpose.
static bool HasSeenMethod(const std::string& profile,
const DexFile* dex_file,
uint16_t method_idx);
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
index 4ab2052..c8d256f 100644
--- a/runtime/jit/profile_saver_options.h
+++ b/runtime/jit/profile_saver_options.h
@@ -37,7 +37,8 @@
min_methods_to_save_(kMinMethodsToSave),
min_classes_to_save_(kMinClassesToSave),
min_notification_before_wake_(kMinNotificationBeforeWake),
- max_notification_before_wake_(kMaxNotificationBeforeWake) {}
+ max_notification_before_wake_(kMaxNotificationBeforeWake),
+ profile_path_("") {}
ProfileSaverOptions(
bool enabled,
@@ -47,7 +48,8 @@
uint32_t min_methods_to_save,
uint32_t min_classes_to_save,
uint32_t min_notification_before_wake,
- uint32_t max_notification_before_wake):
+ uint32_t max_notification_before_wake,
+ const std::string& profile_path):
enabled_(enabled),
min_save_period_ms_(min_save_period_ms),
save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms),
@@ -55,7 +57,8 @@
min_methods_to_save_(min_methods_to_save),
min_classes_to_save_(min_classes_to_save),
min_notification_before_wake_(min_notification_before_wake),
- max_notification_before_wake_(max_notification_before_wake) {}
+ max_notification_before_wake_(max_notification_before_wake),
+ profile_path_(profile_path) {}
bool IsEnabled() const {
return enabled_;
@@ -85,6 +88,9 @@
uint32_t GetMaxNotificationBeforeWake() const {
return max_notification_before_wake_;
}
+ std::string GetProfilePath() const {
+ return profile_path_;
+ }
friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) {
os << "enabled_" << pso.enabled_
@@ -106,6 +112,7 @@
uint32_t min_classes_to_save_;
uint32_t min_notification_before_wake_;
uint32_t max_notification_before_wake_;
+ std::string profile_path_;
};
} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index f42a8da..d6881aa 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -108,9 +108,15 @@
}
}
- void IncrementInlineUse() {
- DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+ // Increments the number of times this method is currently being inlined.
+ // Returns whether it was successful, that is it could increment without
+ // overflowing.
+ bool IncrementInlineUse() {
+ if (current_inline_uses_ == std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
current_inline_uses_++;
+ return true;
}
void DecrementInlineUse() {
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 58c5d17..bd7c4ad 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -49,10 +49,19 @@
bool GetUnboxedPrimitiveType(ObjPtr<mirror::Class> klass, Primitive::Type* type)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-#define LOOKUP_PRIMITIVE(primitive, _, __, ___) \
- if (klass->DescriptorEquals(Primitive::BoxedDescriptor(primitive))) { \
- *type = primitive; \
- return true; \
+ std::string storage;
+ const char* descriptor = klass->GetDescriptor(&storage);
+ static const char kJavaLangPrefix[] = "Ljava/lang/";
+ static const size_t kJavaLangPrefixSize = sizeof(kJavaLangPrefix) - 1;
+ if (strncmp(descriptor, kJavaLangPrefix, kJavaLangPrefixSize) != 0) {
+ return false;
+ }
+
+ descriptor += kJavaLangPrefixSize;
+#define LOOKUP_PRIMITIVE(primitive, _, java_name, ___) \
+ if (strcmp(descriptor, #java_name ";") == 0) { \
+ *type = primitive; \
+ return true; \
}
PRIMITIVES_LIST(LOOKUP_PRIMITIVE);
@@ -141,21 +150,23 @@
if (from->DescriptorEquals("Ljava/lang/Object;")) {
// Object might be converted into a primitive during unboxing.
return true;
- } else if (Primitive::IsNumericType(to_primitive) &&
- from->DescriptorEquals("Ljava/lang/Number;")) {
+ }
+
+ if (Primitive::IsNumericType(to_primitive) && from->DescriptorEquals("Ljava/lang/Number;")) {
// Number might be unboxed into any of the number primitive types.
return true;
}
+
Primitive::Type unboxed_type;
if (GetUnboxedPrimitiveType(from, &unboxed_type)) {
if (unboxed_type == to_primitive) {
// Straightforward unboxing conversion such as Boolean => boolean.
return true;
- } else {
- // Check if widening operations for numeric primitives would work,
- // such as Byte => byte => long.
- return Primitive::IsWidenable(unboxed_type, to_primitive);
}
+
+ // Check if widening operations for numeric primitives would work,
+ // such as Byte => byte => long.
+ return Primitive::IsWidenable(unboxed_type, to_primitive);
}
}
@@ -372,25 +383,18 @@
static inline size_t GetInsForProxyOrNativeMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method->IsNative() || method->IsProxyMethod());
-
method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- size_t num_ins = 0;
- // Separate accounting for the receiver, which isn't a part of the
- // shorty.
- if (!method->IsStatic()) {
- ++num_ins;
- }
+ uint32_t shorty_length = 0;
+ const char* shorty = method->GetShorty(&shorty_length);
- uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
- for (size_t i = 1; i < shorty_len; ++i) {
- const char c = shorty[i];
- ++num_ins;
- if (c == 'J' || c == 'D') {
+ // Static methods do not include the receiver. The receiver isn't included
+ // in the shorty_length though the return value is.
+ size_t num_ins = method->IsStatic() ? shorty_length - 1 : shorty_length;
+ for (const char* c = shorty + 1; *c != '\0'; ++c) {
+ if (*c == 'J' || *c == 'D') {
++num_ins;
}
}
-
return num_ins;
}
@@ -402,7 +406,10 @@
ObjPtr<mirror::ObjectArray<mirror::Class>> param_types(callsite_type->GetPTypes());
if (param_types->GetLength() == 1) {
ObjPtr<mirror::Class> param(param_types->GetWithoutChecks(0));
- return param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame);
+ // NB Comparing descriptor here as it appears faster in cycle simulation than using:
+ // param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame)
+ // Costs are 98 vs 173 cycles per invocation.
+ return param->DescriptorEquals("Ldalvik/system/EmulatedStackFrame;");
}
return false;
@@ -416,35 +423,8 @@
ShadowFrame& shadow_frame,
const uint32_t (&args)[Instruction::kMaxVarArgRegs],
uint32_t first_arg,
- JValue* result,
- const mirror::MethodHandle::Kind handle_kind)
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // For virtual and interface methods ensure called_method points to
- // the actual method to invoke.
- if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
- handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
- uint32_t receiver_reg = is_range ? first_arg : args[0];
- ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(receiver_reg));
- if (IsCallerTransformer(callsite_type)) {
- // The current receiver is an emulated stack frame, the method's
- // receiver needs to be fetched from there as the emulated frame
- // will be unpacked into a new frame.
- receiver = ObjPtr<mirror::EmulatedStackFrame>::DownCast(receiver)->GetReceiver();
- }
-
- ObjPtr<mirror::Class> declaring_class(called_method->GetDeclaringClass());
- if (receiver == nullptr || receiver->GetClass() != declaring_class) {
- // Verify that _vRegC is an object reference and of the type expected by
- // the receiver.
- if (!VerifyObjectIsClass(receiver, declaring_class)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
- called_method, kRuntimePointerSize);
- }
- }
-
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
@@ -513,17 +493,23 @@
result->SetL(0);
return false;
}
- } else if (!ConvertAndCopyArgumentsFromCallerFrame<is_range>(self,
- callsite_type,
- target_type,
- shadow_frame,
- args,
- first_arg,
- first_dest_reg,
- new_shadow_frame)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
+ } else {
+ if (!callsite_type->IsConvertible(target_type.Get())) {
+ ThrowWrongMethodTypeException(target_type.Get(), callsite_type.Get());
+ return false;
+ }
+ if (!ConvertAndCopyArgumentsFromCallerFrame<is_range>(self,
+ callsite_type,
+ target_type,
+ shadow_frame,
+ args,
+ first_arg,
+ first_dest_reg,
+ new_shadow_frame)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
}
}
}
@@ -548,13 +534,13 @@
if (ConvertReturnValue(emulated_stack_type, target_type, &local_result)) {
emulated_stack_frame->SetReturnValue(self, local_result);
return true;
- } else {
- DCHECK(self->IsExceptionPending());
- return false;
}
- } else {
- return ConvertReturnValue(callsite_type, target_type, result);
+
+ DCHECK(self->IsExceptionPending());
+ return false;
}
+
+ return ConvertReturnValue(callsite_type, target_type, result);
}
template <bool is_range>
@@ -650,98 +636,130 @@
return klass;
}
+ArtMethod* RefineTargetMethod(Thread* self,
+ ShadowFrame& shadow_frame,
+ const mirror::MethodHandle::Kind& handle_kind,
+ Handle<mirror::MethodType> handle_type,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t receiver_reg,
+ ArtMethod* target_method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
+ handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
+ // For virtual and interface methods ensure target_method points to
+ // the actual method to invoke.
+ ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(receiver_reg));
+ if (IsCallerTransformer(callsite_type)) {
+ // The current receiver is an emulated stack frame, the method's
+ // receiver needs to be fetched from there as the emulated frame
+ // will be unpacked into a new frame.
+ receiver = ObjPtr<mirror::EmulatedStackFrame>::DownCast(receiver)->GetReceiver();
+ }
+
+ ObjPtr<mirror::Class> declaring_class(target_method->GetDeclaringClass());
+ if (receiver == nullptr || receiver->GetClass() != declaring_class) {
+ // Verify that _vRegC is an object reference and of the type expected by
+ // the receiver.
+ if (!VerifyObjectIsClass(receiver, declaring_class)) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
+ target_method, kRuntimePointerSize);
+ }
+ } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeDirect) {
+ // String constructors are a special case, they are replaced with
+ // StringFactory methods.
+ if (target_method->IsConstructor() && target_method->GetDeclaringClass()->IsStringClass()) {
+ DCHECK(handle_type->GetRType()->IsStringClass());
+ return WellKnownClasses::StringInitToStringFactory(target_method);
+ }
+ } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeSuper) {
+ ObjPtr<mirror::Class> declaring_class = target_method->GetDeclaringClass();
+
+ // Note that we're not dynamically dispatching on the type of the receiver
+ // here. We use the static type of the "receiver" object that we've
+ // recorded in the method handle's type, which will be the same as the
+ // special caller that was specified at the point of lookup.
+ ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
+ if (!declaring_class->IsInterface()) {
+ ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
+ uint16_t vtable_index = target_method->GetMethodIndex();
+ DCHECK(super_class != nullptr);
+ DCHECK(super_class->HasVTable());
+ // Note that super_class is a super of referrer_class and target_method
+ // will always be declared by super_class (or one of its super classes).
+ DCHECK_LT(vtable_index, super_class->GetVTableLength());
+ return super_class->GetVTableEntry(vtable_index, kRuntimePointerSize);
+ } else {
+ return referrer_class->FindVirtualMethodForInterfaceSuper(target_method, kRuntimePointerSize);
+ }
+ }
+ return target_method;
+}
+
template <bool is_range>
-bool DoInvokePolymorphicUnchecked(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+bool DoInvokePolymorphicMethod(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
- if (IsInvoke(handle_kind)) {
- // Get the method we're actually invoking along with the kind of
- // invoke that is desired. We don't need to perform access checks at this
- // point because they would have been performed on our behalf at the point
- // of creation of the method handle.
- ArtMethod* called_method = method_handle->GetTargetMethod();
- CHECK(called_method != nullptr);
+ DCHECK(IsInvoke(handle_kind));
- if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
- handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
- // TODO: Unfortunately, we have to postpone dynamic receiver based checks
- // because the receiver might be cast or might come from an emulated stack
- // frame, which means that it is unknown at this point. We perform these
- // checks inside DoCallPolymorphic right before we do the actual invoke.
- } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeDirect) {
- // String constructors are a special case, they are replaced with StringFactory
- // methods.
- if (called_method->IsConstructor() && called_method->GetDeclaringClass()->IsStringClass()) {
- DCHECK(handle_type->GetRType()->IsStringClass());
- called_method = WellKnownClasses::StringInitToStringFactory(called_method);
- }
- } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeSuper) {
- ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass();
+ // Get the method we're actually invoking along with the kind of
+ // invoke that is desired. We don't need to perform access checks at this
+ // point because they would have been performed on our behalf at the point
+ // of creation of the method handle.
+ ArtMethod* target_method = method_handle->GetTargetMethod();
+ uint32_t receiver_reg = is_range ? first_arg: args[0];
+ ArtMethod* called_method = RefineTargetMethod(self,
+ shadow_frame,
+ handle_kind,
+ handle_type,
+ callsite_type,
+ receiver_reg,
+ target_method);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
- // Note that we're not dynamically dispatching on the type of the receiver
- // here. We use the static type of the "receiver" object that we've
- // recorded in the method handle's type, which will be the same as the
- // special caller that was specified at the point of lookup.
- ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
- if (!declaring_class->IsInterface()) {
- ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
- uint16_t vtable_index = called_method->GetMethodIndex();
- DCHECK(super_class != nullptr);
- DCHECK(super_class->HasVTable());
- // Note that super_class is a super of referrer_class and called_method
- // will always be declared by super_class (or one of its super classes).
- DCHECK_LT(vtable_index, super_class->GetVTableLength());
- called_method = super_class->GetVTableEntry(vtable_index, kRuntimePointerSize);
- } else {
- called_method = referrer_class->FindVirtualMethodForInterfaceSuper(
- called_method, kRuntimePointerSize);
- }
- CHECK(called_method != nullptr);
- }
- if (IsInvokeTransform(handle_kind)) {
- // There are two cases here - method handles representing regular
- // transforms and those representing call site transforms. Method
- // handles for call site transforms adapt their MethodType to match
- // the call site. For these, the |callee_type| is the same as the
- // |callsite_type|. The VarargsCollector is such a tranform, its
- // method type depends on the call site, ie. x(a) or x(a, b), or
- // x(a, b, c). The VarargsCollector invokes a variable arity method
- // with the arity arguments in an array.
- Handle<mirror::MethodType> callee_type =
- (handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
- : handle_type;
- return DoCallTransform<is_range>(called_method,
+ if (IsInvokeTransform(handle_kind)) {
+ // There are two cases here - method handles representing regular
+ // transforms and those representing call site transforms. Method
+ // handles for call site transforms adapt their MethodType to match
+ // the call site. For these, the |callee_type| is the same as the
+    // |callsite_type|. The VarargsCollector is such a transform, its
+ // method type depends on the call site, ie. x(a) or x(a, b), or
+ // x(a, b, c). The VarargsCollector invokes a variable arity method
+ // with the arity arguments in an array.
+ Handle<mirror::MethodType> callee_type =
+ (handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
+ : handle_type;
+ return DoCallTransform<is_range>(called_method,
+ callsite_type,
+ callee_type,
+ self,
+ shadow_frame,
+ method_handle /* receiver */,
+ args,
+ first_arg,
+ result);
+ } else {
+ return DoCallPolymorphic<is_range>(called_method,
callsite_type,
- callee_type,
+ handle_type,
self,
shadow_frame,
- method_handle /* receiver */,
args,
first_arg,
result);
-
- } else {
- return DoCallPolymorphic<is_range>(called_method,
- callsite_type,
- handle_type,
- self,
- shadow_frame,
- args,
- first_arg,
- result,
- handle_kind);
- }
- } else {
- LOG(FATAL) << "Unreachable: " << handle_kind;
- UNREACHABLE();
}
}
@@ -948,55 +966,30 @@
ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
CHECK(handle_type != nullptr);
- if (!IsInvokeTransform(handle_kind)) {
- if (UNLIKELY(!IsCallerTransformer(callsite_type) &&
- !callsite_type->IsConvertible(handle_type.Ptr()))) {
+ if (IsFieldAccess(handle_kind)) {
+ DCHECK(!callsite_type->IsExactMatch(handle_type.Ptr()));
+ if (!callsite_type->IsConvertible(handle_type.Ptr())) {
ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
return false;
}
+ const bool do_convert = true;
+ return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
+ self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
- if (IsFieldAccess(handle_kind)) {
- if (UNLIKELY(callsite_type->IsExactMatch(handle_type.Ptr()))) {
- const bool do_convert = false;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- } else {
- const bool do_convert = true;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
- }
-
- if (UNLIKELY(callsite_type->IsExactMatch(handle_type.Ptr()))) {
- return DoInvokePolymorphicUnchecked<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- } else {
- return DoInvokePolymorphicUnchecked<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
+ return DoInvokePolymorphicMethod<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
template <bool is_range>
@@ -1008,32 +1001,9 @@
uint32_t first_arg,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // We need to check the nominal type of the handle in addition to the
- // real type. The "nominal" type is present when MethodHandle.asType is
- // called any handle, and results in the declared type of the handle
- // changing.
- ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
- if (UNLIKELY(nominal_type != nullptr)) {
- if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
- ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
- return false;
- }
- return DoInvokePolymorphicNonExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
-
- ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
- if (UNLIKELY(!callsite_type->IsExactMatch(handle_type.Ptr()))) {
- ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
- return false;
- }
-
+ StackHandleScope<1> hs(self);
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
+ Handle<mirror::MethodType> method_handle_type(hs.NewHandle(method_handle->GetMethodType()));
if (IsFieldAccess(handle_kind)) {
const bool do_convert = false;
return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
@@ -1046,13 +1016,68 @@
result);
}
- return DoInvokePolymorphicUnchecked<is_range>(self,
+ // Slow-path check.
+ if (IsInvokeTransform(handle_kind) || IsCallerTransformer(callsite_type)) {
+ return DoInvokePolymorphicMethod<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
+
+  // On the fast-path. This is equivalent to DoCallPolymorphic without the conversion paths.
+ ArtMethod* target_method = method_handle->GetTargetMethod();
+ uint32_t receiver_reg = is_range ? first_arg : args[0];
+ ArtMethod* called_method = RefineTargetMethod(self,
shadow_frame,
- method_handle,
+ handle_kind,
+ method_handle_type,
callsite_type,
- args,
- first_arg,
- result);
+ receiver_reg,
+ target_method);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+
+ // Compute method information.
+ const DexFile::CodeItem* code_item = called_method->GetCodeItem();
+ uint16_t num_regs;
+ size_t num_input_regs;
+ size_t first_dest_reg;
+ if (LIKELY(code_item != nullptr)) {
+ num_regs = code_item->registers_size_;
+ first_dest_reg = num_regs - code_item->ins_size_;
+ num_input_regs = code_item->ins_size_;
+ // Parameter registers go at the end of the shadow frame.
+ DCHECK_NE(first_dest_reg, (size_t)-1);
+ } else {
+ // No local regs for proxy and native methods.
+ DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
+ num_regs = num_input_regs = GetInsForProxyOrNativeMethod(called_method);
+ first_dest_reg = 0;
+ }
+
+ // Allocate shadow frame on the stack.
+ const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+ ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+ CopyArgumentsFromCallerFrame<is_range>(shadow_frame,
+ new_shadow_frame,
+ args,
+ first_arg,
+ first_dest_reg,
+ num_input_regs);
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ PerformCall(self, code_item, shadow_frame.GetMethod(), first_dest_reg, new_shadow_frame, result);
+ if (self->IsExceptionPending()) {
+ return false;
+ }
+ return true;
}
} // namespace
@@ -1067,7 +1092,35 @@
uint32_t first_arg,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::MethodType> method_handle_type = method_handle->GetMethodType();
if (IsMethodHandleInvokeExact(invoke_method)) {
+ // We need to check the nominal type of the handle in addition to the
+ // real type. The "nominal" type is present when MethodHandle.asType is
+ // called any handle, and results in the declared type of the handle
+ // changing.
+ ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
+ if (UNLIKELY(nominal_type != nullptr)) {
+ if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
+ ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
+ return false;
+ }
+
+ if (LIKELY(!nominal_type->IsExactMatch(method_handle_type.Ptr()))) {
+ // Different nominal type means we have to treat as non-exact.
+ return DoInvokePolymorphicNonExact<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
+ }
+
+ if (!callsite_type->IsExactMatch(method_handle_type.Ptr())) {
+ ThrowWrongMethodTypeException(method_handle_type.Ptr(), callsite_type.Get());
+ return false;
+ }
return DoInvokePolymorphicExact<is_range>(self,
shadow_frame,
method_handle,
@@ -1076,6 +1129,16 @@
first_arg,
result);
} else {
+ if (UNLIKELY(callsite_type->IsExactMatch(method_handle_type.Ptr()))) {
+ // A non-exact invoke that can be invoked exactly.
+ return DoInvokePolymorphicExact<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
return DoInvokePolymorphicNonExact<is_range>(self,
shadow_frame,
method_handle,
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 16cf30f..51d9d24 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -198,6 +198,13 @@
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void** ElementAddress(size_t index, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_LT(index, static_cast<size_t>(GetLength()));
+ return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(this) +
+ Array::DataOffset(static_cast<size_t>(ptr_size)).Uint32Value() +
+ static_cast<size_t>(ptr_size) * index);
+ }
+
template<bool kTransactionActive = false, bool kUnchecked = false>
void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 003b03b..be3b937 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -1097,7 +1097,9 @@
if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
- ArtMethod* new_method = visitor(method);
+ void** dest_addr = reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) +
+ EmbeddedVTableEntryOffset(i, pointer_size).Uint32Value());
+ ArtMethod* new_method = visitor(method, dest_addr);
if (method != new_method) {
dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
}
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 5dc3aca..32d49bb 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -40,8 +40,6 @@
void ClassExt::SetObsoleteArrays(ObjPtr<PointerArray> methods,
ObjPtr<ObjectArray<DexCache>> dex_caches) {
- DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
- << "Obsolete arrays are set without synchronization!";
CHECK_EQ(methods.IsNull(), dex_caches.IsNull());
auto obsolete_dex_cache_off = OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_);
auto obsolete_methods_off = OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_);
@@ -54,8 +52,7 @@
// these arrays are written into without all threads being suspended we have a race condition! This
// race could cause obsolete methods to be missed.
bool ClassExt::ExtendObsoleteArrays(Thread* self, uint32_t increase) {
- DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
- << "Obsolete arrays are set without synchronization!";
+ // TODO: It would be good to check that we have locked the class associated with this ClassExt.
StackHandleScope<5> hs(self);
Handle<ClassExt> h_this(hs.NewHandle(this));
Handle<PointerArray> old_methods(hs.NewHandle(h_this->GetObsoleteMethods()));
@@ -117,9 +114,9 @@
}
}
-void ClassExt::SetOriginalDexFileBytes(ObjPtr<ByteArray> bytes) {
+void ClassExt::SetOriginalDexFile(ObjPtr<Object> bytes) {
DCHECK(!Runtime::Current()->IsActiveTransaction());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_bytes_), bytes);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_), bytes);
}
void ClassExt::SetClass(ObjPtr<Class> dalvik_system_ClassExt) {
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index fac955a..708665d 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -60,11 +60,11 @@
OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_));
}
- ByteArray* GetOriginalDexFileBytes() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObject<ByteArray>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_bytes_));
+ Object* GetOriginalDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_));
}
- void SetOriginalDexFileBytes(ObjPtr<ByteArray> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetOriginalDexFile(ObjPtr<Object> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
void SetObsoleteArrays(ObjPtr<PointerArray> methods, ObjPtr<ObjectArray<DexCache>> dex_caches)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -89,7 +89,7 @@
HeapReference<PointerArray> obsolete_methods_;
- HeapReference<ByteArray> original_dex_file_bytes_;
+ HeapReference<Object> original_dex_file_;
// The saved verification error of this class.
HeapReference<Object> verify_error_;
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 48a9ecd..cf570b8 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -65,7 +65,7 @@
DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object),
index(index) {}
- DexCachePair() = default;
+ DexCachePair() : index(0) {}
DexCachePair(const DexCachePair<T>&) = default;
DexCachePair& operator=(const DexCachePair<T>&) = default;
@@ -426,6 +426,11 @@
NativeDexCachePair<T> pair,
PointerSize ptr_size);
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -457,11 +462,6 @@
using ConversionPair32 = ConversionPair<uint32_t>;
using ConversionPair64 = ConversionPair<uint64_t>;
- uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
-
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 8e591e4..811f1ea 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -187,10 +187,12 @@
uint32_t rb_state = lw.ReadBarrierState();
return rb_state;
#else
- // mips/mips64
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(fake_address_dependency);
+ // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
+ LockWord lw = GetLockWord(false);
+ *fake_address_dependency = 0;
+ std::atomic_thread_fence(std::memory_order_acquire);
+ uint32_t rb_state = lw.ReadBarrierState();
+ return rb_state;
#endif
}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 1fa4682..e80d31c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -354,7 +354,7 @@
// For use only by the JDWP implementation.
class MonitorInfo {
public:
- MonitorInfo() = default;
+ MonitorInfo() : owner_(nullptr), entry_count_(0) {}
MonitorInfo(const MonitorInfo&) = default;
MonitorInfo& operator=(const MonitorInfo&) = default;
explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d81c13d..11f8505 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -17,6 +17,8 @@
#include "dalvik_system_VMRuntime.h"
#ifdef ART_TARGET_ANDROID
+#include <sys/time.h>
+#include <sys/resource.h>
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
#include <limits.h>
@@ -449,8 +451,12 @@
Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
+ // In fallback mode, not all boot classpath components might be registered yet.
+ if (!class_linker->IsDexFileRegistered(self, *dex_file)) {
+ continue;
+ }
ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file);
- CHECK(dex_cache != nullptr); // Boot class path dex caches are never unloaded.
+ DCHECK(dex_cache != nullptr); // Boot class path dex caches are never unloaded.
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
if (string != nullptr) {
@@ -625,6 +631,23 @@
return Runtime::Current()->GetPrunedDalvikCache() ? JNI_TRUE : JNI_FALSE;
}
+static void VMRuntime_setSystemDaemonThreadPriority(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+#ifdef ART_TARGET_ANDROID
+ Thread* self = Thread::Current();
+ DCHECK(self != nullptr);
+ pid_t tid = self->GetTid();
+ // We use a priority lower than the default for the system daemon threads (e.g. HeapTaskDaemon) to
+ // avoid jank due to CPU contention between GC and other UI-related threads. b/36631902.
+ // We may use a native priority that doesn't have a corresponding java.lang.Thread-level priority.
+ static constexpr int kSystemDaemonNiceValue = 4; // priority 124
+ if (setpriority(PRIO_PROCESS, tid, kSystemDaemonNiceValue) != 0) {
+ PLOG(INFO) << *self << " setpriority(PRIO_PROCESS, " << tid << ", "
+ << kSystemDaemonNiceValue << ") failed";
+ }
+#endif
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -662,6 +685,7 @@
NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
+ NATIVE_METHOD(VMRuntime, setSystemDaemonThreadPriority, "()V"),
};
void register_dalvik_system_VMRuntime(JNIEnv* env) {
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index c58854b..d77cfa1 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -118,7 +118,7 @@
for (int signal = 0; signal < _NSIG; ++signal) {
android::NativeBridgeSignalHandlerFn fn = android::NativeBridgeGetSignalHandler(signal);
if (fn != nullptr) {
- SetSpecialSignalHandlerFn(signal, fn);
+ AddSpecialSignalHandlerFn(signal, fn);
}
}
#endif
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 7460d62..cbc5024 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -105,7 +105,7 @@
if (pid == -1) {
close(caller_to_addr2line[0]);
close(caller_to_addr2line[1]);
- close(addr2line_to_caller[1]);
+ close(addr2line_to_caller[0]);
close(addr2line_to_caller[1]);
return nullptr;
}
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index f72a853..71c6a82 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -31,7 +31,8 @@
n(n_in),
include_runtime_and_upcalls_(include_runtime_and_upcalls),
count(0),
- caller(nullptr) {}
+ caller(nullptr),
+ caller_pc(0) {}
bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
diff --git a/runtime/oat.h b/runtime/oat.h
index 190d533..faa0129 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '1', '6', '\0' }; // Add method infos.
+ static constexpr uint8_t kOatVersion[] = { '1', '1', '7', '\0' }; // Read barriers on MIPS.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 1735045..db6f8ee 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -640,15 +640,8 @@
std::string dir = location.substr(0, pos+1);
dir += "oat/" + std::string(GetInstructionSetString(isa));
- // Find the file portion of the dex location.
- std::string file;
- if (pos == std::string::npos) {
- file = location;
- } else {
- file = location.substr(pos+1);
- }
-
// Get the base part of the file without the extension.
+ std::string file = location.substr(pos+1);
pos = file.rfind('.');
if (pos == std::string::npos) {
*error_msg = "Dex location " + location + " has no extension.";
@@ -725,44 +718,23 @@
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
-// TODO: Use something better than xor for the combined image checksum.
std::unique_ptr<OatFileAssistant::ImageInfo>
OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
CHECK(error_msg != nullptr);
- // Use the currently loaded image to determine the image locations for all
- // the image spaces, regardless of the isa requested. Otherwise we would
- // need to read from the boot image's oat file to determine the rest of the
- // image locations in the case of multi-image.
Runtime* runtime = Runtime::Current();
- std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- if (image_spaces.empty()) {
- *error_msg = "There are no boot image spaces";
+ std::unique_ptr<ImageInfo> info(new ImageInfo());
+ info->location = runtime->GetImageLocation();
+
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
return nullptr;
}
- std::unique_ptr<ImageInfo> info(new ImageInfo());
- info->location = image_spaces[0]->GetImageLocation();
-
- // TODO: Special casing on isa == kRuntimeISA is presumably motivated by
- // performance: 'it's faster to use an already loaded image header than read
- // the image header from disk'. But the loaded image is not necessarily the
- // same as kRuntimeISA, so this behavior is suspect (b/35659889).
- if (isa == kRuntimeISA) {
- const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
- info->oat_checksum = image_header.GetOatChecksum();
- info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
- info->patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
- if (image_header == nullptr) {
- return nullptr;
- }
- info->oat_checksum = image_header->GetOatChecksum();
- info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
- info->patch_delta = image_header->GetPatchDelta();
- }
+ info->oat_checksum = image_header->GetOatChecksum();
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ info->patch_delta = image_header->GetPatchDelta();
return info;
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index d61e994..b84e711 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -369,7 +369,7 @@
std::unique_ptr<OatFile> file_;
bool status_attempted_ = false;
- OatStatus status_;
+ OatStatus status_ = OatStatus::kOatCannotOpen;
// For debugging only.
// If this flag is set, the file has been released to the user and the
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index d04dbbe..a950980 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -482,6 +482,8 @@
}
// Now drain the queue.
+ bool has_duplicates = false;
+ error_msg->clear();
while (!queue.empty()) {
// Modifying the top element is only safe if we pop right after.
DexFileAndClassPair compare_pop(queue.top());
@@ -493,12 +495,15 @@
if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
// Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
- *error_msg =
- StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ error_msg->append(
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s\n",
compare_pop.GetCachedDescriptor(),
compare_pop.GetDexFile()->GetLocation().c_str(),
- top.GetDexFile()->GetLocation().c_str());
- return true;
+ top.GetDexFile()->GetLocation().c_str()));
+ if (!VLOG_IS_ON(oat)) {
+ return true;
+ }
+ has_duplicates = true;
}
queue.pop();
AddNext(top, queue);
@@ -510,7 +515,7 @@
AddNext(compare_pop, queue);
}
- return false;
+ return has_duplicates;
}
// Check for class-def collisions in dex files.
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index dd49ad0..e38f265 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -24,6 +24,7 @@
defaults: ["art_defaults"],
host_supported: true,
srcs: ["events.cc",
+ "fixed_up_dex_file.cc",
"object_tagging.cc",
"OpenjdkJvmTi.cc",
"ti_class.cc",
@@ -56,7 +57,10 @@
art_cc_library {
name: "libopenjdkjvmti",
defaults: ["libopenjdkjvmti_defaults"],
- shared_libs: ["libart"],
+ shared_libs: [
+ "libart",
+ "libart-compiler",
+ ],
}
art_cc_library {
@@ -65,5 +69,8 @@
"art_debug_defaults",
"libopenjdkjvmti_defaults",
],
- shared_libs: ["libartd"],
+ shared_libs: [
+ "libartd",
+ "libartd-compiler",
+ ],
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 5401e5c..39e603e 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -79,20 +79,26 @@
class JvmtiFunctions {
private:
- static bool IsValidEnv(jvmtiEnv* env) {
- return env != nullptr;
+ static jvmtiError getEnvironmentError(jvmtiEnv* env) {
+ if (env == nullptr) {
+ return ERR(INVALID_ENVIRONMENT);
+ } else if (art::Thread::Current() == nullptr) {
+ return ERR(UNATTACHED_THREAD);
+ } else {
+ return OK;
+ }
}
-#define ENSURE_VALID_ENV(env) \
- do { \
- if (!IsValidEnv(env)) { \
- return ERR(INVALID_ENVIRONMENT); \
- } \
+#define ENSURE_VALID_ENV(env) \
+ do { \
+ jvmtiError ensure_valid_env_ ## __LINE__ = getEnvironmentError(env); \
+ if (ensure_valid_env_ ## __LINE__ != OK) { \
+ return ensure_valid_env_ ## __LINE__ ; \
+ } \
} while (false)
#define ENSURE_HAS_CAP(env, cap) \
do { \
- ENSURE_VALID_ENV(env); \
if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.cap != 1) { \
return ERR(MUST_POSSESS_CAPABILITY); \
} \
@@ -121,18 +127,22 @@
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -141,11 +151,13 @@
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -154,6 +166,7 @@
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -161,16 +174,19 @@
static jvmtiError StopThread(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject exception ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
@@ -178,6 +194,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint* owned_monitor_count_ptr ATTRIBUTE_UNUSED,
jobject** owned_monitors_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -187,6 +204,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint* monitor_info_count_ptr ATTRIBUTE_UNUSED,
jvmtiMonitorStackDepthInfo** monitor_info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -194,6 +212,7 @@
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject* monitor_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -203,26 +222,31 @@
jvmtiStartFunction proc,
const void* arg,
jint priority) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::RunAgentThread(env, thread, proc, arg, priority);
}
static jvmtiError SetThreadLocalStorage(jvmtiEnv* env, jthread thread, const void* data) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::SetThreadLocalStorage(env, thread, data);
}
static jvmtiError GetThreadLocalStorage(jvmtiEnv* env, jthread thread, void** data_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadLocalStorage(env, thread, data_ptr);
}
static jvmtiError GetTopThreadGroups(jvmtiEnv* env,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetTopThreadGroups(env, group_count_ptr, groups_ptr);
}
static jvmtiError GetThreadGroupInfo(jvmtiEnv* env,
jthreadGroup group,
jvmtiThreadGroupInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupInfo(env, group, info_ptr);
}
@@ -232,6 +256,7 @@
jthread** threads_ptr,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupChildren(env,
group,
thread_count_ptr,
@@ -246,6 +271,7 @@
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetStackTrace(env,
thread,
start_depth,
@@ -258,6 +284,7 @@
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
@@ -266,6 +293,7 @@
const jthread* thread_list,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetThreadListStackTraces(env,
thread_count,
thread_list,
@@ -274,10 +302,12 @@
}
static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameCount(env, thread, count_ptr);
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -287,12 +317,14 @@
jint depth,
jmethodID* method_ptr,
jlocation* location_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
static jvmtiError NotifyFramePop(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -300,6 +332,7 @@
static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -307,6 +340,7 @@
static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -314,6 +348,7 @@
static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -321,6 +356,7 @@
static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -328,11 +364,13 @@
static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -343,6 +381,7 @@
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.FollowReferences(env,
@@ -358,12 +397,14 @@
jclass klass,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -381,6 +422,7 @@
}
static jvmtiError SetTag(jvmtiEnv* env, jobject object, jlong tag) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
if (object == nullptr) {
@@ -405,6 +447,7 @@
jint* count_ptr,
jobject** object_result_ptr,
jlong** tag_result_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -422,6 +465,7 @@
}
static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
+ ENSURE_VALID_ENV(env);
return HeapUtil::ForceGarbageCollection(env);
}
@@ -430,6 +474,7 @@
jobject object ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_reference_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -440,6 +485,7 @@
jvmtiStackReferenceCallback stack_ref_callback ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_ref_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -448,6 +494,7 @@
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -458,6 +505,7 @@
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -467,6 +515,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -475,6 +524,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -484,6 +534,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -493,6 +544,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -502,6 +554,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -511,6 +564,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -520,6 +574,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -529,6 +584,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -538,6 +594,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -547,6 +604,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -556,6 +614,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -563,6 +622,7 @@
static jvmtiError SetBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -570,6 +630,7 @@
static jvmtiError ClearBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -577,6 +638,7 @@
static jvmtiError SetFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -584,6 +646,7 @@
static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -591,6 +654,7 @@
static jvmtiError SetFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -598,11 +662,13 @@
static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
@@ -611,6 +677,7 @@
jobject initiating_loader,
jint* class_count_ptr,
jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoaderClasses(env, initiating_loader, class_count_ptr, classes_ptr);
}
@@ -618,21 +685,25 @@
jclass klass,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassSignature(env, klass, signature_ptr, generic_ptr);
}
static jvmtiError GetClassStatus(jvmtiEnv* env, jclass klass, jint* status_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassStatus(env, klass, status_ptr);
}
static jvmtiError GetSourceFileName(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_name_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetClassModifiers(jvmtiEnv* env, jclass klass, jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassModifiers(env, klass, modifiers_ptr);
}
@@ -640,6 +711,7 @@
jclass klass,
jint* method_count_ptr,
jmethodID** methods_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassMethods(env, klass, method_count_ptr, methods_ptr);
}
@@ -647,6 +719,7 @@
jclass klass,
jint* field_count_ptr,
jfieldID** fields_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassFields(env, klass, field_count_ptr, fields_ptr);
}
@@ -654,6 +727,7 @@
jclass klass,
jint* interface_count_ptr,
jclass** interfaces_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetImplementedInterfaces(env, klass, interface_count_ptr, interfaces_ptr);
}
@@ -661,6 +735,7 @@
jclass klass,
jint* minor_version_ptr,
jint* major_version_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassVersionNumbers(env, klass, minor_version_ptr, major_version_ptr);
}
@@ -669,38 +744,45 @@
jint* constant_pool_count_ptr ATTRIBUTE_UNUSED,
jint* constant_pool_byte_count_ptr ATTRIBUTE_UNUSED,
unsigned char** constant_pool_bytes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsInterface(jvmtiEnv* env, jclass klass, jboolean* is_interface_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsInterface(env, klass, is_interface_ptr);
}
static jvmtiError IsArrayClass(jvmtiEnv* env,
jclass klass,
jboolean* is_array_class_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsArrayClass(env, klass, is_array_class_ptr);
}
static jvmtiError IsModifiableClass(jvmtiEnv* env,
jclass klass,
jboolean* is_modifiable_class_ptr) {
+ ENSURE_VALID_ENV(env);
return Redefiner::IsModifiableClass(env, klass, is_modifiable_class_ptr);
}
static jvmtiError GetClassLoader(jvmtiEnv* env, jclass klass, jobject* classloader_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoader(env, klass, classloader_ptr);
}
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_debug_extension_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -719,6 +801,7 @@
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -735,16 +818,19 @@
}
static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectSize(env, object, size_ptr);
}
static jvmtiError GetObjectHashCode(jvmtiEnv* env, jobject object, jint* hash_code_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectHashCode(env, object, hash_code_ptr);
}
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
jobject object ATTRIBUTE_UNUSED,
jvmtiMonitorUsage* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -755,6 +841,7 @@
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldName(env, klass, field, name_ptr, signature_ptr, generic_ptr);
}
@@ -762,6 +849,7 @@
jclass klass,
jfieldID field,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldDeclaringClass(env, klass, field, declaring_class_ptr);
}
@@ -769,6 +857,7 @@
jclass klass,
jfieldID field,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldModifiers(env, klass, field, modifiers_ptr);
}
@@ -776,6 +865,7 @@
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return FieldUtil::IsFieldSynthetic(env, klass, field, is_synthetic_ptr);
}
@@ -785,30 +875,35 @@
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodName(env, method, name_ptr, signature_ptr, generic_ptr);
}
static jvmtiError GetMethodDeclaringClass(jvmtiEnv* env,
jmethodID method,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodDeclaringClass(env, method, declaring_class_ptr);
}
static jvmtiError GetMethodModifiers(jvmtiEnv* env,
jmethodID method,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodModifiers(env, method, modifiers_ptr);
}
static jvmtiError GetMaxLocals(jvmtiEnv* env,
jmethodID method,
jint* max_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMaxLocals(env, method, max_ptr);
}
static jvmtiError GetArgumentsSize(jvmtiEnv* env,
jmethodID method,
jint* size_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetArgumentsSize(env, method, size_ptr);
}
@@ -816,6 +911,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_line_numbers);
return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
@@ -824,6 +920,7 @@
jmethodID method,
jlocation* start_location_ptr,
jlocation* end_location_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodLocation(env, method, start_location_ptr, end_location_ptr);
}
@@ -831,6 +928,7 @@
jmethodID method ATTRIBUTE_UNUSED,
jint* entry_count_ptr ATTRIBUTE_UNUSED,
jvmtiLocalVariableEntry** table_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -839,24 +937,29 @@
jmethodID method ATTRIBUTE_UNUSED,
jint* bytecode_count_ptr ATTRIBUTE_UNUSED,
unsigned char** bytecodes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsMethodNative(jvmtiEnv* env, jmethodID method, jboolean* is_native_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodNative(env, method, is_native_ptr);
}
static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr);
}
static jvmtiError IsMethodObsolete(jvmtiEnv* env, jmethodID method, jboolean* is_obsolete_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodObsolete(env, method, is_obsolete_ptr);
}
static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -864,43 +967,53 @@
static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env,
jint prefix_count ATTRIBUTE_UNUSED,
char** prefixes ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError CreateRawMonitor(jvmtiEnv* env, const char* name, jrawMonitorID* monitor_ptr) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::CreateRawMonitor(env, name, monitor_ptr);
}
static jvmtiError DestroyRawMonitor(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::DestroyRawMonitor(env, monitor);
}
static jvmtiError RawMonitorEnter(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorEnter(env, monitor);
}
static jvmtiError RawMonitorExit(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorExit(env, monitor);
}
static jvmtiError RawMonitorWait(jvmtiEnv* env, jrawMonitorID monitor, jlong millis) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorWait(env, monitor, millis);
}
static jvmtiError RawMonitorNotify(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotify(env, monitor);
}
static jvmtiError RawMonitorNotifyAll(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotifyAll(env, monitor);
}
static jvmtiError SetJNIFunctionTable(jvmtiEnv* env, const jniNativeInterface* function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::SetJNIFunctionTable(env, function_table);
}
static jvmtiError GetJNIFunctionTable(jvmtiEnv* env, jniNativeInterface** function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::GetJNIFunctionTable(env, function_table);
}
@@ -955,14 +1068,16 @@
return gEventHandler.SetEvent(art_env, art_thread, GetArtJvmtiEvent(art_env, event_type), mode);
}
- static jvmtiError GenerateEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GenerateEvents(jvmtiEnv* env,
jvmtiEvent event_type ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
return OK;
}
- static jvmtiError GetExtensionFunctions(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionFunctions(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionFunctionInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension functions.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -970,9 +1085,10 @@
return ERR(NONE);
}
- static jvmtiError GetExtensionEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionEvents(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionEventInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -980,9 +1096,10 @@
return ERR(NONE);
}
- static jvmtiError SetExtensionEventCallback(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetExtensionEventCallback(jvmtiEnv* env,
jint extension_event_index ATTRIBUTE_UNUSED,
jvmtiExtensionEvent callback ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events, so any call is illegal.
return ERR(ILLEGAL_ARGUMENT);
}
@@ -999,8 +1116,8 @@
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
jvmtiError ret = OK;
- jvmtiCapabilities changed;
- jvmtiCapabilities potential_capabilities;
+ jvmtiCapabilities changed = {};
+ jvmtiCapabilities potential_capabilities = {};
ret = env->GetPotentialCapabilities(&potential_capabilities);
if (ret != OK) {
return ret;
@@ -1072,7 +1189,7 @@
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
- jvmtiCapabilities changed;
+ jvmtiCapabilities changed = {};
#define DEL_CAPABILITY(e) \
do { \
if (capabilities_ptr->e == 1) { \
@@ -1141,17 +1258,20 @@
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
@@ -1159,43 +1279,53 @@
static jvmtiError GetThreadCpuTime(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTimerInfo(env, info_ptr);
}
static jvmtiError GetTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTime(env, nanos_ptr);
}
static jvmtiError GetAvailableProcessors(jvmtiEnv* env, jint* processor_count_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetAvailableProcessors(env, processor_count_ptr);
}
static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToBootstrapClassLoaderSearch(env, segment);
}
static jvmtiError AddToSystemClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToSystemClassLoaderSearch(env, segment);
}
static jvmtiError GetSystemProperties(jvmtiEnv* env, jint* count_ptr, char*** property_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperties(env, count_ptr, property_ptr);
}
static jvmtiError GetSystemProperty(jvmtiEnv* env, const char* property, char** value_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperty(env, property, value_ptr);
}
static jvmtiError SetSystemProperty(jvmtiEnv* env, const char* property, const char* value) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::SetSystemProperty(env, property, value);
}
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr) {
+ ENSURE_VALID_ENV(env);
return PhaseUtil::GetPhase(env, phase_ptr);
}
@@ -1303,9 +1433,10 @@
}
}
- static jvmtiError SetVerboseFlag(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetVerboseFlag(jvmtiEnv* env,
jvmtiVerboseFlag flag,
jboolean value) {
+ ENSURE_VALID_ENV(env);
if (flag == jvmtiVerboseFlag::JVMTI_VERBOSE_OTHER) {
// OTHER is special, as it's 0, so can't do a bit check.
bool val = (value == JNI_TRUE) ? true : false;
@@ -1359,8 +1490,8 @@
return ERR(NONE);
}
- static jvmtiError GetJLocationFormat(jvmtiEnv* env ATTRIBUTE_UNUSED,
- jvmtiJlocationFormat* format_ptr) {
+ static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
+ ENSURE_VALID_ENV(env);
// Report BCI as jlocation format. We report dex bytecode indices.
if (format_ptr == nullptr) {
return ERR(NULL_POINTER);
@@ -1382,8 +1513,8 @@
ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
: art_vm(runtime),
local_data(nullptr),
- capabilities(),
- object_tag_table(new ObjectTagTable(event_handler)) {
+ capabilities() {
+ object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
functions = &gJvmtiInterface;
}
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index 4f5eb0c..1ddbb86 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -126,6 +126,7 @@
unsigned char** new_class_data) const {
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event");
+ DCHECK(*new_class_data == nullptr);
jint current_len = class_data_len;
unsigned char* current_class_data = const_cast<unsigned char*>(class_data);
ArtJvmTiEnv* last_env = nullptr;
@@ -168,15 +169,19 @@
// exactly the argument types of the corresponding Jvmti kEvent function pointer.
template <ArtJvmtiEvent kEvent, typename ...Args>
-inline void EventHandler::DispatchEvent(art::Thread* thread,
- Args... args) const {
- using FnType = void(jvmtiEnv*, Args...);
+inline void EventHandler::DispatchEvent(art::Thread* thread, Args... args) const {
for (ArtJvmTiEnv* env : envs) {
- if (ShouldDispatch<kEvent>(env, thread)) {
- FnType* callback = impl::GetCallback<kEvent>(env);
- if (callback != nullptr) {
- (*callback)(env, args...);
- }
+ DispatchEvent<kEvent, Args...>(env, thread, args...);
+ }
+}
+
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const {
+ using FnType = void(jvmtiEnv*, Args...);
+ if (ShouldDispatch<kEvent>(env, thread)) {
+ FnType* callback = impl::GetCallback<kEvent>(env);
+ if (callback != nullptr) {
+ (*callback)(env, args...);
}
}
}
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index 4e20d17..ae8bf0f 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -156,9 +156,14 @@
ArtJvmtiEvent event,
jvmtiEventMode mode);
+ // Dispatch event to all registered environments.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
inline void DispatchEvent(art::Thread* thread, Args... args) const;
+ // Dispatch event to the given environment, only.
+ template <ArtJvmtiEvent kEvent, typename ...Args>
+ ALWAYS_INLINE
+ inline void DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const;
// Tell the event handler capabilities were added/lost so it can adjust the sent events.If
// caps_added is true then caps is all the newly set capabilities of the jvmtiEnv. If it is false
diff --git a/runtime/openjdkjvmti/fixed_up_dex_file.cc b/runtime/openjdkjvmti/fixed_up_dex_file.cc
new file mode 100644
index 0000000..3338358
--- /dev/null
+++ b/runtime/openjdkjvmti/fixed_up_dex_file.cc
@@ -0,0 +1,145 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "fixed_up_dex_file.h"
+#include "dex_file-inl.h"
+
+// Compiler includes.
+#include "dex/dex_to_dex_decompiler.h"
+
+// Runtime includes.
+#include "oat_file.h"
+#include "vdex_file.h"
+
+namespace openjdkjvmti {
+
+static void RecomputeDexChecksum(art::DexFile* dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
+ dex_file->CalculateChecksum();
+}
+
+// TODO This is more complicated than it seems like it should be.
+// The fact we don't keep around the data of where in the flat binary log of dex-quickening changes
+// each dex file starts means we need to search for it. Since JVMTI is the exception though we are
+// not going to put in the effort to optimize for it.
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
+ if (oat_dex == nullptr) {
+ return;
+ }
+ const art::OatFile* oat_file = oat_dex->GetOatFile();
+ if (oat_file == nullptr) {
+ return;
+ }
+ const art::VdexFile* vdex = oat_file->GetVdexFile();
+ if (vdex == nullptr || vdex->GetQuickeningInfo().size() == 0) {
+ return;
+ }
+ const art::ArrayRef<const uint8_t> quickening_info(vdex->GetQuickeningInfo());
+ const uint8_t* quickening_info_ptr = quickening_info.data();
+ for (const art::OatDexFile* cur_oat_dex : oat_file->GetOatDexFiles()) {
+ std::string error;
+ std::unique_ptr<const art::DexFile> cur_dex_file(cur_oat_dex->OpenDexFile(&error));
+ DCHECK(cur_dex_file.get() != nullptr);
+ // Is this the dex file we are looking for?
+ if (UNLIKELY(cur_dex_file->Begin() == original_dex_file.Begin())) {
+ // Simple sanity check.
+ CHECK_EQ(new_dex_file.NumClassDefs(), original_dex_file.NumClassDefs());
+ for (uint32_t i = 0; i < new_dex_file.NumClassDefs(); ++i) {
+ const art::DexFile::ClassDef& class_def = new_dex_file.GetClassDef(i);
+ const uint8_t* class_data = new_dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (art::ClassDataItemIterator it(new_dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod() && it.GetMethodCodeItem() != nullptr) {
+ uint32_t quickening_size = *reinterpret_cast<const uint32_t*>(quickening_info_ptr);
+ quickening_info_ptr += sizeof(uint32_t);
+ art::optimizer::ArtDecompileDEX(
+ *it.GetMethodCodeItem(),
+ art::ArrayRef<const uint8_t>(quickening_info_ptr, quickening_size),
+ /*decompile_return_instruction*/true);
+ quickening_info_ptr += quickening_size;
+ }
+ }
+ }
+ // We don't need to bother looking through the rest of the dex-files.
+ break;
+ } else {
+ // Not the dex file we want. Skip over all the quickening info for all its classes.
+ for (uint32_t i = 0; i < cur_dex_file->NumClassDefs(); ++i) {
+ const art::DexFile::ClassDef& class_def = cur_dex_file->GetClassDef(i);
+ const uint8_t* class_data = cur_dex_file->GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (art::ClassDataItemIterator it(*cur_dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod() && it.GetMethodCodeItem() != nullptr) {
+ uint32_t quickening_size = *reinterpret_cast<const uint32_t*>(quickening_info_ptr);
+ quickening_info_ptr += sizeof(uint32_t);
+ quickening_info_ptr += quickening_size;
+ }
+ }
+ }
+ }
+ }
+}
+
+std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& original) {
+ // Copy the data into mutable memory.
+ std::vector<unsigned char> data;
+ data.resize(original.Size());
+ memcpy(data.data(), original.Begin(), original.Size());
+ std::string error;
+ std::unique_ptr<const art::DexFile> new_dex_file(art::DexFile::Open(
+ data.data(),
+ data.size(),
+ /*location*/"Unquickening_dexfile.dex",
+ /*location_checksum*/0,
+ /*oat_dex_file*/nullptr,
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error));
+ if (new_dex_file.get() == nullptr) {
+ LOG(ERROR) << "Unable to open dex file from memory for unquickening! error: " << error;
+ return nullptr;
+ }
+
+ DoDexUnquicken(*new_dex_file, original);
+ RecomputeDexChecksum(const_cast<art::DexFile*>(new_dex_file.get()));
+ std::unique_ptr<FixedUpDexFile> ret(new FixedUpDexFile(std::move(new_dex_file), std::move(data)));
+ return ret;
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/fixed_up_dex_file.h b/runtime/openjdkjvmti/fixed_up_dex_file.h
new file mode 100644
index 0000000..db12f48
--- /dev/null
+++ b/runtime/openjdkjvmti/fixed_up_dex_file.h
@@ -0,0 +1,82 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
+#define ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
+
+#include <memory>
+#include <vector>
+
+#include "jni.h"
+#include "jvmti.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+
+namespace openjdkjvmti {
+
+// A holder for a DexFile that has been 'fixed up' to ensure it is fully compliant with the
+// published standard (no internal/quick opcodes, all fields are the defined values, etc). This is
+// used to ensure that agents get a consistent dex file regardless of what version of android they
+// are running on.
+class FixedUpDexFile {
+ public:
+ static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ const art::DexFile& GetDexFile() {
+ return *dex_file_;
+ }
+
+ const unsigned char* Begin() {
+ return data_.data();
+ }
+
+ size_t Size() {
+ return data_.size();
+ }
+
+ private:
+ explicit FixedUpDexFile(std::unique_ptr<const art::DexFile> fixed_up_dex_file,
+ std::vector<unsigned char> data)
+ : dex_file_(std::move(fixed_up_dex_file)),
+ data_(std::move(data)) {}
+
+ // The fixed-up DexFile.
+ std::unique_ptr<const art::DexFile> dex_file_;
+ // The backing data for dex_file_.
+ const std::vector<unsigned char> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedUpDexFile);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
index ae36122..eeea75a 100644
--- a/runtime/openjdkjvmti/jvmti_weak_table.h
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -53,7 +53,7 @@
class JvmtiWeakTable : public art::gc::SystemWeakHolder {
public:
JvmtiWeakTable()
- : art::gc::SystemWeakHolder(kTaggingLockLevel),
+ : art::gc::SystemWeakHolder(art::kTaggingLockLevel),
update_since_last_sweep_(false) {
}
@@ -200,10 +200,6 @@
}
};
- // The tag table is used when visiting roots. So it needs to have a low lock level.
- static constexpr art::LockLevel kTaggingLockLevel =
- static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
-
std::unordered_map<art::GcRoot<art::mirror::Object>,
T,
HashGcRoot,
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index 4215588..dcdd3ed 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -33,6 +33,7 @@
#include <limits>
+#include "art_jvmti.h"
#include "events-inl.h"
#include "jvmti_weak_table-inl.h"
@@ -60,7 +61,7 @@
return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
}
void ObjectTagTable::HandleNullSweep(jlong tag) {
- event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(nullptr, tag);
+ event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(jvmti_env_, nullptr, tag);
}
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index b5a601c..ca84e44 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -42,12 +42,13 @@
namespace openjdkjvmti {
+struct ArtJvmTiEnv;
class EventHandler;
class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
public:
- explicit ObjectTagTable(EventHandler* event_handler) : event_handler_(event_handler) {
- }
+ ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
+ : event_handler_(event_handler), jvmti_env_(env) {}
bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -77,6 +78,7 @@
private:
EventHandler* event_handler_;
+ ArtJvmTiEnv* jvmti_env_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index 2d1b25e..e94c4e6 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -43,6 +43,7 @@
#include "common_throws.h"
#include "dex_file_annotations.h"
#include "events-inl.h"
+#include "fixed_up_dex_file.h"
#include "gc/heap.h"
#include "gc_root.h"
#include "handle.h"
@@ -55,6 +56,8 @@
#include "mirror/object_reference.h"
#include "mirror/object-inl.h"
#include "mirror/reference.h"
+#include "primitive.h"
+#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -78,9 +81,9 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Make the mmap
std::string error_msg;
+ art::ArraySlice<const unsigned char> final_data(final_dex_data, final_len);
std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
- final_len,
- final_dex_data,
+ final_data,
&error_msg));
if (map.get() == nullptr) {
LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
@@ -161,6 +164,8 @@
art::JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jobject> loader(
env, class_loader.IsNull() ? nullptr : env->AddLocalReference<jobject>(class_loader.Get()));
+ std::unique_ptr<FixedUpDexFile> dex_file_copy(FixedUpDexFile::Create(initial_dex_file));
+
// Go back to native.
art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
// Call all Non-retransformable agents.
@@ -174,14 +179,14 @@
loader.get(),
name.c_str(),
static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
- static_cast<jint>(initial_dex_file.Size()),
- static_cast<const unsigned char*>(initial_dex_file.Begin()),
+ static_cast<jint>(dex_file_copy->Size()),
+ static_cast<const unsigned char*>(dex_file_copy->Begin()),
static_cast<jint*>(&post_no_redefine_len),
static_cast<unsigned char**>(&post_no_redefine_dex_data));
if (post_no_redefine_dex_data == nullptr) {
DCHECK_EQ(post_no_redefine_len, 0);
- post_no_redefine_dex_data = const_cast<unsigned char*>(initial_dex_file.Begin());
- post_no_redefine_len = initial_dex_file.Size();
+ post_no_redefine_dex_data = const_cast<unsigned char*>(dex_file_copy->Begin());
+ post_no_redefine_len = dex_file_copy->Size();
} else {
post_no_redefine_unique_ptr = std::unique_ptr<const unsigned char>(post_no_redefine_dex_data);
DCHECK_GT(post_no_redefine_len, 0);
@@ -210,7 +215,7 @@
DCHECK_GT(final_len, 0);
}
- if (final_dex_data != initial_dex_file.Begin()) {
+ if (final_dex_data != dex_file_copy->Begin()) {
LOG(WARNING) << "Changing class " << descriptor;
art::ScopedObjectAccess soa(self);
art::StackHandleScope<2> hs(self);
@@ -228,14 +233,22 @@
}
// Allocate the byte array to store the dex file bytes in.
- art::Handle<art::mirror::ByteArray> arr(hs.NewHandle(
- art::mirror::ByteArray::AllocateAndFill(
- self,
- reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
- post_no_redefine_len)));
+ art::MutableHandle<art::mirror::Object> arr(hs.NewHandle<art::mirror::Object>(nullptr));
+ if (post_no_redefine_dex_data == dex_file_copy->Begin() && name != "java/lang/Long") {
+ // We didn't have any non-retransformable agents. We can just cache a pointer to the
+ // initial_dex_file. It will be kept live by the class_loader.
+ jlong dex_ptr = reinterpret_cast<uintptr_t>(&initial_dex_file);
+ art::JValue val;
+ val.SetJ(dex_ptr);
+ arr.Assign(art::BoxPrimitive(art::Primitive::kPrimLong, val));
+ } else {
+ arr.Assign(art::mirror::ByteArray::AllocateAndFill(
+ self,
+ reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
+ post_no_redefine_len));
+ }
if (arr.IsNull()) {
- LOG(WARNING) << "Unable to allocate byte array for initial dex-file bytes. Aborting "
- << "transformation";
+ LOG(WARNING) << "Unable to allocate memory for initial dex-file. Aborting transformation";
self->AssertPendingOOMException();
return;
}
@@ -259,7 +272,7 @@
}
// Actually set the ClassExt's original bytes once we have actually succeeded.
- ext->SetOriginalDexFileBytes(arr.Get());
+ ext->SetOriginalDexFile(arr.Get());
// Set the return values
*final_class_def = &dex_file->GetClassDef(0);
*final_dex_file = dex_file.release();
diff --git a/runtime/openjdkjvmti/ti_class_definition.cc b/runtime/openjdkjvmti/ti_class_definition.cc
index 2c2a79b..0671105 100644
--- a/runtime/openjdkjvmti/ti_class_definition.cc
+++ b/runtime/openjdkjvmti/ti_class_definition.cc
@@ -31,25 +31,147 @@
#include "ti_class_definition.h"
+#include "base/array_slice.h"
#include "dex_file.h"
+#include "fixed_up_dex_file.h"
#include "handle_scope-inl.h"
#include "handle.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "reflection.h"
#include "thread.h"
namespace openjdkjvmti {
-bool ArtClassDefinition::IsModified(art::Thread* self) const {
- if (modified) {
+bool ArtClassDefinition::IsModified() const {
+ // RedefineClasses calls always are 'modified' since they need to change the original_dex_file of
+ // the class.
+ if (redefined_) {
return true;
}
// Check if the dex file we want to set is the same as the current one.
+ // Unfortunately we need to do this check even if no modifications have been done since it could
+ // be that agents were removed in the mean-time so we still have a different dex file. The dex
+ // checksum means this is likely to be fairly fast.
+ return static_cast<jint>(original_dex_file_.size()) != dex_len_ ||
+ memcmp(&original_dex_file_.At(0), dex_data_.get(), dex_len_) != 0;
+}
+
+jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
+ JNIEnv* jni_env = GetJniEnv(env);
+ if (jni_env == nullptr) {
+ return ERR(INTERNAL);
+ }
+ art::ScopedObjectAccess soa(jni_env);
+ art::ObjPtr<art::mirror::Class> m_klass(soa.Decode<art::mirror::Class>(klass));
+ if (m_klass.IsNull()) {
+ return ERR(INVALID_CLASS);
+ }
+ klass_ = klass;
+ loader_ = soa.AddLocalReference<jobject>(m_klass->GetClassLoader());
+ std::string descriptor_store;
+ std::string descriptor(m_klass->GetDescriptor(&descriptor_store));
+ name_ = descriptor.substr(1, descriptor.size() - 2);
+ // Android doesn't really have protection domains.
+ protection_domain_ = nullptr;
+ return OK;
+}
+
+// Gets the data surrounding the given class.
+static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
+ art::Handle<art::mirror::Class> klass,
+ /*out*/jint* dex_data_len,
+ /*out*/unsigned char** dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::StackHandleScope<3> hs(art::Thread::Current());
+ art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->GetExtData()));
+ const art::DexFile* dex_file = nullptr;
+ if (!ext.IsNull()) {
+ art::Handle<art::mirror::Object> orig_dex(hs.NewHandle(ext->GetOriginalDexFile()));
+ if (!orig_dex.IsNull()) {
+ if (orig_dex->IsArrayInstance()) {
+ DCHECK(orig_dex->GetClass()->GetComponentType()->IsPrimitiveByte());
+ art::Handle<art::mirror::ByteArray> orig_dex_bytes(
+ hs.NewHandle(art::down_cast<art::mirror::ByteArray*>(orig_dex->AsArray())));
+ *dex_data_len = static_cast<jint>(orig_dex_bytes->GetLength());
+ return CopyDataIntoJvmtiBuffer(
+ env,
+ reinterpret_cast<const unsigned char*>(orig_dex_bytes->GetData()),
+ *dex_data_len,
+ /*out*/dex_data);
+ } else if (orig_dex->IsDexCache()) {
+ dex_file = orig_dex->AsDexCache()->GetDexFile();
+ } else {
+ DCHECK(orig_dex->GetClass()->DescriptorEquals("Ljava/lang/Long;"))
+ << "Expected java/lang/Long but found object of type "
+ << orig_dex->GetClass()->PrettyClass();
+ art::ObjPtr<art::mirror::Class> prim_long_class(
+ art::Runtime::Current()->GetClassLinker()->GetClassRoot(
+ art::ClassLinker::kPrimitiveLong));
+ art::JValue val;
+ if (!art::UnboxPrimitiveForResult(orig_dex.Get(), prim_long_class, &val)) {
+ // This should never happen.
+ return ERR(INTERNAL);
+ }
+ dex_file = reinterpret_cast<const art::DexFile*>(static_cast<uintptr_t>(val.GetJ()));
+ }
+ }
+ }
+ if (dex_file == nullptr) {
+ dex_file = &klass->GetDexFile();
+ }
+ std::unique_ptr<FixedUpDexFile> fixed_dex_file(FixedUpDexFile::Create(*dex_file));
+ *dex_data_len = static_cast<jint>(fixed_dex_file->Size());
+ return CopyDataIntoJvmtiBuffer(env,
+ fixed_dex_file->Begin(),
+ fixed_dex_file->Size(),
+ /*out*/dex_data);
+}
+
+jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, jclass klass) {
+ jvmtiError res = InitCommon(env, klass);
+ if (res != OK) {
+ return res;
+ }
+ unsigned char* new_data = nullptr;
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
- art::Handle<art::mirror::Class> h_klass(hs.NewHandle(self->DecodeJObject(klass)->AsClass()));
- const art::DexFile& cur_dex_file = h_klass->GetDexFile();
- return static_cast<jint>(cur_dex_file.Size()) != dex_len ||
- memcmp(cur_dex_file.Begin(), dex_data.get(), dex_len) != 0;
+ art::Handle<art::mirror::Class> m_klass(hs.NewHandle(self->DecodeJObject(klass)->AsClass()));
+ res = GetDexDataForRetransformation(env, m_klass, &dex_len_, &new_data);
+ if (res != OK) {
+ return res;
+ }
+ dex_data_ = MakeJvmtiUniquePtr(env, new_data);
+ if (m_klass->GetExtData() == nullptr || m_klass->GetExtData()->GetOriginalDexFile() == nullptr) {
+    // We have never redefined this class yet. Keep track of what the (de-quickened) dex file looks
+ // like so we can tell if anything has changed. Really we would like to just always do the
+ // 'else' block but the fact that we de-quickened stuff screws us over.
+ unsigned char* original_data_memory = nullptr;
+ res = CopyDataIntoJvmtiBuffer(env, dex_data_.get(), dex_len_, &original_data_memory);
+ original_dex_file_memory_ = MakeJvmtiUniquePtr(env, original_data_memory);
+ original_dex_file_ = art::ArraySlice<const unsigned char>(original_data_memory, dex_len_);
+ } else {
+ // We know that we have been redefined at least once (there is an original_dex_file set in
+ // the class) so we can just use the current dex file directly.
+ const art::DexFile& dex_file = m_klass->GetDexFile();
+ original_dex_file_ = art::ArraySlice<const unsigned char>(dex_file.Begin(), dex_file.Size());
+ }
+ return res;
+}
+
+jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def) {
+ jvmtiError res = InitCommon(env, def.klass);
+ if (res != OK) {
+ return res;
+ }
+ unsigned char* new_data = nullptr;
+ original_dex_file_ = art::ArraySlice<const unsigned char>(def.class_bytes, def.class_byte_count);
+ redefined_ = true;
+ dex_len_ = def.class_byte_count;
+ res = CopyDataIntoJvmtiBuffer(env, def.class_bytes, def.class_byte_count, /*out*/ &new_data);
+ dex_data_ = MakeJvmtiUniquePtr(env, new_data);
+ return res;
}
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class_definition.h b/runtime/openjdkjvmti/ti_class_definition.h
index 3c251d4..43d0c3f 100644
--- a/runtime/openjdkjvmti/ti_class_definition.h
+++ b/runtime/openjdkjvmti/ti_class_definition.h
@@ -39,37 +39,89 @@
// A struct that stores data needed for redefining/transforming classes. This structure should only
// even be accessed from a single thread and must not survive past the completion of the
// redefinition/retransformation function that created it.
-struct ArtClassDefinition {
+class ArtClassDefinition {
public:
- jclass klass;
- jobject loader;
- std::string name;
- jobject protection_domain;
- jint dex_len;
- JvmtiUniquePtr<unsigned char> dex_data;
- art::ArraySlice<const unsigned char> original_dex_file;
+ ArtClassDefinition()
+ : klass_(nullptr),
+ loader_(nullptr),
+ name_(),
+ protection_domain_(nullptr),
+ dex_len_(0),
+ dex_data_(nullptr),
+ original_dex_file_memory_(nullptr),
+ original_dex_file_(),
+ redefined_(false) {}
- ArtClassDefinition() = default;
+ jvmtiError Init(ArtJvmTiEnv* env, jclass klass);
+ jvmtiError Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def);
+
ArtClassDefinition(ArtClassDefinition&& o) = default;
+ ArtClassDefinition& operator=(ArtClassDefinition&& o) = default;
void SetNewDexData(ArtJvmTiEnv* env, jint new_dex_len, unsigned char* new_dex_data) {
+ DCHECK(IsInitialized());
if (new_dex_data == nullptr) {
return;
- } else if (new_dex_data != dex_data.get() || new_dex_len != dex_len) {
- SetModified();
- dex_len = new_dex_len;
- dex_data = MakeJvmtiUniquePtr(env, new_dex_data);
+ } else if (new_dex_data != dex_data_.get() || new_dex_len != dex_len_) {
+ dex_len_ = new_dex_len;
+ dex_data_ = MakeJvmtiUniquePtr(env, new_dex_data);
}
}
- void SetModified() {
- modified = true;
+ art::ArraySlice<const unsigned char> GetNewOriginalDexFile() const {
+ DCHECK(IsInitialized());
+ if (redefined_) {
+ return original_dex_file_;
+ } else {
+ return art::ArraySlice<const unsigned char>();
+ }
}
- bool IsModified(art::Thread* self) const REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool IsModified() const;
+
+ bool IsInitialized() const {
+ return klass_ != nullptr;
+ }
+
+ jclass GetClass() const {
+ DCHECK(IsInitialized());
+ return klass_;
+ }
+
+ jobject GetLoader() const {
+ DCHECK(IsInitialized());
+ return loader_;
+ }
+
+ const std::string& GetName() const {
+ DCHECK(IsInitialized());
+ return name_;
+ }
+
+ jobject GetProtectionDomain() const {
+ DCHECK(IsInitialized());
+ return protection_domain_;
+ }
+
+ art::ArraySlice<const unsigned char> GetDexData() const {
+ DCHECK(IsInitialized());
+ return art::ArraySlice<const unsigned char>(dex_data_.get(), dex_len_);
+ }
private:
- bool modified;
+ jvmtiError InitCommon(ArtJvmTiEnv* env, jclass klass);
+
+ jclass klass_;
+ jobject loader_;
+ std::string name_;
+ jobject protection_domain_;
+ jint dex_len_;
+ JvmtiUniquePtr<unsigned char> dex_data_;
+ JvmtiUniquePtr<unsigned char> original_dex_file_memory_;
+ art::ArraySlice<const unsigned char> original_dex_file_;
+ bool redefined_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index c2495e3..49d9aca 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -882,12 +882,17 @@
void AddRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ if (stop_reports_) {
+ return;
+ }
+ bool add_to_worklist = ReportRoot(root_obj, info);
// We use visited_ to mark roots already so we do not need another set.
if (visited_->find(root_obj) == visited_->end()) {
visited_->insert(root_obj);
- worklist_->push_back(root_obj);
+ if (add_to_worklist) {
+ worklist_->push_back(root_obj);
+ }
}
- ReportRoot(root_obj, info);
}
// Remove NO_THREAD_SAFETY_ANALYSIS once ASSERT_CAPABILITY works correctly.
@@ -993,7 +998,7 @@
UNREACHABLE();
}
- void ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
+ bool ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
jvmtiHeapReferenceInfo ref_info;
@@ -1002,6 +1007,7 @@
if ((result & JVMTI_VISIT_ABORT) != 0) {
stop_reports_ = true;
}
+ return (result & JVMTI_VISIT_OBJECTS) != 0;
}
private:
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index e494cb6..941cf7b 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -40,6 +40,7 @@
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "ti_thread.h"
namespace openjdkjvmti {
@@ -69,6 +70,7 @@
break;
case RuntimePhase::kInit:
{
+ ThreadUtil::CacheData();
ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread());
art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(nullptr, GetJniEnv(), thread.get());
@@ -105,6 +107,16 @@
return ERR(NONE);
}
+bool PhaseUtil::IsLivePhase() {
+ jvmtiPhase now = PhaseUtil::current_phase_;
+ DCHECK(now == JVMTI_PHASE_ONLOAD ||
+ now == JVMTI_PHASE_PRIMORDIAL ||
+ now == JVMTI_PHASE_START ||
+ now == JVMTI_PHASE_LIVE ||
+ now == JVMTI_PHASE_DEAD);
+ return now == JVMTI_PHASE_LIVE;
+}
+
void PhaseUtil::SetToOnLoad() {
DCHECK_EQ(0u, static_cast<size_t>(PhaseUtil::current_phase_));
PhaseUtil::current_phase_ = JVMTI_PHASE_ONLOAD;
@@ -117,6 +129,7 @@
void PhaseUtil::SetToLive() {
DCHECK_EQ(static_cast<size_t>(0), static_cast<size_t>(PhaseUtil::current_phase_));
+ ThreadUtil::CacheData();
PhaseUtil::current_phase_ = JVMTI_PHASE_LIVE;
}
diff --git a/runtime/openjdkjvmti/ti_phase.h b/runtime/openjdkjvmti/ti_phase.h
index 851fc27..a2c0d11 100644
--- a/runtime/openjdkjvmti/ti_phase.h
+++ b/runtime/openjdkjvmti/ti_phase.h
@@ -42,6 +42,7 @@
class PhaseUtil {
public:
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr);
+ static bool IsLivePhase();
static void Register(EventHandler* event_handler);
static void Unregister();
diff --git a/runtime/openjdkjvmti/ti_properties.cc b/runtime/openjdkjvmti/ti_properties.cc
index 4f4f013..8ee5366 100644
--- a/runtime/openjdkjvmti/ti_properties.cc
+++ b/runtime/openjdkjvmti/ti_properties.cc
@@ -34,8 +34,15 @@
#include <string.h>
#include <vector>
+#include "jni.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+
#include "art_jvmti.h"
#include "runtime.h"
+#include "thread-inl.h"
+#include "ti_phase.h"
+#include "well_known_classes.h"
namespace openjdkjvmti {
@@ -145,6 +152,56 @@
return result;
}
+// See dalvik_system_VMRuntime.cpp.
+static const char* DefaultToDot(const std::string& class_path) {
+ return class_path.empty() ? "." : class_path.c_str();
+}
+
+// Handle kPropertyLibraryPath.
+static jvmtiError GetLibraryPath(jvmtiEnv* env, char** value_ptr) {
+ const std::vector<std::string>& runtime_props = art::Runtime::Current()->GetProperties();
+ for (const std::string& prop_assignment : runtime_props) {
+ size_t assign_pos = prop_assignment.find('=');
+ if (assign_pos != std::string::npos && assign_pos > 0) {
+ if (prop_assignment.substr(0, assign_pos) == kPropertyLibraryPath) {
+ return Copy(env, prop_assignment.substr(assign_pos + 1).c_str(), value_ptr);
+ }
+ }
+ }
+ if (!PhaseUtil::IsLivePhase()) {
+ return ERR(NOT_AVAILABLE);
+ }
+ // We expect this call to be rare. So don't optimize.
+ DCHECK(art::Thread::Current() != nullptr);
+ JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
+ jmethodID get_prop = jni_env->GetStaticMethodID(art::WellKnownClasses::java_lang_System,
+ "getProperty",
+ "(Ljava/lang/String;)Ljava/lang/String;");
+ CHECK(get_prop != nullptr);
+
+ ScopedLocalRef<jobject> input_str(jni_env, jni_env->NewStringUTF(kPropertyLibraryPath));
+ if (input_str.get() == nullptr) {
+ jni_env->ExceptionClear();
+ return ERR(OUT_OF_MEMORY);
+ }
+
+ ScopedLocalRef<jobject> prop_res(
+ jni_env, jni_env->CallStaticObjectMethod(art::WellKnownClasses::java_lang_System,
+ get_prop,
+ input_str.get()));
+ if (jni_env->ExceptionCheck() == JNI_TRUE) {
+ jni_env->ExceptionClear();
+ return ERR(INTERNAL);
+ }
+ if (prop_res.get() == nullptr) {
+ *value_ptr = nullptr;
+ return ERR(NONE);
+ }
+
+ ScopedUtfChars chars(jni_env, reinterpret_cast<jstring>(prop_res.get()));
+ return Copy(env, chars.c_str(), value_ptr);
+}
+
jvmtiError PropertiesUtil::GetSystemProperty(jvmtiEnv* env,
const char* property,
char** value_ptr) {
@@ -153,22 +210,11 @@
}
if (strcmp(property, kPropertyLibraryPath) == 0) {
- // TODO: In the live phase, we should probably compare to System.getProperty. java.library.path
- // may not be set initially, and is then freely modifiable.
- const std::vector<std::string>& runtime_props = art::Runtime::Current()->GetProperties();
- for (const std::string& prop_assignment : runtime_props) {
- size_t assign_pos = prop_assignment.find('=');
- if (assign_pos != std::string::npos && assign_pos > 0) {
- if (prop_assignment.substr(0, assign_pos) == kPropertyLibraryPath) {
- return Copy(env, prop_assignment.substr(assign_pos + 1).c_str(), value_ptr);
- }
- }
- }
- return ERR(NOT_AVAILABLE);
+ return GetLibraryPath(env, value_ptr);
}
if (strcmp(property, kPropertyClassPath) == 0) {
- return Copy(env, art::Runtime::Current()->GetClassPathString().c_str(), value_ptr);
+ return Copy(env, DefaultToDot(art::Runtime::Current()->GetClassPathString()), value_ptr);
}
for (size_t i = 0; i != kPropertiesSize; ++i) {
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 9c1d6ef..0655079 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -186,6 +186,7 @@
DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
new_obsolete_method->SetIsObsolete();
new_obsolete_method->SetDontCompile();
+ cl->SetEntryPointsForObsoleteMethod(new_obsolete_method);
obsolete_maps_->RecordObsolete(old_method, new_obsolete_method);
// Update JIT Data structures to point to the new method.
art::jit::Jit* jit = art::Runtime::Current()->GetJit();
@@ -215,7 +216,6 @@
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
jclass klass,
jboolean* is_redefinable) {
- // TODO Check for the appropriate feature flags once we have enabled them.
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
@@ -261,13 +261,12 @@
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- jint data_len,
- const unsigned char* dex_data,
+ art::ArraySlice<const unsigned char> data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
nullptr,
- data_len,
+ data.size(),
PROT_READ|PROT_WRITE,
/*low_4gb*/false,
/*reuse*/false,
@@ -275,7 +274,7 @@
if (map == nullptr) {
return map;
}
- memcpy(map->Begin(), dex_data, data_len);
+ memcpy(map->Begin(), &data.At(0), data.size());
// Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
// programs from corrupting it.
map->Protect(PROT_READ);
@@ -344,13 +343,7 @@
memcpy(class_bytes_copy, definitions[i].class_bytes, definitions[i].class_byte_count);
ArtClassDefinition def;
- def.dex_len = definitions[i].class_byte_count;
- def.dex_data = MakeJvmtiUniquePtr(env, class_bytes_copy);
- // We are definitely modified.
- def.SetModified();
- def.original_dex_file = art::ArraySlice<const unsigned char>(definitions[i].class_bytes,
- definitions[i].class_byte_count);
- res = Transformer::FillInTransformationData(env, definitions[i].klass, &def);
+ res = def.Init(env, definitions[i]);
if (res != OK) {
return res;
}
@@ -386,7 +379,7 @@
Redefiner r(runtime, self, error_msg);
for (const ArtClassDefinition& def : definitions) {
// Only try to transform classes that have been modified.
- if (def.IsModified(self)) {
+ if (def.IsModified()) {
jvmtiError res = r.AddRedefinition(env, def);
if (res != OK) {
return res;
@@ -399,25 +392,24 @@
jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition& def) {
std::string original_dex_location;
jvmtiError ret = OK;
- if ((ret = GetClassLocation(env, def.klass, &original_dex_location))) {
+ if ((ret = GetClassLocation(env, def.GetClass(), &original_dex_location))) {
*error_msg_ = "Unable to get original dex file location!";
return ret;
}
char* generic_ptr_unused = nullptr;
char* signature_ptr = nullptr;
- if ((ret = env->GetClassSignature(def.klass, &signature_ptr, &generic_ptr_unused)) != OK) {
+ if ((ret = env->GetClassSignature(def.GetClass(), &signature_ptr, &generic_ptr_unused)) != OK) {
*error_msg_ = "Unable to get class signature!";
return ret;
}
JvmtiUniquePtr<char> generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
JvmtiUniquePtr<char> signature_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- def.dex_len,
- def.dex_data.get(),
+ def.GetDexData(),
error_msg_));
std::ostringstream os;
if (map.get() == nullptr) {
- os << "Failed to create anonymous mmap for modified dex file of class " << def.name
+ os << "Failed to create anonymous mmap for modified dex file of class " << def.GetName()
<< "in dex file " << original_dex_location << " because: " << *error_msg_;
*error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
@@ -434,16 +426,16 @@
/*verify_checksum*/true,
error_msg_));
if (dex_file.get() == nullptr) {
- os << "Unable to load modified dex file for " << def.name << ": " << *error_msg_;
+ os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
*error_msg_ = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
redefinitions_.push_back(
Redefiner::ClassRedefinition(this,
- def.klass,
+ def.GetClass(),
dex_file.release(),
signature_ptr,
- def.original_dex_file));
+ def.GetNewOriginalDexFile()));
return OK;
}
@@ -469,7 +461,7 @@
result_ = result;
}
-art::mirror::ByteArray* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFileBytes() {
+art::mirror::Object* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFile() {
// If we have been specifically given a new set of bytes use that
if (original_dex_file_.size() != 0) {
return art::mirror::ByteArray::AllocateAndFill(
@@ -481,24 +473,21 @@
// See if we already have one set.
art::ObjPtr<art::mirror::ClassExt> ext(GetMirrorClass()->GetExtData());
if (!ext.IsNull()) {
- art::ObjPtr<art::mirror::ByteArray> old_original_bytes(ext->GetOriginalDexFileBytes());
- if (!old_original_bytes.IsNull()) {
+ art::ObjPtr<art::mirror::Object> old_original_dex_file(ext->GetOriginalDexFile());
+ if (!old_original_dex_file.IsNull()) {
// We do. Use it.
- return old_original_bytes.Ptr();
+ return old_original_dex_file.Ptr();
}
}
- // Copy the current dex_file
- const art::DexFile& current_dex_file = GetMirrorClass()->GetDexFile();
+  // Return the current dex_cache which has the dex file in it.
+ art::ObjPtr<art::mirror::DexCache> current_dex_cache(GetMirrorClass()->GetDexCache());
// TODO Handle this or make it so it cannot happen.
- if (current_dex_file.NumClassDefs() != 1) {
+ if (current_dex_cache->GetDexFile()->NumClassDefs() != 1) {
LOG(WARNING) << "Current dex file has more than one class in it. Calling RetransformClasses "
<< "on this class might fail if no transformations are applied to it!";
}
- return art::mirror::ByteArray::AllocateAndFill(
- driver_->self_,
- reinterpret_cast<const signed char*>(current_dex_file.Begin()),
- current_dex_file.Size());
+ return current_dex_cache.Ptr();
}
struct CallbackCtx {
@@ -800,9 +789,11 @@
kSlotNewDexCache = 3,
kSlotMirrorClass = 4,
kSlotOrigDexFile = 5,
+ kSlotOldObsoleteMethods = 6,
+ kSlotOldDexCaches = 7,
// Must be last one.
- kNumSlots = 6,
+ kNumSlots = 8,
};
// This needs to have a HandleScope passed in that is capable of creating a new Handle without
@@ -825,7 +816,6 @@
return arr_.IsNull();
}
- // TODO Maybe make an iterable view type to simplify using this.
art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
@@ -847,9 +837,21 @@
return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
}
- art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index) const
+ art::mirror::Object* GetOriginalDexFile(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::ByteArray*>(GetSlot(klass_index, kSlotOrigDexFile));
+ return art::down_cast<art::mirror::Object*>(GetSlot(klass_index, kSlotOrigDexFile));
+ }
+
+ art::mirror::PointerArray* GetOldObsoleteMethods(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::PointerArray*>(
+ GetSlot(klass_index, kSlotOldObsoleteMethods));
+ }
+
+ art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::ObjectArray<art::mirror::DexCache>*>(
+ GetSlot(klass_index, kSlotOldDexCaches));
}
void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
@@ -872,10 +874,18 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotMirrorClass, klass);
}
- void SetOriginalDexFileBytes(jint klass_index, art::mirror::ByteArray* bytes)
+ void SetOriginalDexFile(jint klass_index, art::mirror::Object* bytes)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOrigDexFile, bytes);
}
+ void SetOldObsoleteMethods(jint klass_index, art::mirror::PointerArray* methods)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotOldObsoleteMethods, methods);
+ }
+ void SetOldDexCaches(jint klass_index, art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotOldDexCaches, caches);
+ }
int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_->GetLength() / kNumSlots;
@@ -985,10 +995,19 @@
art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetMirrorClass(idx_);
}
- art::mirror::ByteArray* GetOriginalDexFileBytes() const
+ art::mirror::Object* GetOriginalDexFile() const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return holder_.GetOriginalDexFileBytes(idx_);
+ return holder_.GetOriginalDexFile(idx_);
}
+ art::mirror::PointerArray* GetOldObsoleteMethods() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOldObsoleteMethods(idx_);
+ }
+ art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOldDexCaches(idx_);
+ }
+
int32_t GetIndex() const {
return idx_;
}
@@ -1010,9 +1029,17 @@
void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetMirrorClass(idx_, klass);
}
- void SetOriginalDexFileBytes(art::mirror::ByteArray* bytes)
+ void SetOriginalDexFile(art::mirror::Object* bytes)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- holder_.SetOriginalDexFileBytes(idx_, bytes);
+ holder_.SetOriginalDexFile(idx_, bytes);
+ }
+ void SetOldObsoleteMethods(art::mirror::PointerArray* methods)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOldObsoleteMethods(idx_, methods);
+ }
+ void SetOldDexCaches(art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOldDexCaches(idx_, caches);
}
private:
@@ -1138,8 +1165,8 @@
}
// We won't always need to set this field.
- cur_data->SetOriginalDexFileBytes(AllocateOrGetOriginalDexFileBytes());
- if (cur_data->GetOriginalDexFileBytes() == nullptr) {
+ cur_data->SetOriginalDexFile(AllocateOrGetOriginalDexFile());
+ if (cur_data->GetOriginalDexFile() == nullptr) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate array for original dex file");
@@ -1174,9 +1201,15 @@
return true;
}
-bool Redefiner::EnsureAllClassAllocationsFinished() {
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.EnsureClassAllocationsFinished()) {
+void Redefiner::RestoreObsoleteMethodMapsIfUnneeded(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ data.GetRedefinition().RestoreObsoleteMethodMapsIfUnneeded(&data);
+ }
+}
+
+bool Redefiner::EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().EnsureClassAllocationsFinished(&data)) {
return false;
}
}
@@ -1249,13 +1282,9 @@
// between allocating them and pausing all threads before we can update them so we need to do a
// try loop.
if (!CheckAllRedefinitionAreValid() ||
- !EnsureAllClassAllocationsFinished() ||
+ !EnsureAllClassAllocationsFinished(holder) ||
!FinishAllRemainingAllocations(holder) ||
!CheckAllClassesAreVerified(holder)) {
- // TODO Null out the ClassExt fields we allocated (if possible, might be racing with another
- // redefineclass call which made it even bigger. Leak shouldn't be huge (2x array of size
- // declared_methods_.length) but would be good to get rid of. All other allocations should be
- // cleaned up by the GC eventually.
return result_;
}
@@ -1285,13 +1314,13 @@
art::mirror::Class* klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFileBytes());
+ redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFile());
}
+ RestoreObsoleteMethodMapsIfUnneeded(holder);
// TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
- // TODO Shrink the obsolete method maps if possible?
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
@@ -1365,7 +1394,7 @@
void Redefiner::ClassRedefinition::UpdateClass(
art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- art::ObjPtr<art::mirror::ByteArray> original_dex_file) {
+ art::ObjPtr<art::mirror::Object> original_dex_file) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
const art::DexFile::ClassDef& class_def = dex_file_->GetClassDef(0);
UpdateMethods(mclass, new_dex_cache, class_def);
@@ -1379,12 +1408,38 @@
mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
CHECK(!ext.IsNull());
- ext->SetOriginalDexFileBytes(original_dex_file);
+ ext->SetOriginalDexFile(original_dex_file);
+}
+
+// Restores the old obsolete method maps if it turns out they weren't needed (i.e. there were no new
+// obsolete methods).
+void Redefiner::ClassRedefinition::RestoreObsoleteMethodMapsIfUnneeded(
+ const RedefinitionDataIter* cur_data) {
+ art::mirror::Class* klass = GetMirrorClass();
+ art::mirror::ClassExt* ext = klass->GetExtData();
+ art::mirror::PointerArray* methods = ext->GetObsoleteMethods();
+ int32_t old_length =
+ cur_data->GetOldDexCaches() == nullptr ? 0 : cur_data->GetOldDexCaches()->GetLength();
+ int32_t expected_length =
+ old_length + klass->NumDirectMethods() + klass->NumDeclaredVirtualMethods();
+ // Check to make sure we are only undoing this one.
+ if (expected_length == methods->GetLength()) {
+ for (int32_t i = old_length; i < expected_length; i++) {
+ if (methods->GetElementPtrSize<art::ArtMethod*>(i, art::kRuntimePointerSize) != nullptr) {
+ // We actually have some new obsolete methods. Just abort since we cannot safely shrink the
+ // obsolete methods array.
+ return;
+ }
+ }
+ // No new obsolete methods! We can get rid of the maps.
+ ext->SetObsoleteArrays(cur_data->GetOldObsoleteMethods(), cur_data->GetOldDexCaches());
+ }
}
// This function does all (java) allocations we need to do for the Class being redefined.
// TODO Change this name maybe?
-bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
+bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished(
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::Handle<art::mirror::Class> klass(hs.NewHandle(
driver_->self_->DecodeJObject(klass_)->AsClass()));
@@ -1401,22 +1456,20 @@
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
return false;
}
- // Allocate the 2 arrays that make up the obsolete methods map. Since the contents of the arrays
+ // First save the old values of the 2 arrays that make up the obsolete methods maps. Then
+ // allocate the 2 arrays that make up the obsolete methods map. Since the contents of the arrays
// are only modified when all threads (other than the modifying one) are suspended we don't need
// to worry about missing the unsyncronized writes to the array. We do synchronize when setting it
// however, since that can happen at any time.
- // TODO Clear these after we walk the stacks in order to free them in the (likely?) event there
- // are no obsolete methods.
- {
- art::ObjectLock<art::mirror::ClassExt> lock(driver_->self_, ext);
- if (!ext->ExtendObsoleteArrays(
- driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
- // OOM. Clear exception and return error.
- driver_->self_->AssertPendingOOMException();
- driver_->self_->ClearException();
- RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
- return false;
- }
+ cur_data->SetOldObsoleteMethods(ext->GetObsoleteMethods());
+ cur_data->SetOldDexCaches(ext->GetObsoleteDexCaches());
+ if (!ext->ExtendObsoleteArrays(
+ driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
+ // OOM. Clear exception and return error.
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
+ return false;
}
return true;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 4313a94..5e31627 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -69,15 +69,12 @@
class RedefinitionDataIter;
// Class that can redefine a single class's methods.
-// TODO We should really make this be driven by an outside class so we can do multiple classes at
-// the same time and have less required cleanup.
class Redefiner {
public:
// Redefine the given classes with the given dex data. Note this function does not take ownership
// of the dex_data pointers. It is not used after this call however and may be freed if desired.
// The caller is responsible for freeing it. The runtime makes its own copy of the data. This
// function does not call the transformation events.
- // TODO Check modified flag of the definitions.
static jvmtiError RedefineClassesDirect(ArtJvmTiEnv* env,
art::Runtime* runtime,
art::Thread* self,
@@ -87,7 +84,6 @@
// Redefine the given classes with the given dex data. Note this function does not take ownership
// of the dex_data pointers. It is not used after this call however and may be freed if desired.
// The caller is responsible for freeing it. The runtime makes its own copy of the data.
- // TODO This function should call the transformation events.
static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
EventHandler* event_handler,
art::Runtime* runtime,
@@ -99,8 +95,7 @@
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- jint data_len,
- const unsigned char* dex_data,
+ art::ArraySlice<const unsigned char> data,
std::string* error_msg);
private:
@@ -137,7 +132,7 @@
REQUIRES_SHARED(art::Locks::mutator_lock_);
// This may return nullptr with a OOME pending if allocation fails.
- art::mirror::ByteArray* AllocateOrGetOriginalDexFileBytes()
+ art::mirror::Object* AllocateOrGetOriginalDexFile()
REQUIRES_SHARED(art::Locks::mutator_lock_);
void RecordFailure(jvmtiError e, const std::string& err) {
@@ -165,9 +160,8 @@
REQUIRES_SHARED(art::Locks::mutator_lock_);
// Preallocates all needed allocations in klass so that we can pause execution safely.
- // TODO We should be able to free the arrays if they end up not being used. Investigate doing
- // this in the future. For now we will just take the memory hit.
- bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureClassAllocationsFinished(/*out*/RedefinitionDataIter* data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
// This will check that no constraints are violated (more than 1 class in dex file, any changes
// in number/declaration of methods & fields, changes in access flags, etc.)
@@ -196,7 +190,10 @@
void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- art::ObjPtr<art::mirror::ByteArray> original_dex_file)
+ art::ObjPtr<art::mirror::Object> original_dex_file)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void RestoreObsoleteMethodMapsIfUnneeded(const RedefinitionDataIter* cur_data)
REQUIRES(art::Locks::mutator_lock_);
void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -219,7 +216,6 @@
// mirror::Class difficult and confusing.
std::string* error_msg_;
- // TODO Maybe change jclass to a mirror::Class
Redefiner(art::Runtime* runtime,
art::Thread* self,
std::string* error_msg)
@@ -236,17 +232,21 @@
/*out*/std::string* error_msg)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- // TODO Put on all the lock qualifiers.
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UnregisterAllBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ // Restores the old obsolete methods maps if it turns out they weren't needed (ie there were no
+ // new obsolete methods).
+ void RestoreObsoleteMethodMapsIfUnneeded(RedefinitionDataHolder& holder)
+ REQUIRES(art::Locks::mutator_lock_);
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 788ac30..e5ff090 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -44,6 +44,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "obj_ptr.h"
+#include "ti_phase.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -54,6 +55,8 @@
namespace openjdkjvmti {
+art::ArtField* ThreadUtil::context_class_loader_ = nullptr;
+
struct ThreadCallback : public art::ThreadLifecycleCallback, public art::RuntimePhaseCallback {
jthread GetThreadObject(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (self->GetPeer() == nullptr) {
@@ -121,6 +124,16 @@
runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gThreadCallback);
}
+void ThreadUtil::CacheData() {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::ObjPtr<art::mirror::Class> thread_class =
+ soa.Decode<art::mirror::Class>(art::WellKnownClasses::java_lang_Thread);
+ CHECK(thread_class != nullptr);
+ context_class_loader_ = thread_class->FindDeclaredInstanceField("contextClassLoader",
+ "Ljava/lang/ClassLoader;");
+ CHECK(context_class_loader_ != nullptr);
+}
+
void ThreadUtil::Unregister() {
art::ScopedThreadStateChange stsc(art::Thread::Current(),
art::ThreadState::kWaitingForDebuggerToAttach);
@@ -146,22 +159,6 @@
return ERR(NONE);
}
-// Read the context classloader from a Java thread object. This is a lazy implementation
-// that assumes GetThreadInfo isn't called too often. If we instead cache the ArtField,
-// we will have to add synchronization as this can't be cached on startup (which is
-// potentially runtime startup).
-static art::ObjPtr<art::mirror::Object> GetContextClassLoader(art::ObjPtr<art::mirror::Object> peer)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (peer == nullptr) {
- return nullptr;
- }
- art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
- art::ArtField* cc_field = klass->FindDeclaredInstanceField("contextClassLoader",
- "Ljava/lang/ClassLoader;");
- CHECK(cc_field != nullptr);
- return cc_field->GetObject(peer);
-}
-
// Get the native thread. The spec says a null object denotes the current thread.
static art::Thread* GetNativeThread(jthread thread,
const art::ScopedObjectAccessAlreadyRunnable& soa)
@@ -178,6 +175,9 @@
if (info_ptr == nullptr) {
return ERR(NULL_POINTER);
}
+ if (!PhaseUtil::IsLivePhase()) {
+ return JVMTI_ERROR_WRONG_PHASE;
+ }
art::ScopedObjectAccess soa(art::Thread::Current());
@@ -217,7 +217,10 @@
}
// Context classloader.
- art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ DCHECK(context_class_loader_ != nullptr);
+ art::ObjPtr<art::mirror::Object> ccl = peer != nullptr
+ ? context_class_loader_->GetObject(peer)
+ : nullptr;
info_ptr->context_class_loader = ccl == nullptr
? nullptr
: soa.AddLocalReference<jobject>(ccl);
@@ -272,7 +275,10 @@
}
// Context classloader.
- art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ DCHECK(context_class_loader_ != nullptr);
+ art::ObjPtr<art::mirror::Object> ccl = peer != nullptr
+ ? context_class_loader_->GetObject(peer)
+ : nullptr;
info_ptr->context_class_loader = ccl == nullptr
? nullptr
: soa.AddLocalReference<jobject>(ccl);
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
index f6f93ee..c7f75d8 100644
--- a/runtime/openjdkjvmti/ti_thread.h
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -35,6 +35,10 @@
#include "jni.h"
#include "jvmti.h"
+namespace art {
+class ArtField;
+}
+
namespace openjdkjvmti {
class EventHandler;
@@ -44,6 +48,9 @@
static void Register(EventHandler* event_handler);
static void Unregister();
+ // To be called when it is safe to cache data.
+ static void CacheData();
+
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr);
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
@@ -60,6 +67,9 @@
jvmtiStartFunction proc,
const void* arg,
jint priority);
+
+ private:
+ static art::ArtField* context_class_loader_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index bd52cbb..15d8dd0 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -42,6 +42,7 @@
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
+#include "jvalue.h"
#include "jvmti.h"
#include "linear_alloc.h"
#include "mem_map.h"
@@ -69,17 +70,18 @@
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
+ art::ArraySlice<const unsigned char> dex_data = def.GetDexData();
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
- def.klass,
- def.loader,
- def.name.c_str(),
- def.protection_domain,
- def.dex_len,
- static_cast<const unsigned char*>(def.dex_data.get()),
- &new_len,
- &new_data);
+ def.GetClass(),
+ def.GetLoader(),
+ def.GetName().c_str(),
+ def.GetProtectionDomain(),
+ static_cast<jint>(dex_data.size()),
+ &dex_data.At(0),
+ /*out*/&new_len,
+ /*out*/&new_data);
def.SetNewDexData(env, new_len, new_data);
}
return OK;
@@ -117,7 +119,7 @@
return ERR(UNMODIFIABLE_CLASS);
}
ArtClassDefinition def;
- res = FillInTransformationData(env, classes[i], &def);
+ res = def.Init(env, classes[i]);
if (res != OK) {
return res;
}
@@ -146,63 +148,4 @@
return OK;
}
-jvmtiError Transformer::GetDexDataForRetransformation(ArtJvmTiEnv* env,
- art::Handle<art::mirror::Class> klass,
- /*out*/jint* dex_data_len,
- /*out*/unsigned char** dex_data) {
- art::StackHandleScope<2> hs(art::Thread::Current());
- art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->GetExtData()));
- if (!ext.IsNull()) {
- art::Handle<art::mirror::ByteArray> orig_dex(hs.NewHandle(ext->GetOriginalDexFileBytes()));
- if (!orig_dex.IsNull()) {
- *dex_data_len = static_cast<jint>(orig_dex->GetLength());
- return CopyDataIntoJvmtiBuffer(env,
- reinterpret_cast<const unsigned char*>(orig_dex->GetData()),
- *dex_data_len,
- /*out*/dex_data);
- }
- }
- // TODO De-quicken the dex file before passing it to the agents.
- LOG(WARNING) << "Dex file is not de-quickened yet! Quickened dex instructions might be present";
- const art::DexFile& dex = klass->GetDexFile();
- *dex_data_len = static_cast<jint>(dex.Size());
- return CopyDataIntoJvmtiBuffer(env, dex.Begin(), *dex_data_len, /*out*/dex_data);
-}
-
-// TODO Move this function somewhere more appropriate.
-// Gets the data surrounding the given class.
-// TODO Make this less magical.
-jvmtiError Transformer::FillInTransformationData(ArtJvmTiEnv* env,
- jclass klass,
- ArtClassDefinition* def) {
- JNIEnv* jni_env = GetJniEnv(env);
- if (jni_env == nullptr) {
- // TODO Different error might be better?
- return ERR(INTERNAL);
- }
- art::ScopedObjectAccess soa(jni_env);
- art::StackHandleScope<3> hs(art::Thread::Current());
- art::Handle<art::mirror::Class> hs_klass(hs.NewHandle(soa.Decode<art::mirror::Class>(klass)));
- if (hs_klass.IsNull()) {
- return ERR(INVALID_CLASS);
- }
- def->klass = klass;
- def->loader = soa.AddLocalReference<jobject>(hs_klass->GetClassLoader());
- std::string descriptor_store;
- std::string descriptor(hs_klass->GetDescriptor(&descriptor_store));
- def->name = descriptor.substr(1, descriptor.size() - 2);
- // TODO is this always null?
- def->protection_domain = nullptr;
- if (def->dex_data.get() == nullptr) {
- unsigned char* new_data;
- jvmtiError res = GetDexDataForRetransformation(env, hs_klass, &def->dex_len, &new_data);
- if (res == OK) {
- def->dex_data = MakeJvmtiUniquePtr(env, new_data);
- } else {
- return res;
- }
- }
- return OK;
-}
-
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index c6a36e8..ba40e04 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -61,18 +61,6 @@
jint class_count,
const jclass* classes,
/*out*/std::string* error_msg);
-
- // Gets the data surrounding the given class.
- static jvmtiError FillInTransformationData(ArtJvmTiEnv* env,
- jclass klass,
- ArtClassDefinition* def);
-
- private:
- static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
- art::Handle<art::mirror::Class> klass,
- /*out*/jint* dex_data_length,
- /*out*/unsigned char** dex_data)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
};
} // namespace openjdkjvmti
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9113f83..0784e59 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -96,7 +96,7 @@
// .WithType<std::vector<ti::Agent>>().AppendValues()
// .IntoKey(M::AgentLib)
.Define("-agentpath:_")
- .WithType<std::vector<ti::Agent>>().AppendValues()
+ .WithType<std::list<ti::Agent>>().AppendValues()
.IntoKey(M::AgentPath)
.Define("-Xms_")
.WithType<MemoryKiB>()
@@ -708,6 +708,7 @@
UsageMessage(stream, " -Xps-min-classes-to-save:integervalue\n");
UsageMessage(stream, " -Xps-min-notification-before-wake:integervalue\n");
UsageMessage(stream, " -Xps-max-notification-before-wake:integervalue\n");
+ UsageMessage(stream, " -Xps-profile-path:file-path\n");
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
diff --git a/runtime/primitive.cc b/runtime/primitive.cc
index 2380284..1ec345a 100644
--- a/runtime/primitive.cc
+++ b/runtime/primitive.cc
@@ -44,7 +44,7 @@
"Ljava/lang/Void;",
};
-#define COUNT_OF(x) (sizeof(x) / sizeof(x[0]))
+#define COUNT_OF(x) (sizeof(x) / sizeof((x)[0]))
const char* Primitive::PrettyDescriptor(Primitive::Type type) {
static_assert(COUNT_OF(kTypeNames) == static_cast<size_t>(Primitive::kPrimLast) + 1,
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index e254dfe..2f70ded 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -509,7 +509,6 @@
};
TEST_F(ReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 13370a0..e563027 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -269,13 +269,6 @@
UnloadNativeBridge();
}
- if (dump_gc_performance_on_shutdown_) {
- // This can't be called from the Heap destructor below because it
- // could call RosAlloc::InspectAll() which needs the thread_list
- // to be still alive.
- heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
- }
-
Thread* self = Thread::Current();
const bool attach_shutdown_thread = self == nullptr;
if (attach_shutdown_thread) {
@@ -285,6 +278,13 @@
LOG(WARNING) << "Current thread not detached in Runtime shutdown";
}
+ if (dump_gc_performance_on_shutdown_) {
+ // This can't be called from the Heap destructor below because it
+ // could call RosAlloc::InspectAll() which needs the thread_list
+ // to be still alive.
+ heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
+ }
+
if (jit_ != nullptr) {
// Stop the profile saver thread before marking the runtime as shutting down.
// The saver will try to dump the profiles before being sopped and that
@@ -806,11 +806,11 @@
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
-
- if (!is_system_server &&
+ // We may want to collect profiling samples for system server, but we never want to JIT there.
+ if ((!is_system_server || !jit_options_->UseJitCompilation()) &&
!safe_mode_ &&
(jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
- jit_.get() == nullptr) {
+ jit_ == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
@@ -2155,6 +2155,19 @@
jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
if (jit_.get() == nullptr) {
LOG(WARNING) << "Failed to create JIT " << error_msg;
+ return;
+ }
+
+ // In case we have a profile path passed as a command line argument,
+ // register the current class path for profiling now. Note that we cannot do
+ // this before we create the JIT and having it here is the most convenient way.
+ // This is used when testing profiles with dalvikvm command as there is no
+ // framework to register the dex files for profiling.
+ if (jit_options_->GetSaveProfilingInfo() &&
+ !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) {
+ std::vector<std::string> dex_filenames;
+ Split(class_path_string_, ':', &dex_filenames);
+ RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath());
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d244a9b..20db628 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -661,6 +661,10 @@
void InitThreadGroups(Thread* self);
+ void SetDumpGCPerformanceOnShutdown(bool value) {
+ dump_gc_performance_on_shutdown_ = value;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -733,7 +737,7 @@
std::string class_path_string_;
std::vector<std::string> properties_;
- std::vector<ti::Agent> agents_;
+ std::list<ti::Agent> agents_;
std::vector<Plugin> plugins_;
// The default stack size for managed threads created by the runtime.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index e68a1b2..16190cd 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -120,8 +120,8 @@
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{...}
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::list<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::list<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library>
// Not parse-able from command line, but can be provided explicitly.
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 674459d..0b7ea2f 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -32,6 +32,7 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
+#include "jit/profile_saver.h"
#include "os.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
@@ -154,8 +155,9 @@
}
void SignalCatcher::HandleSigUsr1() {
- LOG(INFO) << "SIGUSR1 forcing GC (no HPROF)";
+ LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
Runtime::Current()->GetHeap()->CollectGarbage(false);
+ ProfileSaver::ForceProcessProfiles();
}
int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 0628643..333128b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -649,7 +649,7 @@
}
const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
+ if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
return;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index 5c9614a..bdaa4c3 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -430,8 +430,15 @@
private:
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
- : link_(link), method_(method), result_register_(nullptr), dex_pc_ptr_(nullptr),
- code_item_(nullptr), number_of_vregs_(num_vregs), dex_pc_(dex_pc) {
+ : link_(link),
+ method_(method),
+ result_register_(nullptr),
+ dex_pc_ptr_(nullptr),
+ code_item_(nullptr),
+ number_of_vregs_(num_vregs),
+ dex_pc_(dex_pc),
+ cached_hotness_countdown_(0),
+ hotness_countdown_(0) {
// TODO(iam): Remove this parameter, it's an an artifact of portable removal
DCHECK(has_reference_array);
if (has_reference_array) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index ffa17c9..a224986 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -688,7 +688,13 @@
class StackMapEncoding {
public:
- StackMapEncoding() {}
+ StackMapEncoding()
+ : dex_pc_bit_offset_(0),
+ dex_register_map_bit_offset_(0),
+ inline_info_bit_offset_(0),
+ register_mask_index_bit_offset_(0),
+ stack_mask_index_bit_offset_(0),
+ total_bit_size_(0) {}
// Set stack map bit layout based on given sizes.
// Returns the size of stack map in bits.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 30a4046..008c388 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -16,6 +16,10 @@
#include "thread.h"
+#if !defined(__APPLE__)
+#include <sched.h>
+#endif
+
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
@@ -1591,8 +1595,21 @@
if (thread != nullptr) {
int policy;
sched_param sp;
+#if !defined(__APPLE__)
+ // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
+ policy = sched_getscheduler(tid);
+ if (policy == -1) {
+ PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
+ }
+ int sched_getparam_result = sched_getparam(tid, &sp);
+ if (sched_getparam_result == -1) {
+ PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
+ sp.sched_priority = -1;
+ }
+#else
CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
__FUNCTION__);
+#endif
os << " sched=" << policy << "/" << sp.sched_priority
<< " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index caed369..8d72fe8 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -736,7 +736,7 @@
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
if ((errno != EAGAIN) && (errno != EINTR)) {
if (errno == ETIMEDOUT) {
- LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
+ LOG(::android::base::FATAL)
<< "Timed out waiting for threads to suspend, waited for "
<< PrettyDuration(NanoTime() - start_time);
} else {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7aa98cd..0333fe8 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -162,7 +162,7 @@
FieldValueKind kind;
bool is_volatile;
- FieldValue() = default;
+ FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
FieldValue(FieldValue&& log) = default;
private:
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 97c1228..9206292 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -508,7 +508,7 @@
dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
ASSERT_TRUE(string_idx.IsValid());
// String should only get resolved by the initializer.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
// Do the transaction, then roll back.
Transaction transaction;
@@ -518,7 +518,7 @@
ASSERT_TRUE(h_klass->IsInitialized());
// Make sure the string got resolved by the transaction.
{
- mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache);
+ mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get());
ASSERT_TRUE(s != nullptr);
EXPECT_STREQ(s->ToModifiedUtf8().c_str(), kResolvedString);
EXPECT_EQ(s, h_dex_cache->GetResolvedString(string_idx));
@@ -526,7 +526,7 @@
Runtime::Current()->ExitTransactionMode();
transaction.Rollback();
// Check that the string did not stay resolved.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
ASSERT_FALSE(h_klass->IsInitialized());
ASSERT_FALSE(soa.Self()->IsExceptionPending());
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 6a20eaf..8d216ce 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -52,77 +52,6 @@
using android::base::StringAppendF;
using android::base::StringPrintf;
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
-uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- *dst_size = 0;
- return nullptr;
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- *dst_size = 0;
- return nullptr;
- }
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst.release();
-}
-
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
diff --git a/runtime/utils.h b/runtime/utils.h
index 24fd205..2011d9e 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -70,8 +70,6 @@
return intp & 0xFFFFFFFFU;
}
-uint8_t* DecodeBase64(const char* src, size_t* dst_size);
-
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 2481c8b..9ff104b 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -120,4 +120,30 @@
}
}
+bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* error_msg) {
+ size_t i = 0;
+ for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr);
+ dex_file_start != nullptr;
+ dex_file_start = GetNextDexFileData(dex_file_start), ++i) {
+ size_t size = reinterpret_cast<const DexFile::Header*>(dex_file_start)->file_size_;
+ // TODO: Supply the location information for a vdex file.
+ static constexpr char kVdexLocation[] = "";
+ std::string location = DexFile::GetMultiDexLocation(i, kVdexLocation);
+ std::unique_ptr<const DexFile> dex(DexFile::Open(dex_file_start,
+ size,
+ location,
+ GetLocationChecksum(i),
+ nullptr /*oat_dex_file*/,
+ false /*verify*/,
+ false /*verify_checksum*/,
+ error_msg));
+ if (dex == nullptr) {
+ return false;
+ }
+ dex_files->push_back(std::move(dex));
+ }
+ return true;
+}
+
} // namespace art
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 898d07d..9840555 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -27,6 +27,8 @@
namespace art {
+class DexFile;
+
// VDEX files contain extracted DEX files. The VdexFile class maps the file to
// memory and provides tools for accessing its individual sections.
//
@@ -122,6 +124,12 @@
return reinterpret_cast<const uint32_t*>(Begin() + sizeof(Header))[dex_file_index];
}
+ // Opens all the dex files contained in this vdex file. This is currently
+ // used for dumping tools only, and has not been tested for use by the
+ // remainder of the runtime.
+ bool OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* error_msg);
+
private:
explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 54cce98..5aef062 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -34,7 +34,6 @@
namespace art {
-jclass WellKnownClasses::com_android_dex_Dex;
jclass WellKnownClasses::dalvik_annotation_optimization_CriticalNative;
jclass WellKnownClasses::dalvik_annotation_optimization_FastNative;
jclass WellKnownClasses::dalvik_system_BaseDexClassLoader;
@@ -267,7 +266,6 @@
#undef STRING_INIT_LIST
void WellKnownClasses::Init(JNIEnv* env) {
- com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex");
dalvik_annotation_optimization_CriticalNative =
CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
dalvik_annotation_optimization_FastNative = CacheClass(env, "dalvik/annotation/optimization/FastNative");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index af4dbbf..c184731 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -44,7 +44,6 @@
static ObjPtr<mirror::Class> ToClass(jclass global_jclass) REQUIRES_SHARED(Locks::mutator_lock_);
- static jclass com_android_dex_Dex;
static jclass dalvik_annotation_optimization_CriticalNative;
static jclass dalvik_annotation_optimization_FastNative;
static jclass dalvik_system_BaseDexClassLoader;
diff --git a/sigchainlib/OWNERS b/sigchainlib/OWNERS
new file mode 100644
index 0000000..450fc12
--- /dev/null
+++ b/sigchainlib/OWNERS
@@ -0,0 +1,4 @@
+# Default maintainers and code reviewers:
+jmgao@google.com
+dimitry@google.com
+sehr@google.com
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index c1efecd..cc1b78d 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -22,81 +22,45 @@
#endif
#include <dlfcn.h>
+#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
+
+#include <initializer_list>
+#include <utility>
#include "sigchain.h"
#if defined(__APPLE__)
#define _NSIG NSIG
#define sighandler_t sig_t
+
+// Darwin has an #error when ucontext.h is included without _XOPEN_SOURCE defined.
+#define _XOPEN_SOURCE
#endif
-namespace art {
+#include <ucontext.h>
-typedef int (*SigActionFnPtr)(int, const struct sigaction*, struct sigaction*);
+// libsigchain provides an interception layer for signal handlers, to allow ART and others to give
+// their signal handlers the first stab at handling signals before passing them on to user code.
+//
+// It implements wrapper functions for signal, sigaction, and sigprocmask, and a handler that
+// forwards signals appropriately.
+//
+// In our handler, we start off with all signals blocked, fetch the original signal mask from the
+// passed in ucontext, and then adjust our signal mask appropriately for the user handler.
+//
+// It's somewhat tricky for us to properly handle some flag cases:
+// SA_NOCLDSTOP and SA_NOCLDWAIT: shouldn't matter, we don't have special handlers for SIGCHLD.
+// SA_NODEFER: unimplemented, we can manually change the signal mask appropriately.
+// ~SA_ONSTACK: always silently enable this
+// SA_RESETHAND: unimplemented, but we can probably do this?
+// ~SA_RESTART: unimplemented, maybe we can reserve an RT signal, register an empty handler that
+// doesn't have SA_RESTART, and raise the signal to avoid restarting syscalls that are
+// expected to be interrupted?
-class SignalAction {
- public:
- SignalAction() : claimed_(false), uses_old_style_(false), special_handler_(nullptr) {
- }
-
- // Claim the signal and keep the action specified.
- void Claim(const struct sigaction& action) {
- action_ = action;
- claimed_ = true;
- }
-
- // Unclaim the signal and restore the old action.
- void Unclaim(int signal) {
- claimed_ = false;
- sigaction(signal, &action_, nullptr); // Restore old action.
- }
-
- // Get the action associated with this signal.
- const struct sigaction& GetAction() const {
- return action_;
- }
-
- // Is the signal claimed?
- bool IsClaimed() const {
- return claimed_;
- }
-
- // Change the recorded action to that specified.
- // If oldstyle is true then this action is from an older style signal()
- // call as opposed to sigaction(). In this case the sa_handler is
- // used when invoking the user's handler.
- void SetAction(const struct sigaction& action, bool oldstyle) {
- action_ = action;
- uses_old_style_ = oldstyle;
- }
-
- bool OldStyle() const {
- return uses_old_style_;
- }
-
- void SetSpecialHandler(SpecialSignalHandlerFn fn) {
- special_handler_ = fn;
- }
-
- SpecialSignalHandlerFn GetSpecialHandler() {
- return special_handler_;
- }
-
- private:
- struct sigaction action_; // Action to be performed.
- bool claimed_; // Whether signal is claimed or not.
- bool uses_old_style_; // Action is created using signal(). Use sa_handler.
- SpecialSignalHandlerFn special_handler_; // A special handler executed before user handlers.
-};
-
-// User's signal handlers
-static SignalAction user_sigactions[_NSIG];
-static bool initialized;
-static void* linked_sigaction_sym;
-static void* linked_sigprocmask_sym;
static void log(const char* format, ...) {
char buf[256];
@@ -111,102 +75,186 @@
va_end(ap);
}
-static void CheckSignalValid(int signal) {
- if (signal <= 0 || signal >= _NSIG) {
- log("Invalid signal %d", signal);
- abort();
- }
-}
+#define fatal(...) log(__VA_ARGS__); abort()
-// Sigchainlib's own handler so we can ensure a managed handler is called first even if nobody
-// claimed a chain. Simply forward to InvokeUserSignalHandler.
-static void sigchainlib_managed_handler_sigaction(int sig, siginfo_t* info, void* context) {
- InvokeUserSignalHandler(sig, info, context);
-}
-
-// Claim a signal chain for a particular signal.
-extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
- CheckSignalValid(signal);
-
- user_sigactions[signal].Claim(*oldaction);
-}
-
-extern "C" void UnclaimSignalChain(int signal) {
- CheckSignalValid(signal);
-
- user_sigactions[signal].Unclaim(signal);
-}
-
-// Invoke the user's signal handler.
-extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
- // Check the arguments.
- CheckSignalValid(sig);
-
- // The signal must have been claimed in order to get here. Check it.
- if (!user_sigactions[sig].IsClaimed()) {
- abort();
- }
-
- // Do we have a managed handler? If so, run it first.
- SpecialSignalHandlerFn managed = user_sigactions[sig].GetSpecialHandler();
- if (managed != nullptr) {
- sigset_t mask, old_mask;
- sigfillset(&mask);
- sigprocmask(SIG_BLOCK, &mask, &old_mask);
- // Call the handler. If it succeeds, we're done.
- if (managed(sig, info, context)) {
- sigprocmask(SIG_SETMASK, &old_mask, nullptr);
- return;
+static int sigorset(sigset_t* dest, sigset_t* left, sigset_t* right) {
+ sigemptyset(dest);
+ for (size_t i = 0; i < sizeof(sigset_t) * CHAR_BIT; ++i) {
+ if (sigismember(left, i) == 1 || sigismember(right, i) == 1) {
+ sigaddset(dest, i);
}
- sigprocmask(SIG_SETMASK, &old_mask, nullptr);
+ }
+ return 0;
+}
+
+namespace art {
+
+static decltype(&sigaction) linked_sigaction;
+static decltype(&sigprocmask) linked_sigprocmask;
+__thread bool handling_signal;
+
+class SignalChain {
+ public:
+ SignalChain() : claimed_(false) {
}
- const struct sigaction& action = user_sigactions[sig].GetAction();
- if (user_sigactions[sig].OldStyle()) {
- if (action.sa_handler != nullptr) {
- action.sa_handler(sig);
- } else {
- signal(sig, SIG_DFL);
- raise(sig);
+ bool IsClaimed() {
+ return claimed_;
+ }
+
+ void Claim(int signo) {
+ if (!claimed_) {
+ Register(signo);
+ claimed_ = true;
}
+ }
+
+ // Register the signal chain with the kernel if needed.
+ void Register(int signo) {
+ struct sigaction handler_action = {};
+ handler_action.sa_sigaction = SignalChain::Handler;
+ handler_action.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
+ sigfillset(&handler_action.sa_mask);
+ linked_sigaction(signo, &handler_action, &action_);
+ }
+
+ void SetAction(const struct sigaction* action) {
+ action_ = *action;
+ }
+
+ struct sigaction GetAction() {
+ return action_;
+ }
+
+ void AddSpecialHandler(SpecialSignalHandlerFn fn) {
+ for (SpecialSignalHandlerFn& slot : special_handlers_) {
+ if (slot == nullptr) {
+ slot = fn;
+ return;
+ }
+ }
+
+ fatal("too many special signal handlers");
+ }
+
+ void RemoveSpecialHandler(SpecialSignalHandlerFn fn) {
+ // This isn't thread safe, but it's unlikely to be a real problem.
+ size_t len = sizeof(special_handlers_)/sizeof(*special_handlers_);
+ for (size_t i = 0; i < len; ++i) {
+ if (special_handlers_[i] == fn) {
+ for (size_t j = i; j < len - 1; ++j) {
+ special_handlers_[j] = special_handlers_[j + 1];
+ }
+ special_handlers_[len - 1] = nullptr;
+ return;
+ }
+ }
+
+ fatal("failed to find special handler to remove");
+ }
+
+
+ static void Handler(int signo, siginfo_t* siginfo, void*);
+
+ private:
+ bool claimed_;
+ struct sigaction action_;
+ SpecialSignalHandlerFn special_handlers_[2];
+};
+
+static SignalChain chains[_NSIG];
+
+class ScopedFlagRestorer {
+ public:
+ explicit ScopedFlagRestorer(bool* flag) : flag_(flag), original_value_(*flag) {
+ }
+
+ ~ScopedFlagRestorer() {
+ *flag_ = original_value_;
+ }
+
+ private:
+ bool* flag_;
+ bool original_value_;
+};
+
+class ScopedSignalUnblocker {
+ public:
+ explicit ScopedSignalUnblocker(const std::initializer_list<int>& signals) {
+ sigset_t new_mask;
+ sigemptyset(&new_mask);
+ for (int signal : signals) {
+ sigaddset(&new_mask, signal);
+ }
+ if (sigprocmask(SIG_UNBLOCK, &new_mask, &previous_mask_) != 0) {
+ fatal("failed to unblock signals: %s", strerror(errno));
+ }
+ }
+
+ ~ScopedSignalUnblocker() {
+ if (sigprocmask(SIG_SETMASK, &previous_mask_, nullptr) != 0) {
+ fatal("failed to unblock signals: %s", strerror(errno));
+ }
+ }
+
+ private:
+ sigset_t previous_mask_;
+};
+
+void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
+ ScopedFlagRestorer flag(&handling_signal);
+
+ // Try the special handlers first.
+ // If one of them crashes, we'll reenter this handler and pass that crash onto the user handler.
+ if (!handling_signal) {
+ ScopedSignalUnblocker unblocked { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV }; // NOLINT
+ handling_signal = true;
+
+ for (const auto& handler : chains[signo].special_handlers_) {
+ if (handler != nullptr && handler(signo, siginfo, ucontext_raw)) {
+ return;
+ }
+ }
+ }
+
+ // Forward to the user's signal handler.
+ int handler_flags = chains[signo].action_.sa_flags;
+ ucontext_t* ucontext = static_cast<ucontext_t*>(ucontext_raw);
+ sigset_t mask;
+ sigorset(&mask, &ucontext->uc_sigmask, &chains[signo].action_.sa_mask);
+ if ((handler_flags & SA_NODEFER)) {
+ sigdelset(&mask, signo);
+ }
+ sigprocmask(SIG_SETMASK, &mask, nullptr);
+
+ if ((handler_flags & SA_SIGINFO)) {
+ chains[signo].action_.sa_sigaction(signo, siginfo, ucontext_raw);
} else {
- if (action.sa_sigaction != nullptr) {
- sigset_t old_mask;
- sigprocmask(SIG_BLOCK, &action.sa_mask, &old_mask);
- action.sa_sigaction(sig, info, context);
- sigprocmask(SIG_SETMASK, &old_mask, nullptr);
+ auto handler = chains[signo].action_.sa_handler;
+ if (handler == SIG_IGN) {
+ return;
+ } else if (handler == SIG_DFL) {
+ fatal("exiting due to SIG_DFL handler for signal %d", signo);
} else {
- signal(sig, SIG_DFL);
- raise(sig);
+ handler(signo);
}
}
}
-extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
- CheckSignalValid(signal);
- // Read the current action without looking at the chain, it should be the expected action.
- SigActionFnPtr linked_sigaction = reinterpret_cast<SigActionFnPtr>(linked_sigaction_sym);
- struct sigaction current_action;
- linked_sigaction(signal, nullptr, &current_action);
- // If the sigactions don't match then we put the current action on the chain and make ourself as
- // the main action.
- if (current_action.sa_sigaction != expected_action->sa_sigaction) {
- log("Warning: Unexpected sigaction action found %p\n", current_action.sa_sigaction);
- user_sigactions[signal].Claim(current_action);
- linked_sigaction(signal, expected_action, nullptr);
- }
-}
-
extern "C" int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
// If this signal has been claimed as a signal chain, record the user's
// action but don't pass it on to the kernel.
// Note that we check that the signal number is in range here. An out of range signal
// number should behave exactly as the libc sigaction.
- if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() &&
- (new_action == nullptr || new_action->sa_handler != SIG_DFL)) {
- struct sigaction saved_action = user_sigactions[signal].GetAction();
+ if (signal < 0 || signal >= _NSIG) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (chains[signal].IsClaimed()) {
+ struct sigaction saved_action = chains[signal].GetAction();
if (new_action != nullptr) {
- user_sigactions[signal].SetAction(*new_action, false);
+ chains[signal].SetAction(new_action);
}
if (old_action != nullptr) {
*old_action = saved_action;
@@ -216,73 +264,52 @@
// Will only get here if the signal chain has not been claimed. We want
// to pass the sigaction on to the kernel via the real sigaction in libc.
-
- if (linked_sigaction_sym == nullptr) {
- // Perform lazy initialization.
- // This will only occur outside of a signal context since we have
- // not been initialized and therefore cannot be within the ART
- // runtime.
- InitializeSignalChain();
- }
-
- if (linked_sigaction_sym == nullptr) {
- log("Unable to find next sigaction in signal chain");
- abort();
- }
- SigActionFnPtr linked_sigaction = reinterpret_cast<SigActionFnPtr>(linked_sigaction_sym);
+ InitializeSignalChain();
return linked_sigaction(signal, new_action, old_action);
}
-static sighandler_t signal_impl(int signal, sighandler_t handler) {
- struct sigaction sa;
+extern "C" sighandler_t signal(int signo, sighandler_t handler) {
+ if (signo < 0 || signo > _NSIG) {
+ errno = EINVAL;
+ return SIG_ERR;
+ }
+
+ struct sigaction sa = {};
sigemptyset(&sa.sa_mask);
sa.sa_handler = handler;
- sa.sa_flags = SA_RESTART;
+ sa.sa_flags = SA_RESTART | SA_ONSTACK;
sighandler_t oldhandler;
// If this signal has been claimed as a signal chain, record the user's
// action but don't pass it on to the kernel.
- // Note that we check that the signal number is in range here. An out of range signal
- // number should behave exactly as the libc sigaction.
- if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() && handler != SIG_DFL) {
- oldhandler = reinterpret_cast<sighandler_t>(user_sigactions[signal].GetAction().sa_handler);
- user_sigactions[signal].SetAction(sa, true);
+ if (chains[signo].IsClaimed()) {
+ oldhandler = reinterpret_cast<sighandler_t>(chains[signo].GetAction().sa_handler);
+ chains[signo].SetAction(&sa);
return oldhandler;
}
// Will only get here if the signal chain has not been claimed. We want
// to pass the sigaction on to the kernel via the real sigaction in libc.
-
- if (linked_sigaction_sym == nullptr) {
- // Perform lazy initialization.
- InitializeSignalChain();
- }
-
- if (linked_sigaction_sym == nullptr) {
- log("Unable to find next sigaction in signal chain");
- abort();
- }
-
- typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
- SigAction linked_sigaction = reinterpret_cast<SigAction>(linked_sigaction_sym);
- if (linked_sigaction(signal, &sa, &sa) == -1) {
+ InitializeSignalChain();
+ if (linked_sigaction(signo, &sa, &sa) == -1) {
return SIG_ERR;
}
return reinterpret_cast<sighandler_t>(sa.sa_handler);
}
-extern "C" sighandler_t signal(int signal, sighandler_t handler) {
- return signal_impl(signal, handler);
-}
-
#if !defined(__LP64__)
-extern "C" sighandler_t bsd_signal(int signal, sighandler_t handler) {
- return signal_impl(signal, handler);
+extern "C" sighandler_t bsd_signal(int signo, sighandler_t handler) {
+ return signal(signo, handler);
}
#endif
extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
+ // When inside a signal handler, forward directly to the actual sigprocmask.
+ if (handling_signal) {
+ return linked_sigprocmask(how, bionic_new_set, bionic_old_set);
+ }
+
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
if (bionic_new_set != nullptr) {
@@ -292,7 +319,7 @@
// Don't allow claimed signals in the mask. If a signal chain has been claimed
// we can't allow the user to block that signal.
for (int i = 0 ; i < _NSIG; ++i) {
- if (user_sigactions[i].IsClaimed() && sigismember(&tmpset, i)) {
+ if (chains[i].IsClaimed() && sigismember(&tmpset, i)) {
sigdelset(&tmpset, i);
}
}
@@ -300,18 +327,7 @@
new_set_ptr = &tmpset;
}
- if (linked_sigprocmask_sym == nullptr) {
- // Perform lazy initialization.
- InitializeSignalChain();
- }
-
- if (linked_sigprocmask_sym == nullptr) {
- log("Unable to find next sigprocmask in signal chain");
- abort();
- }
-
- typedef int (*SigProcMask)(int how, const sigset_t*, sigset_t*);
- SigProcMask linked_sigprocmask= reinterpret_cast<SigProcMask>(linked_sigprocmask_sym);
+ InitializeSignalChain();
return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
}
@@ -322,49 +338,67 @@
// taken and if it so happens that a signal occurs while one of these
// locks is already taken, dlsym will block trying to reenter a
// mutex and we will never get out of it.
+ static bool initialized = false;
if (initialized) {
// Don't initialize twice.
return;
}
- linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
+
+ void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
if (linked_sigaction_sym == nullptr) {
linked_sigaction_sym = dlsym(RTLD_DEFAULT, "sigaction");
if (linked_sigaction_sym == nullptr ||
linked_sigaction_sym == reinterpret_cast<void*>(sigaction)) {
- linked_sigaction_sym = nullptr;
+ fatal("Unable to find next sigaction in signal chain");
}
}
- linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
+ void* linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
if (linked_sigprocmask_sym == nullptr) {
linked_sigprocmask_sym = dlsym(RTLD_DEFAULT, "sigprocmask");
if (linked_sigprocmask_sym == nullptr ||
linked_sigprocmask_sym == reinterpret_cast<void*>(sigprocmask)) {
- linked_sigprocmask_sym = nullptr;
+ fatal("Unable to find next sigprocmask in signal chain");
}
}
+
+ linked_sigaction = reinterpret_cast<decltype(linked_sigaction)>(linked_sigaction_sym);
+ linked_sigprocmask = reinterpret_cast<decltype(linked_sigprocmask)>(linked_sigprocmask_sym);
initialized = true;
}
-extern "C" void SetSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) {
- CheckSignalValid(signal);
+extern "C" void AddSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) {
+ if (signal <= 0 || signal >= _NSIG) {
+ fatal("Invalid signal %d", signal);
+ }
// Set the managed_handler.
- user_sigactions[signal].SetSpecialHandler(fn);
+ chains[signal].AddSpecialHandler(fn);
+ chains[signal].Claim(signal);
+}
- // In case the chain isn't claimed, claim it for ourself so we can ensure the managed handler
- // goes first.
- if (!user_sigactions[signal].IsClaimed()) {
- struct sigaction act, old_act;
- act.sa_sigaction = sigchainlib_managed_handler_sigaction;
- sigemptyset(&act.sa_mask);
- act.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
- act.sa_restorer = nullptr;
-#endif
- if (sigaction(signal, &act, &old_act) != -1) {
- user_sigactions[signal].Claim(old_act);
- }
+extern "C" void RemoveSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) {
+ if (signal <= 0 || signal >= _NSIG) {
+ fatal("Invalid signal %d", signal);
+ }
+
+ chains[signal].RemoveSpecialHandler(fn);
+}
+
+extern "C" void EnsureFrontOfChain(int signal) {
+ if (signal <= 0 || signal >= _NSIG) {
+ fatal("Invalid signal %d", signal);
+ }
+
+ // Read the current action without looking at the chain, it should be the expected action.
+ struct sigaction current_action;
+ InitializeSignalChain();
+ linked_sigaction(signal, nullptr, &current_action);
+ // If the sigactions don't match then we put the current action on the chain and make ourself as
+ // the main action.
+ if (current_action.sa_sigaction != SignalChain::Handler) {
+ log("Warning: Unexpected sigaction action found %p\n", current_action.sa_sigaction);
+ chains[signal].Register(signal);
}
}
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index 01ccedf..960d221 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -23,16 +23,11 @@
extern "C" void InitializeSignalChain();
-extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction);
-
-extern "C" void UnclaimSignalChain(int signal);
-
typedef bool (*SpecialSignalHandlerFn)(int, siginfo_t*, void*);
-extern "C" void SetSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn);
+extern "C" void AddSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn);
+extern "C" void RemoveSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn);
-extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
-
-extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action);
+extern "C" void EnsureFrontOfChain(int signal);
} // namespace art
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index aa3c360..d6a5e12 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -48,37 +48,23 @@
namespace art {
-
-extern "C" void ClaimSignalChain(int signal ATTRIBUTE_UNUSED,
- struct sigaction* oldaction ATTRIBUTE_UNUSED) {
- log("ClaimSignalChain is not exported by the main executable.");
- abort();
-}
-
-extern "C" void UnclaimSignalChain(int signal ATTRIBUTE_UNUSED) {
- log("UnclaimSignalChain is not exported by the main executable.");
- abort();
-}
-
-extern "C" void InvokeUserSignalHandler(int sig ATTRIBUTE_UNUSED,
- siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- log("InvokeUserSignalHandler is not exported by the main executable.");
- abort();
-}
-
extern "C" void InitializeSignalChain() {
log("InitializeSignalChain is not exported by the main executable.");
abort();
}
-extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED,
- struct sigaction* expected_action ATTRIBUTE_UNUSED) {
+extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED) {
log("EnsureFrontOfChain is not exported by the main executable.");
abort();
}
-extern "C" void SetSpecialSignalHandlerFn(int signal ATTRIBUTE_UNUSED,
+extern "C" void AddSpecialSignalHandlerFn(int signal ATTRIBUTE_UNUSED,
+ SpecialSignalHandlerFn fn ATTRIBUTE_UNUSED) {
+ log("SetSpecialSignalHandlerFn is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void RemoveSpecialSignalHandlerFn(int signal ATTRIBUTE_UNUSED,
SpecialSignalHandlerFn fn ATTRIBUTE_UNUSED) {
log("SetSpecialSignalHandlerFn is not exported by the main executable.");
abort();
diff --git a/sigchainlib/version-script32.txt b/sigchainlib/version-script32.txt
index eec9103..f360efa 100644
--- a/sigchainlib/version-script32.txt
+++ b/sigchainlib/version-script32.txt
@@ -1,11 +1,9 @@
{
global:
- ClaimSignalChain;
- UnclaimSignalChain;
- InvokeUserSignalHandler;
InitializeSignalChain;
EnsureFrontOfChain;
- SetSpecialSignalHandlerFn;
+ AddSpecialSignalHandlerFn;
+ RemoveSpecialSignalHandlerFn;
bsd_signal;
sigaction;
signal;
diff --git a/sigchainlib/version-script64.txt b/sigchainlib/version-script64.txt
index 08c312e..319d1c6 100644
--- a/sigchainlib/version-script64.txt
+++ b/sigchainlib/version-script64.txt
@@ -1,11 +1,9 @@
{
global:
- ClaimSignalChain;
- UnclaimSignalChain;
- InvokeUserSignalHandler;
InitializeSignalChain;
EnsureFrontOfChain;
- SetSpecialSignalHandlerFn;
+ AddSpecialSignalHandlerFn;
+ RemoveSpecialSignalHandlerFn;
sigaction;
signal;
sigprocmask;
diff --git a/test.py b/test.py
new file mode 100755
index 0000000..414d779
--- /dev/null
+++ b/test.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# --run-test : To run run-test
+# --gtest : To run gtest
+# -j : Number of jobs
+# --host: for host tests
+# --target: for target tests
+# All the other arguments will be passed to the run-test testrunner.
+import sys
+import subprocess
+import os
+import argparse
+
+ANDROID_BUILD_TOP = os.environ.get('ANDROID_BUILD_TOP', os.getcwd())
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-j', default='', dest='n_threads')
+parser.add_argument('--run-test', '-r', action='store_true', dest='run_test')
+parser.add_argument('--gtest', '-g', action='store_true', dest='gtest')
+parser.add_argument('--target', action='store_true', dest='target')
+parser.add_argument('--host', action='store_true', dest='host')
+options, unknown = parser.parse_known_args()
+
+if options.run_test or not options.gtest:
+ testrunner = os.path.join('./',
+ ANDROID_BUILD_TOP,
+ 'art/test/testrunner/testrunner.py')
+ run_test_args = []
+ for arg in sys.argv[1:]:
+ if arg == '--run-test' or arg == '--gtest' \
+ or arg == '-r' or arg == '-g':
+ continue
+ run_test_args.append(arg)
+
+ test_runner_cmd = [testrunner] + run_test_args
+ print test_runner_cmd
+ if subprocess.call(test_runner_cmd):
+ sys.exit(1)
+
+if options.gtest or not options.run_test:
+ build_target = ''
+ if options.host or not options.target:
+ build_target += ' test-art-host-gtest'
+ if options.target or not options.host:
+ build_target += ' test-art-target-gtest'
+
+ build_command = 'make'
+ build_command += ' -j' + str(options.n_threads)
+
+ build_command += ' -C ' + ANDROID_BUILD_TOP
+ build_command += ' ' + build_target
+ # Add 'dist' to avoid Jack issues b/36169180.
+ build_command += ' dist'
+
+ print build_command
+
+ if subprocess.call(build_command.split()):
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/test/004-SignalTest/expected.txt b/test/004-SignalTest/expected.txt
index b3a0e1c..847b56f 100644
--- a/test/004-SignalTest/expected.txt
+++ b/test/004-SignalTest/expected.txt
@@ -3,4 +3,8 @@
Caught NullPointerException
Caught StackOverflowError
signal caught
+unblocked signal received
+unblocking blocked signal
+blocked signal received
+signal handler done
Signal test OK
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 6dd6355..a58a075 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -18,13 +18,14 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/ucontext.h>
#include <unistd.h>
#include "base/macros.h"
static int signal_count;
-static const int kMaxSignal = 2;
+static const int kMaxSignal = 1;
#if defined(__i386__) || defined(__x86_64__)
#if defined(__APPLE__)
@@ -47,6 +48,17 @@
#endif
#endif
+#define BLOCKED_SIGNAL SIGUSR1
+#define UNBLOCKED_SIGNAL SIGUSR2
+
+static void blocked_signal(int sig ATTRIBUTE_UNUSED) {
+ printf("blocked signal received\n");
+}
+
+static void unblocked_signal(int sig ATTRIBUTE_UNUSED) {
+ printf("unblocked signal received\n");
+}
+
static void signalhandler(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
void* context) {
printf("signal caught\n");
@@ -54,6 +66,16 @@
if (signal_count > kMaxSignal) {
abort();
}
+
+ raise(UNBLOCKED_SIGNAL);
+ raise(BLOCKED_SIGNAL);
+ printf("unblocking blocked signal\n");
+
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, BLOCKED_SIGNAL);
+ sigprocmask(SIG_UNBLOCK, &mask, nullptr);
+
#if defined(__arm__)
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
@@ -71,6 +93,8 @@
#else
UNUSED(context);
#endif
+
+ printf("signal handler done\n");
}
static struct sigaction oldaction;
@@ -78,13 +102,21 @@
extern "C" JNIEXPORT void JNICALL Java_Main_initSignalTest(JNIEnv*, jclass) {
struct sigaction action;
action.sa_sigaction = signalhandler;
- sigemptyset(&action.sa_mask);
+ sigfillset(&action.sa_mask);
+ sigdelset(&action.sa_mask, UNBLOCKED_SIGNAL);
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
sigaction(SIGSEGV, &action, &oldaction);
+ struct sigaction check;
+ sigaction(SIGSEGV, nullptr, &check);
+ if (memcmp(&action, &check, sizeof(action)) != 0) {
+ printf("sigaction returned different value\n");
+ }
+ signal(BLOCKED_SIGNAL, blocked_signal);
+ signal(UNBLOCKED_SIGNAL, unblocked_signal);
}
extern "C" JNIEXPORT void JNICALL Java_Main_terminateSignalTest(JNIEnv*, jclass) {
@@ -96,6 +128,12 @@
char *go_away_compiler = nullptr;
extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
+ // Unblock UNBLOCKED_SIGNAL.
+ sigset_t mask;
+ memset(&mask, 0, sizeof(mask));
+ sigaddset(&mask, UNBLOCKED_SIGNAL);
+ sigprocmask(SIG_UNBLOCK, &mask, nullptr);
+
#if defined(__arm__) || defined(__i386__) || defined(__aarch64__)
// On supported architectures we cause a real SEGV.
*go_away_compiler = 'a';
diff --git a/test/051-thread/expected.txt b/test/051-thread/expected.txt
index c6cd4f8..3fc3492 100644
--- a/test/051-thread/expected.txt
+++ b/test/051-thread/expected.txt
@@ -1,6 +1,6 @@
JNI_OnLoad called
thread test starting
-testThreadCapacity thread count: 512
+testThreadCapacity thread count: 128
testThreadDaemons starting thread 'TestDaemonThread'
testThreadDaemons @ Thread running
testThreadDaemons @ Got expected setDaemon exception
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index 2e26b22..82fc0d4 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -35,8 +35,8 @@
* Simple thread capacity test.
*/
private static void testThreadCapacity() throws Exception {
- TestCapacityThread[] threads = new TestCapacityThread[512];
- for (int i = 0; i < 512; i++) {
+ TestCapacityThread[] threads = new TestCapacityThread[128];
+ for (int i = 0; i < threads.length; i++) {
threads[i] = new TestCapacityThread();
}
diff --git a/test/080-oom-throw/run b/test/080-oom-throw/run
new file mode 100644
index 0000000..eb47378
--- /dev/null
+++ b/test/080-oom-throw/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} $@ --runtime-option -Xmx16m
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index a6c18b7..3d5d062 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -114,13 +114,13 @@
static Object[] holder;
public static void blowup() throws Exception {
- int size = 32 * 1024 * 1024;
+ int size = 2 * 1024 * 1024;
for (int i = 0; i < holder.length; ) {
try {
holder[i] = new char[size];
i++;
} catch (OutOfMemoryError oome) {
- size = size / 2;
+ size = size / 16;
if (size == 0) {
break;
}
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 41329af..87287f8 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -395,20 +395,6 @@
#endif
#endif
-static bool cannot_be_blocked(int signum) {
- // These two sigs cannot be blocked anywhere.
- if ((signum == SIGKILL) || (signum == SIGSTOP)) {
- return true;
- }
-
- // The invalid rt_sig cannot be blocked.
- if (((signum >= 32) && (signum < SIGRTMIN)) || (signum > SIGRTMAX)) {
- return true;
- }
-
- return false;
-}
-
// A dummy special handler, continueing after the faulting location. This code comes from
// 004-SignalTest.
static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
@@ -433,22 +419,6 @@
#endif
}
- // Before invoking this handler, all other unclaimed signals must be blocked.
- // We're trying to check the signal mask to verify its status here.
- sigset_t tmpset;
- sigemptyset(&tmpset);
- sigprocmask(SIG_SETMASK, nullptr, &tmpset);
- int other_claimed = (sig == SIGSEGV) ? SIGILL : SIGSEGV;
- for (int signum = 0; signum < NSIG; ++signum) {
- if (cannot_be_blocked(signum)) {
- continue;
- } else if ((sigismember(&tmpset, signum)) && (signum == other_claimed)) {
- printf("ERROR: The claimed signal %d is blocked\n", signum);
- } else if ((!sigismember(&tmpset, signum)) && (signum != other_claimed)) {
- printf("ERROR: The unclaimed signal %d is not blocked\n", signum);
- }
- }
-
// We handled this...
return true;
}
@@ -476,9 +446,9 @@
return false;
}
-extern "C" bool native_bridge_initNamespace(const char* public_ns_sonames ATTRIBUTE_UNUSED,
- const char* anon_ns_library_path ATTRIBUTE_UNUSED) {
- printf("Initializing namespaces in native bridge.\n");
+extern "C" bool native_bridge_initAnonymousNamespace(const char* public_ns_sonames ATTRIBUTE_UNUSED,
+ const char* anon_ns_library_path ATTRIBUTE_UNUSED) {
+ printf("Initializing anonymous namespace in native bridge.\n");
return false;
}
@@ -493,6 +463,13 @@
return nullptr;
}
+extern "C" bool native_bridge_linkNamespaces(android::native_bridge_namespace_t* from ATTRIBUTE_UNUSED,
+ android::native_bridge_namespace_t* to ATTRIBUTE_UNUSED,
+ const char* shared_libs_sonames ATTRIBUTE_UNUSED) {
+ printf("Linking namespaces in native bridge.\n");
+ return false;
+}
+
extern "C" void* native_bridge_loadLibraryExt(const char* libpath ATTRIBUTE_UNUSED,
int flag ATTRIBUTE_UNUSED,
android::native_bridge_namespace_t* ns ATTRIBUTE_UNUSED) {
@@ -517,7 +494,8 @@
.unloadLibrary = &native_bridge_unloadLibrary,
.getError = &native_bridge_getError,
.isPathSupported = &native_bridge_isPathSupported,
- .initNamespace = &native_bridge_initNamespace,
+ .initAnonymousNamespace = &native_bridge_initAnonymousNamespace,
.createNamespace = &native_bridge_createNamespace,
+ .linkNamespaces = &native_bridge_linkNamespaces,
.loadLibraryExt = &native_bridge_loadLibraryExt
};
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 595c70d..7e8431f 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -59,7 +59,7 @@
// Stop the JIT to ensure its threads and work queue are not keeping classes
// artifically alive.
stopJit();
- Runtime.getRuntime().gc();
+ doUnloading();
System.runFinalization();
BufferedReader reader = new BufferedReader(new FileReader ("/proc/" + pid + "/maps"));
String line;
@@ -83,12 +83,20 @@
}
}
+ private static void doUnloading() {
+ // Do multiple GCs to prevent rare flakiness if some other thread is keeping the
+ // classloader live.
+ for (int i = 0; i < 5; ++i) {
+ Runtime.getRuntime().gc();
+ }
+ }
+
private static void testUnloadClass(Constructor<?> constructor) throws Exception {
WeakReference<Class> klass = setUpUnloadClassWeak(constructor);
// No strong references to class loader, should get unloaded.
- Runtime.getRuntime().gc();
+ doUnloading();
WeakReference<Class> klass2 = setUpUnloadClassWeak(constructor);
- Runtime.getRuntime().gc();
+ doUnloading();
// If the weak reference is cleared, then it was unloaded.
System.out.println(klass.get());
System.out.println(klass2.get());
@@ -98,7 +106,7 @@
throws Exception {
WeakReference<ClassLoader> loader = setUpUnloadLoader(constructor, true);
// No strong references to class loader, should get unloaded.
- Runtime.getRuntime().gc();
+ doUnloading();
// If the weak reference is cleared, then it was unloaded.
System.out.println(loader.get());
}
@@ -110,7 +118,7 @@
Throwable throwable = (Throwable) stackTraceMethod.invoke(klass);
stackTraceMethod = null;
klass = null;
- Runtime.getRuntime().gc();
+ doUnloading();
boolean isNull = weak_klass.get() == null;
System.out.println("class null " + isNull + " " + throwable.getMessage());
}
@@ -118,7 +126,7 @@
private static void testLoadAndUnloadLibrary(Constructor<?> constructor) throws Exception {
WeakReference<ClassLoader> loader = setUpLoadLibrary(constructor);
// No strong references to class loader, should get unloaded.
- Runtime.getRuntime().gc();
+ doUnloading();
// If the weak reference is cleared, then it was unloaded.
System.out.println(loader.get());
}
@@ -147,7 +155,7 @@
private static void testNoUnloadInstance(Constructor<?> constructor) throws Exception {
Pair p = testNoUnloadInstanceHelper(constructor);
- Runtime.getRuntime().gc();
+ doUnloading();
// If the class loader was unloded too early due to races, just pass the test.
boolean isNull = p.classLoader.get() == null;
System.out.println("loader null " + isNull);
diff --git a/test/159-app-image-fields/expected.txt b/test/159-app-image-fields/expected.txt
new file mode 100644
index 0000000..f63e8e3
--- /dev/null
+++ b/test/159-app-image-fields/expected.txt
@@ -0,0 +1,3 @@
+Eating all memory.
+memoryWasAllocated = true
+match: true
diff --git a/test/159-app-image-fields/info.txt b/test/159-app-image-fields/info.txt
new file mode 100644
index 0000000..9b10078
--- /dev/null
+++ b/test/159-app-image-fields/info.txt
@@ -0,0 +1,3 @@
+Regression test for erroneously storing an ArtField* in the app image DexCache
+when the class from the corresponding FieldId is not in the app image, only the
+declaring class is.
diff --git a/test/159-app-image-fields/profile b/test/159-app-image-fields/profile
new file mode 100644
index 0000000..4184fa2
--- /dev/null
+++ b/test/159-app-image-fields/profile
@@ -0,0 +1,3 @@
+LAAA/Base;
+LMain;
+LFields;
diff --git a/test/159-app-image-fields/run b/test/159-app-image-fields/run
new file mode 100644
index 0000000..7cc107a
--- /dev/null
+++ b/test/159-app-image-fields/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a profile to put specific classes in the app image.
+# Also run the compiler with -j1 to ensure specific class verification order.
+exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile \
+ -Xcompiler-option -j1
diff --git a/test/ti-agent/common_load.h b/test/159-app-image-fields/src/AAA/Base.java
similarity index 67%
copy from test/ti-agent/common_load.h
copy to test/159-app-image-fields/src/AAA/Base.java
index e79a006..41ee83a 100644
--- a/test/ti-agent/common_load.h
+++ b/test/159-app-image-fields/src/AAA/Base.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,9 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+package AAA;
-#include "jvmti.h"
-
-namespace art {
-
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+class Base {
+ // The field is public but the class is package-private.
+ public static int value = 42;
+}
diff --git a/test/ti-agent/common_load.h b/test/159-app-image-fields/src/AAA/Derived.java
similarity index 67%
copy from test/ti-agent/common_load.h
copy to test/159-app-image-fields/src/AAA/Derived.java
index e79a006..f6045d5 100644
--- a/test/ti-agent/common_load.h
+++ b/test/159-app-image-fields/src/AAA/Derived.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+package AAA;
-#include "jvmti.h"
-
-namespace art {
-
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+public class Derived extends Base {
+ // Allows public access to Base.value (Base is package-private) referenced as Derived.value.
+}
diff --git a/test/159-app-image-fields/src/Main.java b/test/159-app-image-fields/src/Main.java
new file mode 100644
index 0000000..d06a502
--- /dev/null
+++ b/test/159-app-image-fields/src/Main.java
@@ -0,0 +1,2156 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AAA.Derived;
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ // Make sure we resolve Fields before eating memory.
+ // (Making sure that the test passes in no-image configurations.)
+ Class.forName("Fields", false, Main.class.getClassLoader());
+ System.out.println("Eating all memory.");
+ Object memory = eatAllMemory();
+
+ // This test assumes that Derived is not yet resolved. In some configurations
+ // (notably interp-ac), Derived is already resolved by verifying Main at run
+ // time. Therefore we cannot assume that we get a certain `value` and need to
+ // simply check for consistency, i.e. `value == another_value`.
+ int value = 0;
+ try {
+ // If the ArtField* is erroneously left in the DexCache, this
+ // shall succeed despite the class Derived being unresolved so
+ // far. Otherwise, we shall throw OOME trying to resolve it.
+ value = Derived.value;
+ } catch (OutOfMemoryError e) {
+ value = -1;
+ }
+ Fields.clobberDexCache();
+ int another_value = 0;
+ try {
+ // Try again for comparison. Since the DexCache field array has been
+ // clobbered by Fields.clobberDexCache(), this shall throw OOME.
+ another_value = Derived.value;
+ } catch (OutOfMemoryError e) {
+ another_value = -1;
+ }
+ boolean memoryWasAllocated = (memory != null);
+ memory = null;
+ System.out.println("memoryWasAllocated = " + memoryWasAllocated);
+ System.out.println("match: " + (value == another_value));
+ if (value != another_value || (value != -1 && value != 42)) {
+ // Mismatch or unexpected value, print additional debugging information.
+ System.out.println("value: " + value);
+ System.out.println("another_value: " + another_value);
+ }
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
+
+ public static Object eatAllMemory() {
+ Object[] result = null;
+ int size = 1000000;
+ while (result == null && size != 0) {
+ try {
+ result = new Object[size];
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ if (result != null) {
+ int index = 0;
+ while (index != result.length && size != 0) {
+ try {
+ result[index] = new byte[size];
+ ++index;
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ }
+ return result;
+ }
+}
+
+// The naming is deliberate to take into account two different situations:
+// - eagerly preloading DexCache with the available candidate with the lowest index,
+// - not preloading DexCache and relying on the verification to populate it.
+// This corresponds to new and old behavior, respectively.
+//
+// Eager preloading: "LFields;" is after "LAAA/Base;" and "LAAA/Derived;" so that
+// Derived.value takes priority over Fields.testField*.
+//
+// Relying on verifier: "LFields;" is before "LMain;" so that the class definition
+// of Fields precedes the definition of Main (this is not strictly required but the
+// tools look at lexicographic ordering when there is no inheritance relationship)
+// and the verification of Main is last and fills the DexCache with Derived.value.
+//
+class Fields {
+ public static int clobberDexCache() {
+ return 0
+ + testField0000
+ + testField0001
+ + testField0002
+ + testField0003
+ + testField0004
+ + testField0005
+ + testField0006
+ + testField0007
+ + testField0008
+ + testField0009
+ + testField0010
+ + testField0011
+ + testField0012
+ + testField0013
+ + testField0014
+ + testField0015
+ + testField0016
+ + testField0017
+ + testField0018
+ + testField0019
+ + testField0020
+ + testField0021
+ + testField0022
+ + testField0023
+ + testField0024
+ + testField0025
+ + testField0026
+ + testField0027
+ + testField0028
+ + testField0029
+ + testField0030
+ + testField0031
+ + testField0032
+ + testField0033
+ + testField0034
+ + testField0035
+ + testField0036
+ + testField0037
+ + testField0038
+ + testField0039
+ + testField0040
+ + testField0041
+ + testField0042
+ + testField0043
+ + testField0044
+ + testField0045
+ + testField0046
+ + testField0047
+ + testField0048
+ + testField0049
+ + testField0050
+ + testField0051
+ + testField0052
+ + testField0053
+ + testField0054
+ + testField0055
+ + testField0056
+ + testField0057
+ + testField0058
+ + testField0059
+ + testField0060
+ + testField0061
+ + testField0062
+ + testField0063
+ + testField0064
+ + testField0065
+ + testField0066
+ + testField0067
+ + testField0068
+ + testField0069
+ + testField0070
+ + testField0071
+ + testField0072
+ + testField0073
+ + testField0074
+ + testField0075
+ + testField0076
+ + testField0077
+ + testField0078
+ + testField0079
+ + testField0080
+ + testField0081
+ + testField0082
+ + testField0083
+ + testField0084
+ + testField0085
+ + testField0086
+ + testField0087
+ + testField0088
+ + testField0089
+ + testField0090
+ + testField0091
+ + testField0092
+ + testField0093
+ + testField0094
+ + testField0095
+ + testField0096
+ + testField0097
+ + testField0098
+ + testField0099
+ + testField0100
+ + testField0101
+ + testField0102
+ + testField0103
+ + testField0104
+ + testField0105
+ + testField0106
+ + testField0107
+ + testField0108
+ + testField0109
+ + testField0110
+ + testField0111
+ + testField0112
+ + testField0113
+ + testField0114
+ + testField0115
+ + testField0116
+ + testField0117
+ + testField0118
+ + testField0119
+ + testField0120
+ + testField0121
+ + testField0122
+ + testField0123
+ + testField0124
+ + testField0125
+ + testField0126
+ + testField0127
+ + testField0128
+ + testField0129
+ + testField0130
+ + testField0131
+ + testField0132
+ + testField0133
+ + testField0134
+ + testField0135
+ + testField0136
+ + testField0137
+ + testField0138
+ + testField0139
+ + testField0140
+ + testField0141
+ + testField0142
+ + testField0143
+ + testField0144
+ + testField0145
+ + testField0146
+ + testField0147
+ + testField0148
+ + testField0149
+ + testField0150
+ + testField0151
+ + testField0152
+ + testField0153
+ + testField0154
+ + testField0155
+ + testField0156
+ + testField0157
+ + testField0158
+ + testField0159
+ + testField0160
+ + testField0161
+ + testField0162
+ + testField0163
+ + testField0164
+ + testField0165
+ + testField0166
+ + testField0167
+ + testField0168
+ + testField0169
+ + testField0170
+ + testField0171
+ + testField0172
+ + testField0173
+ + testField0174
+ + testField0175
+ + testField0176
+ + testField0177
+ + testField0178
+ + testField0179
+ + testField0180
+ + testField0181
+ + testField0182
+ + testField0183
+ + testField0184
+ + testField0185
+ + testField0186
+ + testField0187
+ + testField0188
+ + testField0189
+ + testField0190
+ + testField0191
+ + testField0192
+ + testField0193
+ + testField0194
+ + testField0195
+ + testField0196
+ + testField0197
+ + testField0198
+ + testField0199
+ + testField0200
+ + testField0201
+ + testField0202
+ + testField0203
+ + testField0204
+ + testField0205
+ + testField0206
+ + testField0207
+ + testField0208
+ + testField0209
+ + testField0210
+ + testField0211
+ + testField0212
+ + testField0213
+ + testField0214
+ + testField0215
+ + testField0216
+ + testField0217
+ + testField0218
+ + testField0219
+ + testField0220
+ + testField0221
+ + testField0222
+ + testField0223
+ + testField0224
+ + testField0225
+ + testField0226
+ + testField0227
+ + testField0228
+ + testField0229
+ + testField0230
+ + testField0231
+ + testField0232
+ + testField0233
+ + testField0234
+ + testField0235
+ + testField0236
+ + testField0237
+ + testField0238
+ + testField0239
+ + testField0240
+ + testField0241
+ + testField0242
+ + testField0243
+ + testField0244
+ + testField0245
+ + testField0246
+ + testField0247
+ + testField0248
+ + testField0249
+ + testField0250
+ + testField0251
+ + testField0252
+ + testField0253
+ + testField0254
+ + testField0255
+ + testField0256
+ + testField0257
+ + testField0258
+ + testField0259
+ + testField0260
+ + testField0261
+ + testField0262
+ + testField0263
+ + testField0264
+ + testField0265
+ + testField0266
+ + testField0267
+ + testField0268
+ + testField0269
+ + testField0270
+ + testField0271
+ + testField0272
+ + testField0273
+ + testField0274
+ + testField0275
+ + testField0276
+ + testField0277
+ + testField0278
+ + testField0279
+ + testField0280
+ + testField0281
+ + testField0282
+ + testField0283
+ + testField0284
+ + testField0285
+ + testField0286
+ + testField0287
+ + testField0288
+ + testField0289
+ + testField0290
+ + testField0291
+ + testField0292
+ + testField0293
+ + testField0294
+ + testField0295
+ + testField0296
+ + testField0297
+ + testField0298
+ + testField0299
+ + testField0300
+ + testField0301
+ + testField0302
+ + testField0303
+ + testField0304
+ + testField0305
+ + testField0306
+ + testField0307
+ + testField0308
+ + testField0309
+ + testField0310
+ + testField0311
+ + testField0312
+ + testField0313
+ + testField0314
+ + testField0315
+ + testField0316
+ + testField0317
+ + testField0318
+ + testField0319
+ + testField0320
+ + testField0321
+ + testField0322
+ + testField0323
+ + testField0324
+ + testField0325
+ + testField0326
+ + testField0327
+ + testField0328
+ + testField0329
+ + testField0330
+ + testField0331
+ + testField0332
+ + testField0333
+ + testField0334
+ + testField0335
+ + testField0336
+ + testField0337
+ + testField0338
+ + testField0339
+ + testField0340
+ + testField0341
+ + testField0342
+ + testField0343
+ + testField0344
+ + testField0345
+ + testField0346
+ + testField0347
+ + testField0348
+ + testField0349
+ + testField0350
+ + testField0351
+ + testField0352
+ + testField0353
+ + testField0354
+ + testField0355
+ + testField0356
+ + testField0357
+ + testField0358
+ + testField0359
+ + testField0360
+ + testField0361
+ + testField0362
+ + testField0363
+ + testField0364
+ + testField0365
+ + testField0366
+ + testField0367
+ + testField0368
+ + testField0369
+ + testField0370
+ + testField0371
+ + testField0372
+ + testField0373
+ + testField0374
+ + testField0375
+ + testField0376
+ + testField0377
+ + testField0378
+ + testField0379
+ + testField0380
+ + testField0381
+ + testField0382
+ + testField0383
+ + testField0384
+ + testField0385
+ + testField0386
+ + testField0387
+ + testField0388
+ + testField0389
+ + testField0390
+ + testField0391
+ + testField0392
+ + testField0393
+ + testField0394
+ + testField0395
+ + testField0396
+ + testField0397
+ + testField0398
+ + testField0399
+ + testField0400
+ + testField0401
+ + testField0402
+ + testField0403
+ + testField0404
+ + testField0405
+ + testField0406
+ + testField0407
+ + testField0408
+ + testField0409
+ + testField0410
+ + testField0411
+ + testField0412
+ + testField0413
+ + testField0414
+ + testField0415
+ + testField0416
+ + testField0417
+ + testField0418
+ + testField0419
+ + testField0420
+ + testField0421
+ + testField0422
+ + testField0423
+ + testField0424
+ + testField0425
+ + testField0426
+ + testField0427
+ + testField0428
+ + testField0429
+ + testField0430
+ + testField0431
+ + testField0432
+ + testField0433
+ + testField0434
+ + testField0435
+ + testField0436
+ + testField0437
+ + testField0438
+ + testField0439
+ + testField0440
+ + testField0441
+ + testField0442
+ + testField0443
+ + testField0444
+ + testField0445
+ + testField0446
+ + testField0447
+ + testField0448
+ + testField0449
+ + testField0450
+ + testField0451
+ + testField0452
+ + testField0453
+ + testField0454
+ + testField0455
+ + testField0456
+ + testField0457
+ + testField0458
+ + testField0459
+ + testField0460
+ + testField0461
+ + testField0462
+ + testField0463
+ + testField0464
+ + testField0465
+ + testField0466
+ + testField0467
+ + testField0468
+ + testField0469
+ + testField0470
+ + testField0471
+ + testField0472
+ + testField0473
+ + testField0474
+ + testField0475
+ + testField0476
+ + testField0477
+ + testField0478
+ + testField0479
+ + testField0480
+ + testField0481
+ + testField0482
+ + testField0483
+ + testField0484
+ + testField0485
+ + testField0486
+ + testField0487
+ + testField0488
+ + testField0489
+ + testField0490
+ + testField0491
+ + testField0492
+ + testField0493
+ + testField0494
+ + testField0495
+ + testField0496
+ + testField0497
+ + testField0498
+ + testField0499
+ + testField0500
+ + testField0501
+ + testField0502
+ + testField0503
+ + testField0504
+ + testField0505
+ + testField0506
+ + testField0507
+ + testField0508
+ + testField0509
+ + testField0510
+ + testField0511
+ + testField0512
+ + testField0513
+ + testField0514
+ + testField0515
+ + testField0516
+ + testField0517
+ + testField0518
+ + testField0519
+ + testField0520
+ + testField0521
+ + testField0522
+ + testField0523
+ + testField0524
+ + testField0525
+ + testField0526
+ + testField0527
+ + testField0528
+ + testField0529
+ + testField0530
+ + testField0531
+ + testField0532
+ + testField0533
+ + testField0534
+ + testField0535
+ + testField0536
+ + testField0537
+ + testField0538
+ + testField0539
+ + testField0540
+ + testField0541
+ + testField0542
+ + testField0543
+ + testField0544
+ + testField0545
+ + testField0546
+ + testField0547
+ + testField0548
+ + testField0549
+ + testField0550
+ + testField0551
+ + testField0552
+ + testField0553
+ + testField0554
+ + testField0555
+ + testField0556
+ + testField0557
+ + testField0558
+ + testField0559
+ + testField0560
+ + testField0561
+ + testField0562
+ + testField0563
+ + testField0564
+ + testField0565
+ + testField0566
+ + testField0567
+ + testField0568
+ + testField0569
+ + testField0570
+ + testField0571
+ + testField0572
+ + testField0573
+ + testField0574
+ + testField0575
+ + testField0576
+ + testField0577
+ + testField0578
+ + testField0579
+ + testField0580
+ + testField0581
+ + testField0582
+ + testField0583
+ + testField0584
+ + testField0585
+ + testField0586
+ + testField0587
+ + testField0588
+ + testField0589
+ + testField0590
+ + testField0591
+ + testField0592
+ + testField0593
+ + testField0594
+ + testField0595
+ + testField0596
+ + testField0597
+ + testField0598
+ + testField0599
+ + testField0600
+ + testField0601
+ + testField0602
+ + testField0603
+ + testField0604
+ + testField0605
+ + testField0606
+ + testField0607
+ + testField0608
+ + testField0609
+ + testField0610
+ + testField0611
+ + testField0612
+ + testField0613
+ + testField0614
+ + testField0615
+ + testField0616
+ + testField0617
+ + testField0618
+ + testField0619
+ + testField0620
+ + testField0621
+ + testField0622
+ + testField0623
+ + testField0624
+ + testField0625
+ + testField0626
+ + testField0627
+ + testField0628
+ + testField0629
+ + testField0630
+ + testField0631
+ + testField0632
+ + testField0633
+ + testField0634
+ + testField0635
+ + testField0636
+ + testField0637
+ + testField0638
+ + testField0639
+ + testField0640
+ + testField0641
+ + testField0642
+ + testField0643
+ + testField0644
+ + testField0645
+ + testField0646
+ + testField0647
+ + testField0648
+ + testField0649
+ + testField0650
+ + testField0651
+ + testField0652
+ + testField0653
+ + testField0654
+ + testField0655
+ + testField0656
+ + testField0657
+ + testField0658
+ + testField0659
+ + testField0660
+ + testField0661
+ + testField0662
+ + testField0663
+ + testField0664
+ + testField0665
+ + testField0666
+ + testField0667
+ + testField0668
+ + testField0669
+ + testField0670
+ + testField0671
+ + testField0672
+ + testField0673
+ + testField0674
+ + testField0675
+ + testField0676
+ + testField0677
+ + testField0678
+ + testField0679
+ + testField0680
+ + testField0681
+ + testField0682
+ + testField0683
+ + testField0684
+ + testField0685
+ + testField0686
+ + testField0687
+ + testField0688
+ + testField0689
+ + testField0690
+ + testField0691
+ + testField0692
+ + testField0693
+ + testField0694
+ + testField0695
+ + testField0696
+ + testField0697
+ + testField0698
+ + testField0699
+ + testField0700
+ + testField0701
+ + testField0702
+ + testField0703
+ + testField0704
+ + testField0705
+ + testField0706
+ + testField0707
+ + testField0708
+ + testField0709
+ + testField0710
+ + testField0711
+ + testField0712
+ + testField0713
+ + testField0714
+ + testField0715
+ + testField0716
+ + testField0717
+ + testField0718
+ + testField0719
+ + testField0720
+ + testField0721
+ + testField0722
+ + testField0723
+ + testField0724
+ + testField0725
+ + testField0726
+ + testField0727
+ + testField0728
+ + testField0729
+ + testField0730
+ + testField0731
+ + testField0732
+ + testField0733
+ + testField0734
+ + testField0735
+ + testField0736
+ + testField0737
+ + testField0738
+ + testField0739
+ + testField0740
+ + testField0741
+ + testField0742
+ + testField0743
+ + testField0744
+ + testField0745
+ + testField0746
+ + testField0747
+ + testField0748
+ + testField0749
+ + testField0750
+ + testField0751
+ + testField0752
+ + testField0753
+ + testField0754
+ + testField0755
+ + testField0756
+ + testField0757
+ + testField0758
+ + testField0759
+ + testField0760
+ + testField0761
+ + testField0762
+ + testField0763
+ + testField0764
+ + testField0765
+ + testField0766
+ + testField0767
+ + testField0768
+ + testField0769
+ + testField0770
+ + testField0771
+ + testField0772
+ + testField0773
+ + testField0774
+ + testField0775
+ + testField0776
+ + testField0777
+ + testField0778
+ + testField0779
+ + testField0780
+ + testField0781
+ + testField0782
+ + testField0783
+ + testField0784
+ + testField0785
+ + testField0786
+ + testField0787
+ + testField0788
+ + testField0789
+ + testField0790
+ + testField0791
+ + testField0792
+ + testField0793
+ + testField0794
+ + testField0795
+ + testField0796
+ + testField0797
+ + testField0798
+ + testField0799
+ + testField0800
+ + testField0801
+ + testField0802
+ + testField0803
+ + testField0804
+ + testField0805
+ + testField0806
+ + testField0807
+ + testField0808
+ + testField0809
+ + testField0810
+ + testField0811
+ + testField0812
+ + testField0813
+ + testField0814
+ + testField0815
+ + testField0816
+ + testField0817
+ + testField0818
+ + testField0819
+ + testField0820
+ + testField0821
+ + testField0822
+ + testField0823
+ + testField0824
+ + testField0825
+ + testField0826
+ + testField0827
+ + testField0828
+ + testField0829
+ + testField0830
+ + testField0831
+ + testField0832
+ + testField0833
+ + testField0834
+ + testField0835
+ + testField0836
+ + testField0837
+ + testField0838
+ + testField0839
+ + testField0840
+ + testField0841
+ + testField0842
+ + testField0843
+ + testField0844
+ + testField0845
+ + testField0846
+ + testField0847
+ + testField0848
+ + testField0849
+ + testField0850
+ + testField0851
+ + testField0852
+ + testField0853
+ + testField0854
+ + testField0855
+ + testField0856
+ + testField0857
+ + testField0858
+ + testField0859
+ + testField0860
+ + testField0861
+ + testField0862
+ + testField0863
+ + testField0864
+ + testField0865
+ + testField0866
+ + testField0867
+ + testField0868
+ + testField0869
+ + testField0870
+ + testField0871
+ + testField0872
+ + testField0873
+ + testField0874
+ + testField0875
+ + testField0876
+ + testField0877
+ + testField0878
+ + testField0879
+ + testField0880
+ + testField0881
+ + testField0882
+ + testField0883
+ + testField0884
+ + testField0885
+ + testField0886
+ + testField0887
+ + testField0888
+ + testField0889
+ + testField0890
+ + testField0891
+ + testField0892
+ + testField0893
+ + testField0894
+ + testField0895
+ + testField0896
+ + testField0897
+ + testField0898
+ + testField0899
+ + testField0900
+ + testField0901
+ + testField0902
+ + testField0903
+ + testField0904
+ + testField0905
+ + testField0906
+ + testField0907
+ + testField0908
+ + testField0909
+ + testField0910
+ + testField0911
+ + testField0912
+ + testField0913
+ + testField0914
+ + testField0915
+ + testField0916
+ + testField0917
+ + testField0918
+ + testField0919
+ + testField0920
+ + testField0921
+ + testField0922
+ + testField0923
+ + testField0924
+ + testField0925
+ + testField0926
+ + testField0927
+ + testField0928
+ + testField0929
+ + testField0930
+ + testField0931
+ + testField0932
+ + testField0933
+ + testField0934
+ + testField0935
+ + testField0936
+ + testField0937
+ + testField0938
+ + testField0939
+ + testField0940
+ + testField0941
+ + testField0942
+ + testField0943
+ + testField0944
+ + testField0945
+ + testField0946
+ + testField0947
+ + testField0948
+ + testField0949
+ + testField0950
+ + testField0951
+ + testField0952
+ + testField0953
+ + testField0954
+ + testField0955
+ + testField0956
+ + testField0957
+ + testField0958
+ + testField0959
+ + testField0960
+ + testField0961
+ + testField0962
+ + testField0963
+ + testField0964
+ + testField0965
+ + testField0966
+ + testField0967
+ + testField0968
+ + testField0969
+ + testField0970
+ + testField0971
+ + testField0972
+ + testField0973
+ + testField0974
+ + testField0975
+ + testField0976
+ + testField0977
+ + testField0978
+ + testField0979
+ + testField0980
+ + testField0981
+ + testField0982
+ + testField0983
+ + testField0984
+ + testField0985
+ + testField0986
+ + testField0987
+ + testField0988
+ + testField0989
+ + testField0990
+ + testField0991
+ + testField0992
+ + testField0993
+ + testField0994
+ + testField0995
+ + testField0996
+ + testField0997
+ + testField0998
+ + testField0999
+ + testField1000
+ + testField1001
+ + testField1002
+ + testField1003
+ + testField1004
+ + testField1005
+ + testField1006
+ + testField1007
+ + testField1008
+ + testField1009
+ + testField1010
+ + testField1011
+ + testField1012
+ + testField1013
+ + testField1014
+ + testField1015
+ + testField1016
+ + testField1017
+ + testField1018
+ + testField1019
+ + testField1020
+ + testField1021
+ + testField1022
+ + testField1023
+ + 0;
+ }
+
+ private static int testField0000 = 0;
+ private static int testField0001 = 1;
+ private static int testField0002 = 2;
+ private static int testField0003 = 3;
+ private static int testField0004 = 4;
+ private static int testField0005 = 5;
+ private static int testField0006 = 6;
+ private static int testField0007 = 7;
+ private static int testField0008 = 8;
+ private static int testField0009 = 9;
+ private static int testField0010 = 10;
+ private static int testField0011 = 11;
+ private static int testField0012 = 12;
+ private static int testField0013 = 13;
+ private static int testField0014 = 14;
+ private static int testField0015 = 15;
+ private static int testField0016 = 16;
+ private static int testField0017 = 17;
+ private static int testField0018 = 18;
+ private static int testField0019 = 19;
+ private static int testField0020 = 20;
+ private static int testField0021 = 21;
+ private static int testField0022 = 22;
+ private static int testField0023 = 23;
+ private static int testField0024 = 24;
+ private static int testField0025 = 25;
+ private static int testField0026 = 26;
+ private static int testField0027 = 27;
+ private static int testField0028 = 28;
+ private static int testField0029 = 29;
+ private static int testField0030 = 30;
+ private static int testField0031 = 31;
+ private static int testField0032 = 32;
+ private static int testField0033 = 33;
+ private static int testField0034 = 34;
+ private static int testField0035 = 35;
+ private static int testField0036 = 36;
+ private static int testField0037 = 37;
+ private static int testField0038 = 38;
+ private static int testField0039 = 39;
+ private static int testField0040 = 40;
+ private static int testField0041 = 41;
+ private static int testField0042 = 42;
+ private static int testField0043 = 43;
+ private static int testField0044 = 44;
+ private static int testField0045 = 45;
+ private static int testField0046 = 46;
+ private static int testField0047 = 47;
+ private static int testField0048 = 48;
+ private static int testField0049 = 49;
+ private static int testField0050 = 50;
+ private static int testField0051 = 51;
+ private static int testField0052 = 52;
+ private static int testField0053 = 53;
+ private static int testField0054 = 54;
+ private static int testField0055 = 55;
+ private static int testField0056 = 56;
+ private static int testField0057 = 57;
+ private static int testField0058 = 58;
+ private static int testField0059 = 59;
+ private static int testField0060 = 60;
+ private static int testField0061 = 61;
+ private static int testField0062 = 62;
+ private static int testField0063 = 63;
+ private static int testField0064 = 64;
+ private static int testField0065 = 65;
+ private static int testField0066 = 66;
+ private static int testField0067 = 67;
+ private static int testField0068 = 68;
+ private static int testField0069 = 69;
+ private static int testField0070 = 70;
+ private static int testField0071 = 71;
+ private static int testField0072 = 72;
+ private static int testField0073 = 73;
+ private static int testField0074 = 74;
+ private static int testField0075 = 75;
+ private static int testField0076 = 76;
+ private static int testField0077 = 77;
+ private static int testField0078 = 78;
+ private static int testField0079 = 79;
+ private static int testField0080 = 80;
+ private static int testField0081 = 81;
+ private static int testField0082 = 82;
+ private static int testField0083 = 83;
+ private static int testField0084 = 84;
+ private static int testField0085 = 85;
+ private static int testField0086 = 86;
+ private static int testField0087 = 87;
+ private static int testField0088 = 88;
+ private static int testField0089 = 89;
+ private static int testField0090 = 90;
+ private static int testField0091 = 91;
+ private static int testField0092 = 92;
+ private static int testField0093 = 93;
+ private static int testField0094 = 94;
+ private static int testField0095 = 95;
+ private static int testField0096 = 96;
+ private static int testField0097 = 97;
+ private static int testField0098 = 98;
+ private static int testField0099 = 99;
+ private static int testField0100 = 100;
+ private static int testField0101 = 101;
+ private static int testField0102 = 102;
+ private static int testField0103 = 103;
+ private static int testField0104 = 104;
+ private static int testField0105 = 105;
+ private static int testField0106 = 106;
+ private static int testField0107 = 107;
+ private static int testField0108 = 108;
+ private static int testField0109 = 109;
+ private static int testField0110 = 110;
+ private static int testField0111 = 111;
+ private static int testField0112 = 112;
+ private static int testField0113 = 113;
+ private static int testField0114 = 114;
+ private static int testField0115 = 115;
+ private static int testField0116 = 116;
+ private static int testField0117 = 117;
+ private static int testField0118 = 118;
+ private static int testField0119 = 119;
+ private static int testField0120 = 120;
+ private static int testField0121 = 121;
+ private static int testField0122 = 122;
+ private static int testField0123 = 123;
+ private static int testField0124 = 124;
+ private static int testField0125 = 125;
+ private static int testField0126 = 126;
+ private static int testField0127 = 127;
+ private static int testField0128 = 128;
+ private static int testField0129 = 129;
+ private static int testField0130 = 130;
+ private static int testField0131 = 131;
+ private static int testField0132 = 132;
+ private static int testField0133 = 133;
+ private static int testField0134 = 134;
+ private static int testField0135 = 135;
+ private static int testField0136 = 136;
+ private static int testField0137 = 137;
+ private static int testField0138 = 138;
+ private static int testField0139 = 139;
+ private static int testField0140 = 140;
+ private static int testField0141 = 141;
+ private static int testField0142 = 142;
+ private static int testField0143 = 143;
+ private static int testField0144 = 144;
+ private static int testField0145 = 145;
+ private static int testField0146 = 146;
+ private static int testField0147 = 147;
+ private static int testField0148 = 148;
+ private static int testField0149 = 149;
+ private static int testField0150 = 150;
+ private static int testField0151 = 151;
+ private static int testField0152 = 152;
+ private static int testField0153 = 153;
+ private static int testField0154 = 154;
+ private static int testField0155 = 155;
+ private static int testField0156 = 156;
+ private static int testField0157 = 157;
+ private static int testField0158 = 158;
+ private static int testField0159 = 159;
+ private static int testField0160 = 160;
+ private static int testField0161 = 161;
+ private static int testField0162 = 162;
+ private static int testField0163 = 163;
+ private static int testField0164 = 164;
+ private static int testField0165 = 165;
+ private static int testField0166 = 166;
+ private static int testField0167 = 167;
+ private static int testField0168 = 168;
+ private static int testField0169 = 169;
+ private static int testField0170 = 170;
+ private static int testField0171 = 171;
+ private static int testField0172 = 172;
+ private static int testField0173 = 173;
+ private static int testField0174 = 174;
+ private static int testField0175 = 175;
+ private static int testField0176 = 176;
+ private static int testField0177 = 177;
+ private static int testField0178 = 178;
+ private static int testField0179 = 179;
+ private static int testField0180 = 180;
+ private static int testField0181 = 181;
+ private static int testField0182 = 182;
+ private static int testField0183 = 183;
+ private static int testField0184 = 184;
+ private static int testField0185 = 185;
+ private static int testField0186 = 186;
+ private static int testField0187 = 187;
+ private static int testField0188 = 188;
+ private static int testField0189 = 189;
+ private static int testField0190 = 190;
+ private static int testField0191 = 191;
+ private static int testField0192 = 192;
+ private static int testField0193 = 193;
+ private static int testField0194 = 194;
+ private static int testField0195 = 195;
+ private static int testField0196 = 196;
+ private static int testField0197 = 197;
+ private static int testField0198 = 198;
+ private static int testField0199 = 199;
+ private static int testField0200 = 200;
+ private static int testField0201 = 201;
+ private static int testField0202 = 202;
+ private static int testField0203 = 203;
+ private static int testField0204 = 204;
+ private static int testField0205 = 205;
+ private static int testField0206 = 206;
+ private static int testField0207 = 207;
+ private static int testField0208 = 208;
+ private static int testField0209 = 209;
+ private static int testField0210 = 210;
+ private static int testField0211 = 211;
+ private static int testField0212 = 212;
+ private static int testField0213 = 213;
+ private static int testField0214 = 214;
+ private static int testField0215 = 215;
+ private static int testField0216 = 216;
+ private static int testField0217 = 217;
+ private static int testField0218 = 218;
+ private static int testField0219 = 219;
+ private static int testField0220 = 220;
+ private static int testField0221 = 221;
+ private static int testField0222 = 222;
+ private static int testField0223 = 223;
+ private static int testField0224 = 224;
+ private static int testField0225 = 225;
+ private static int testField0226 = 226;
+ private static int testField0227 = 227;
+ private static int testField0228 = 228;
+ private static int testField0229 = 229;
+ private static int testField0230 = 230;
+ private static int testField0231 = 231;
+ private static int testField0232 = 232;
+ private static int testField0233 = 233;
+ private static int testField0234 = 234;
+ private static int testField0235 = 235;
+ private static int testField0236 = 236;
+ private static int testField0237 = 237;
+ private static int testField0238 = 238;
+ private static int testField0239 = 239;
+ private static int testField0240 = 240;
+ private static int testField0241 = 241;
+ private static int testField0242 = 242;
+ private static int testField0243 = 243;
+ private static int testField0244 = 244;
+ private static int testField0245 = 245;
+ private static int testField0246 = 246;
+ private static int testField0247 = 247;
+ private static int testField0248 = 248;
+ private static int testField0249 = 249;
+ private static int testField0250 = 250;
+ private static int testField0251 = 251;
+ private static int testField0252 = 252;
+ private static int testField0253 = 253;
+ private static int testField0254 = 254;
+ private static int testField0255 = 255;
+ private static int testField0256 = 256;
+ private static int testField0257 = 257;
+ private static int testField0258 = 258;
+ private static int testField0259 = 259;
+ private static int testField0260 = 260;
+ private static int testField0261 = 261;
+ private static int testField0262 = 262;
+ private static int testField0263 = 263;
+ private static int testField0264 = 264;
+ private static int testField0265 = 265;
+ private static int testField0266 = 266;
+ private static int testField0267 = 267;
+ private static int testField0268 = 268;
+ private static int testField0269 = 269;
+ private static int testField0270 = 270;
+ private static int testField0271 = 271;
+ private static int testField0272 = 272;
+ private static int testField0273 = 273;
+ private static int testField0274 = 274;
+ private static int testField0275 = 275;
+ private static int testField0276 = 276;
+ private static int testField0277 = 277;
+ private static int testField0278 = 278;
+ private static int testField0279 = 279;
+ private static int testField0280 = 280;
+ private static int testField0281 = 281;
+ private static int testField0282 = 282;
+ private static int testField0283 = 283;
+ private static int testField0284 = 284;
+ private static int testField0285 = 285;
+ private static int testField0286 = 286;
+ private static int testField0287 = 287;
+ private static int testField0288 = 288;
+ private static int testField0289 = 289;
+ private static int testField0290 = 290;
+ private static int testField0291 = 291;
+ private static int testField0292 = 292;
+ private static int testField0293 = 293;
+ private static int testField0294 = 294;
+ private static int testField0295 = 295;
+ private static int testField0296 = 296;
+ private static int testField0297 = 297;
+ private static int testField0298 = 298;
+ private static int testField0299 = 299;
+ private static int testField0300 = 300;
+ private static int testField0301 = 301;
+ private static int testField0302 = 302;
+ private static int testField0303 = 303;
+ private static int testField0304 = 304;
+ private static int testField0305 = 305;
+ private static int testField0306 = 306;
+ private static int testField0307 = 307;
+ private static int testField0308 = 308;
+ private static int testField0309 = 309;
+ private static int testField0310 = 310;
+ private static int testField0311 = 311;
+ private static int testField0312 = 312;
+ private static int testField0313 = 313;
+ private static int testField0314 = 314;
+ private static int testField0315 = 315;
+ private static int testField0316 = 316;
+ private static int testField0317 = 317;
+ private static int testField0318 = 318;
+ private static int testField0319 = 319;
+ private static int testField0320 = 320;
+ private static int testField0321 = 321;
+ private static int testField0322 = 322;
+ private static int testField0323 = 323;
+ private static int testField0324 = 324;
+ private static int testField0325 = 325;
+ private static int testField0326 = 326;
+ private static int testField0327 = 327;
+ private static int testField0328 = 328;
+ private static int testField0329 = 329;
+ private static int testField0330 = 330;
+ private static int testField0331 = 331;
+ private static int testField0332 = 332;
+ private static int testField0333 = 333;
+ private static int testField0334 = 334;
+ private static int testField0335 = 335;
+ private static int testField0336 = 336;
+ private static int testField0337 = 337;
+ private static int testField0338 = 338;
+ private static int testField0339 = 339;
+ private static int testField0340 = 340;
+ private static int testField0341 = 341;
+ private static int testField0342 = 342;
+ private static int testField0343 = 343;
+ private static int testField0344 = 344;
+ private static int testField0345 = 345;
+ private static int testField0346 = 346;
+ private static int testField0347 = 347;
+ private static int testField0348 = 348;
+ private static int testField0349 = 349;
+ private static int testField0350 = 350;
+ private static int testField0351 = 351;
+ private static int testField0352 = 352;
+ private static int testField0353 = 353;
+ private static int testField0354 = 354;
+ private static int testField0355 = 355;
+ private static int testField0356 = 356;
+ private static int testField0357 = 357;
+ private static int testField0358 = 358;
+ private static int testField0359 = 359;
+ private static int testField0360 = 360;
+ private static int testField0361 = 361;
+ private static int testField0362 = 362;
+ private static int testField0363 = 363;
+ private static int testField0364 = 364;
+ private static int testField0365 = 365;
+ private static int testField0366 = 366;
+ private static int testField0367 = 367;
+ private static int testField0368 = 368;
+ private static int testField0369 = 369;
+ private static int testField0370 = 370;
+ private static int testField0371 = 371;
+ private static int testField0372 = 372;
+ private static int testField0373 = 373;
+ private static int testField0374 = 374;
+ private static int testField0375 = 375;
+ private static int testField0376 = 376;
+ private static int testField0377 = 377;
+ private static int testField0378 = 378;
+ private static int testField0379 = 379;
+ private static int testField0380 = 380;
+ private static int testField0381 = 381;
+ private static int testField0382 = 382;
+ private static int testField0383 = 383;
+ private static int testField0384 = 384;
+ private static int testField0385 = 385;
+ private static int testField0386 = 386;
+ private static int testField0387 = 387;
+ private static int testField0388 = 388;
+ private static int testField0389 = 389;
+ private static int testField0390 = 390;
+ private static int testField0391 = 391;
+ private static int testField0392 = 392;
+ private static int testField0393 = 393;
+ private static int testField0394 = 394;
+ private static int testField0395 = 395;
+ private static int testField0396 = 396;
+ private static int testField0397 = 397;
+ private static int testField0398 = 398;
+ private static int testField0399 = 399;
+ private static int testField0400 = 400;
+ private static int testField0401 = 401;
+ private static int testField0402 = 402;
+ private static int testField0403 = 403;
+ private static int testField0404 = 404;
+ private static int testField0405 = 405;
+ private static int testField0406 = 406;
+ private static int testField0407 = 407;
+ private static int testField0408 = 408;
+ private static int testField0409 = 409;
+ private static int testField0410 = 410;
+ private static int testField0411 = 411;
+ private static int testField0412 = 412;
+ private static int testField0413 = 413;
+ private static int testField0414 = 414;
+ private static int testField0415 = 415;
+ private static int testField0416 = 416;
+ private static int testField0417 = 417;
+ private static int testField0418 = 418;
+ private static int testField0419 = 419;
+ private static int testField0420 = 420;
+ private static int testField0421 = 421;
+ private static int testField0422 = 422;
+ private static int testField0423 = 423;
+ private static int testField0424 = 424;
+ private static int testField0425 = 425;
+ private static int testField0426 = 426;
+ private static int testField0427 = 427;
+ private static int testField0428 = 428;
+ private static int testField0429 = 429;
+ private static int testField0430 = 430;
+ private static int testField0431 = 431;
+ private static int testField0432 = 432;
+ private static int testField0433 = 433;
+ private static int testField0434 = 434;
+ private static int testField0435 = 435;
+ private static int testField0436 = 436;
+ private static int testField0437 = 437;
+ private static int testField0438 = 438;
+ private static int testField0439 = 439;
+ private static int testField0440 = 440;
+ private static int testField0441 = 441;
+ private static int testField0442 = 442;
+ private static int testField0443 = 443;
+ private static int testField0444 = 444;
+ private static int testField0445 = 445;
+ private static int testField0446 = 446;
+ private static int testField0447 = 447;
+ private static int testField0448 = 448;
+ private static int testField0449 = 449;
+ private static int testField0450 = 450;
+ private static int testField0451 = 451;
+ private static int testField0452 = 452;
+ private static int testField0453 = 453;
+ private static int testField0454 = 454;
+ private static int testField0455 = 455;
+ private static int testField0456 = 456;
+ private static int testField0457 = 457;
+ private static int testField0458 = 458;
+ private static int testField0459 = 459;
+ private static int testField0460 = 460;
+ private static int testField0461 = 461;
+ private static int testField0462 = 462;
+ private static int testField0463 = 463;
+ private static int testField0464 = 464;
+ private static int testField0465 = 465;
+ private static int testField0466 = 466;
+ private static int testField0467 = 467;
+ private static int testField0468 = 468;
+ private static int testField0469 = 469;
+ private static int testField0470 = 470;
+ private static int testField0471 = 471;
+ private static int testField0472 = 472;
+ private static int testField0473 = 473;
+ private static int testField0474 = 474;
+ private static int testField0475 = 475;
+ private static int testField0476 = 476;
+ private static int testField0477 = 477;
+ private static int testField0478 = 478;
+ private static int testField0479 = 479;
+ private static int testField0480 = 480;
+ private static int testField0481 = 481;
+ private static int testField0482 = 482;
+ private static int testField0483 = 483;
+ private static int testField0484 = 484;
+ private static int testField0485 = 485;
+ private static int testField0486 = 486;
+ private static int testField0487 = 487;
+ private static int testField0488 = 488;
+ private static int testField0489 = 489;
+ private static int testField0490 = 490;
+ private static int testField0491 = 491;
+ private static int testField0492 = 492;
+ private static int testField0493 = 493;
+ private static int testField0494 = 494;
+ private static int testField0495 = 495;
+ private static int testField0496 = 496;
+ private static int testField0497 = 497;
+ private static int testField0498 = 498;
+ private static int testField0499 = 499;
+ private static int testField0500 = 500;
+ private static int testField0501 = 501;
+ private static int testField0502 = 502;
+ private static int testField0503 = 503;
+ private static int testField0504 = 504;
+ private static int testField0505 = 505;
+ private static int testField0506 = 506;
+ private static int testField0507 = 507;
+ private static int testField0508 = 508;
+ private static int testField0509 = 509;
+ private static int testField0510 = 510;
+ private static int testField0511 = 511;
+ private static int testField0512 = 512;
+ private static int testField0513 = 513;
+ private static int testField0514 = 514;
+ private static int testField0515 = 515;
+ private static int testField0516 = 516;
+ private static int testField0517 = 517;
+ private static int testField0518 = 518;
+ private static int testField0519 = 519;
+ private static int testField0520 = 520;
+ private static int testField0521 = 521;
+ private static int testField0522 = 522;
+ private static int testField0523 = 523;
+ private static int testField0524 = 524;
+ private static int testField0525 = 525;
+ private static int testField0526 = 526;
+ private static int testField0527 = 527;
+ private static int testField0528 = 528;
+ private static int testField0529 = 529;
+ private static int testField0530 = 530;
+ private static int testField0531 = 531;
+ private static int testField0532 = 532;
+ private static int testField0533 = 533;
+ private static int testField0534 = 534;
+ private static int testField0535 = 535;
+ private static int testField0536 = 536;
+ private static int testField0537 = 537;
+ private static int testField0538 = 538;
+ private static int testField0539 = 539;
+ private static int testField0540 = 540;
+ private static int testField0541 = 541;
+ private static int testField0542 = 542;
+ private static int testField0543 = 543;
+ private static int testField0544 = 544;
+ private static int testField0545 = 545;
+ private static int testField0546 = 546;
+ private static int testField0547 = 547;
+ private static int testField0548 = 548;
+ private static int testField0549 = 549;
+ private static int testField0550 = 550;
+ private static int testField0551 = 551;
+ private static int testField0552 = 552;
+ private static int testField0553 = 553;
+ private static int testField0554 = 554;
+ private static int testField0555 = 555;
+ private static int testField0556 = 556;
+ private static int testField0557 = 557;
+ private static int testField0558 = 558;
+ private static int testField0559 = 559;
+ private static int testField0560 = 560;
+ private static int testField0561 = 561;
+ private static int testField0562 = 562;
+ private static int testField0563 = 563;
+ private static int testField0564 = 564;
+ private static int testField0565 = 565;
+ private static int testField0566 = 566;
+ private static int testField0567 = 567;
+ private static int testField0568 = 568;
+ private static int testField0569 = 569;
+ private static int testField0570 = 570;
+ private static int testField0571 = 571;
+ private static int testField0572 = 572;
+ private static int testField0573 = 573;
+ private static int testField0574 = 574;
+ private static int testField0575 = 575;
+ private static int testField0576 = 576;
+ private static int testField0577 = 577;
+ private static int testField0578 = 578;
+ private static int testField0579 = 579;
+ private static int testField0580 = 580;
+ private static int testField0581 = 581;
+ private static int testField0582 = 582;
+ private static int testField0583 = 583;
+ private static int testField0584 = 584;
+ private static int testField0585 = 585;
+ private static int testField0586 = 586;
+ private static int testField0587 = 587;
+ private static int testField0588 = 588;
+ private static int testField0589 = 589;
+ private static int testField0590 = 590;
+ private static int testField0591 = 591;
+ private static int testField0592 = 592;
+ private static int testField0593 = 593;
+ private static int testField0594 = 594;
+ private static int testField0595 = 595;
+ private static int testField0596 = 596;
+ private static int testField0597 = 597;
+ private static int testField0598 = 598;
+ private static int testField0599 = 599;
+ private static int testField0600 = 600;
+ private static int testField0601 = 601;
+ private static int testField0602 = 602;
+ private static int testField0603 = 603;
+ private static int testField0604 = 604;
+ private static int testField0605 = 605;
+ private static int testField0606 = 606;
+ private static int testField0607 = 607;
+ private static int testField0608 = 608;
+ private static int testField0609 = 609;
+ private static int testField0610 = 610;
+ private static int testField0611 = 611;
+ private static int testField0612 = 612;
+ private static int testField0613 = 613;
+ private static int testField0614 = 614;
+ private static int testField0615 = 615;
+ private static int testField0616 = 616;
+ private static int testField0617 = 617;
+ private static int testField0618 = 618;
+ private static int testField0619 = 619;
+ private static int testField0620 = 620;
+ private static int testField0621 = 621;
+ private static int testField0622 = 622;
+ private static int testField0623 = 623;
+ private static int testField0624 = 624;
+ private static int testField0625 = 625;
+ private static int testField0626 = 626;
+ private static int testField0627 = 627;
+ private static int testField0628 = 628;
+ private static int testField0629 = 629;
+ private static int testField0630 = 630;
+ private static int testField0631 = 631;
+ private static int testField0632 = 632;
+ private static int testField0633 = 633;
+ private static int testField0634 = 634;
+ private static int testField0635 = 635;
+ private static int testField0636 = 636;
+ private static int testField0637 = 637;
+ private static int testField0638 = 638;
+ private static int testField0639 = 639;
+ private static int testField0640 = 640;
+ private static int testField0641 = 641;
+ private static int testField0642 = 642;
+ private static int testField0643 = 643;
+ private static int testField0644 = 644;
+ private static int testField0645 = 645;
+ private static int testField0646 = 646;
+ private static int testField0647 = 647;
+ private static int testField0648 = 648;
+ private static int testField0649 = 649;
+ private static int testField0650 = 650;
+ private static int testField0651 = 651;
+ private static int testField0652 = 652;
+ private static int testField0653 = 653;
+ private static int testField0654 = 654;
+ private static int testField0655 = 655;
+ private static int testField0656 = 656;
+ private static int testField0657 = 657;
+ private static int testField0658 = 658;
+ private static int testField0659 = 659;
+ private static int testField0660 = 660;
+ private static int testField0661 = 661;
+ private static int testField0662 = 662;
+ private static int testField0663 = 663;
+ private static int testField0664 = 664;
+ private static int testField0665 = 665;
+ private static int testField0666 = 666;
+ private static int testField0667 = 667;
+ private static int testField0668 = 668;
+ private static int testField0669 = 669;
+ private static int testField0670 = 670;
+ private static int testField0671 = 671;
+ private static int testField0672 = 672;
+ private static int testField0673 = 673;
+ private static int testField0674 = 674;
+ private static int testField0675 = 675;
+ private static int testField0676 = 676;
+ private static int testField0677 = 677;
+ private static int testField0678 = 678;
+ private static int testField0679 = 679;
+ private static int testField0680 = 680;
+ private static int testField0681 = 681;
+ private static int testField0682 = 682;
+ private static int testField0683 = 683;
+ private static int testField0684 = 684;
+ private static int testField0685 = 685;
+ private static int testField0686 = 686;
+ private static int testField0687 = 687;
+ private static int testField0688 = 688;
+ private static int testField0689 = 689;
+ private static int testField0690 = 690;
+ private static int testField0691 = 691;
+ private static int testField0692 = 692;
+ private static int testField0693 = 693;
+ private static int testField0694 = 694;
+ private static int testField0695 = 695;
+ private static int testField0696 = 696;
+ private static int testField0697 = 697;
+ private static int testField0698 = 698;
+ private static int testField0699 = 699;
+ private static int testField0700 = 700;
+ private static int testField0701 = 701;
+ private static int testField0702 = 702;
+ private static int testField0703 = 703;
+ private static int testField0704 = 704;
+ private static int testField0705 = 705;
+ private static int testField0706 = 706;
+ private static int testField0707 = 707;
+ private static int testField0708 = 708;
+ private static int testField0709 = 709;
+ private static int testField0710 = 710;
+ private static int testField0711 = 711;
+ private static int testField0712 = 712;
+ private static int testField0713 = 713;
+ private static int testField0714 = 714;
+ private static int testField0715 = 715;
+ private static int testField0716 = 716;
+ private static int testField0717 = 717;
+ private static int testField0718 = 718;
+ private static int testField0719 = 719;
+ private static int testField0720 = 720;
+ private static int testField0721 = 721;
+ private static int testField0722 = 722;
+ private static int testField0723 = 723;
+ private static int testField0724 = 724;
+ private static int testField0725 = 725;
+ private static int testField0726 = 726;
+ private static int testField0727 = 727;
+ private static int testField0728 = 728;
+ private static int testField0729 = 729;
+ private static int testField0730 = 730;
+ private static int testField0731 = 731;
+ private static int testField0732 = 732;
+ private static int testField0733 = 733;
+ private static int testField0734 = 734;
+ private static int testField0735 = 735;
+ private static int testField0736 = 736;
+ private static int testField0737 = 737;
+ private static int testField0738 = 738;
+ private static int testField0739 = 739;
+ private static int testField0740 = 740;
+ private static int testField0741 = 741;
+ private static int testField0742 = 742;
+ private static int testField0743 = 743;
+ private static int testField0744 = 744;
+ private static int testField0745 = 745;
+ private static int testField0746 = 746;
+ private static int testField0747 = 747;
+ private static int testField0748 = 748;
+ private static int testField0749 = 749;
+ private static int testField0750 = 750;
+ private static int testField0751 = 751;
+ private static int testField0752 = 752;
+ private static int testField0753 = 753;
+ private static int testField0754 = 754;
+ private static int testField0755 = 755;
+ private static int testField0756 = 756;
+ private static int testField0757 = 757;
+ private static int testField0758 = 758;
+ private static int testField0759 = 759;
+ private static int testField0760 = 760;
+ private static int testField0761 = 761;
+ private static int testField0762 = 762;
+ private static int testField0763 = 763;
+ private static int testField0764 = 764;
+ private static int testField0765 = 765;
+ private static int testField0766 = 766;
+ private static int testField0767 = 767;
+ private static int testField0768 = 768;
+ private static int testField0769 = 769;
+ private static int testField0770 = 770;
+ private static int testField0771 = 771;
+ private static int testField0772 = 772;
+ private static int testField0773 = 773;
+ private static int testField0774 = 774;
+ private static int testField0775 = 775;
+ private static int testField0776 = 776;
+ private static int testField0777 = 777;
+ private static int testField0778 = 778;
+ private static int testField0779 = 779;
+ private static int testField0780 = 780;
+ private static int testField0781 = 781;
+ private static int testField0782 = 782;
+ private static int testField0783 = 783;
+ private static int testField0784 = 784;
+ private static int testField0785 = 785;
+ private static int testField0786 = 786;
+ private static int testField0787 = 787;
+ private static int testField0788 = 788;
+ private static int testField0789 = 789;
+ private static int testField0790 = 790;
+ private static int testField0791 = 791;
+ private static int testField0792 = 792;
+ private static int testField0793 = 793;
+ private static int testField0794 = 794;
+ private static int testField0795 = 795;
+ private static int testField0796 = 796;
+ private static int testField0797 = 797;
+ private static int testField0798 = 798;
+ private static int testField0799 = 799;
+ private static int testField0800 = 800;
+ private static int testField0801 = 801;
+ private static int testField0802 = 802;
+ private static int testField0803 = 803;
+ private static int testField0804 = 804;
+ private static int testField0805 = 805;
+ private static int testField0806 = 806;
+ private static int testField0807 = 807;
+ private static int testField0808 = 808;
+ private static int testField0809 = 809;
+ private static int testField0810 = 810;
+ private static int testField0811 = 811;
+ private static int testField0812 = 812;
+ private static int testField0813 = 813;
+ private static int testField0814 = 814;
+ private static int testField0815 = 815;
+ private static int testField0816 = 816;
+ private static int testField0817 = 817;
+ private static int testField0818 = 818;
+ private static int testField0819 = 819;
+ private static int testField0820 = 820;
+ private static int testField0821 = 821;
+ private static int testField0822 = 822;
+ private static int testField0823 = 823;
+ private static int testField0824 = 824;
+ private static int testField0825 = 825;
+ private static int testField0826 = 826;
+ private static int testField0827 = 827;
+ private static int testField0828 = 828;
+ private static int testField0829 = 829;
+ private static int testField0830 = 830;
+ private static int testField0831 = 831;
+ private static int testField0832 = 832;
+ private static int testField0833 = 833;
+ private static int testField0834 = 834;
+ private static int testField0835 = 835;
+ private static int testField0836 = 836;
+ private static int testField0837 = 837;
+ private static int testField0838 = 838;
+ private static int testField0839 = 839;
+ private static int testField0840 = 840;
+ private static int testField0841 = 841;
+ private static int testField0842 = 842;
+ private static int testField0843 = 843;
+ private static int testField0844 = 844;
+ private static int testField0845 = 845;
+ private static int testField0846 = 846;
+ private static int testField0847 = 847;
+ private static int testField0848 = 848;
+ private static int testField0849 = 849;
+ private static int testField0850 = 850;
+ private static int testField0851 = 851;
+ private static int testField0852 = 852;
+ private static int testField0853 = 853;
+ private static int testField0854 = 854;
+ private static int testField0855 = 855;
+ private static int testField0856 = 856;
+ private static int testField0857 = 857;
+ private static int testField0858 = 858;
+ private static int testField0859 = 859;
+ private static int testField0860 = 860;
+ private static int testField0861 = 861;
+ private static int testField0862 = 862;
+ private static int testField0863 = 863;
+ private static int testField0864 = 864;
+ private static int testField0865 = 865;
+ private static int testField0866 = 866;
+ private static int testField0867 = 867;
+ private static int testField0868 = 868;
+ private static int testField0869 = 869;
+ private static int testField0870 = 870;
+ private static int testField0871 = 871;
+ private static int testField0872 = 872;
+ private static int testField0873 = 873;
+ private static int testField0874 = 874;
+ private static int testField0875 = 875;
+ private static int testField0876 = 876;
+ private static int testField0877 = 877;
+ private static int testField0878 = 878;
+ private static int testField0879 = 879;
+ private static int testField0880 = 880;
+ private static int testField0881 = 881;
+ private static int testField0882 = 882;
+ private static int testField0883 = 883;
+ private static int testField0884 = 884;
+ private static int testField0885 = 885;
+ private static int testField0886 = 886;
+ private static int testField0887 = 887;
+ private static int testField0888 = 888;
+ private static int testField0889 = 889;
+ private static int testField0890 = 890;
+ private static int testField0891 = 891;
+ private static int testField0892 = 892;
+ private static int testField0893 = 893;
+ private static int testField0894 = 894;
+ private static int testField0895 = 895;
+ private static int testField0896 = 896;
+ private static int testField0897 = 897;
+ private static int testField0898 = 898;
+ private static int testField0899 = 899;
+ private static int testField0900 = 900;
+ private static int testField0901 = 901;
+ private static int testField0902 = 902;
+ private static int testField0903 = 903;
+ private static int testField0904 = 904;
+ private static int testField0905 = 905;
+ private static int testField0906 = 906;
+ private static int testField0907 = 907;
+ private static int testField0908 = 908;
+ private static int testField0909 = 909;
+ private static int testField0910 = 910;
+ private static int testField0911 = 911;
+ private static int testField0912 = 912;
+ private static int testField0913 = 913;
+ private static int testField0914 = 914;
+ private static int testField0915 = 915;
+ private static int testField0916 = 916;
+ private static int testField0917 = 917;
+ private static int testField0918 = 918;
+ private static int testField0919 = 919;
+ private static int testField0920 = 920;
+ private static int testField0921 = 921;
+ private static int testField0922 = 922;
+ private static int testField0923 = 923;
+ private static int testField0924 = 924;
+ private static int testField0925 = 925;
+ private static int testField0926 = 926;
+ private static int testField0927 = 927;
+ private static int testField0928 = 928;
+ private static int testField0929 = 929;
+ private static int testField0930 = 930;
+ private static int testField0931 = 931;
+ private static int testField0932 = 932;
+ private static int testField0933 = 933;
+ private static int testField0934 = 934;
+ private static int testField0935 = 935;
+ private static int testField0936 = 936;
+ private static int testField0937 = 937;
+ private static int testField0938 = 938;
+ private static int testField0939 = 939;
+ private static int testField0940 = 940;
+ private static int testField0941 = 941;
+ private static int testField0942 = 942;
+ private static int testField0943 = 943;
+ private static int testField0944 = 944;
+ private static int testField0945 = 945;
+ private static int testField0946 = 946;
+ private static int testField0947 = 947;
+ private static int testField0948 = 948;
+ private static int testField0949 = 949;
+ private static int testField0950 = 950;
+ private static int testField0951 = 951;
+ private static int testField0952 = 952;
+ private static int testField0953 = 953;
+ private static int testField0954 = 954;
+ private static int testField0955 = 955;
+ private static int testField0956 = 956;
+ private static int testField0957 = 957;
+ private static int testField0958 = 958;
+ private static int testField0959 = 959;
+ private static int testField0960 = 960;
+ private static int testField0961 = 961;
+ private static int testField0962 = 962;
+ private static int testField0963 = 963;
+ private static int testField0964 = 964;
+ private static int testField0965 = 965;
+ private static int testField0966 = 966;
+ private static int testField0967 = 967;
+ private static int testField0968 = 968;
+ private static int testField0969 = 969;
+ private static int testField0970 = 970;
+ private static int testField0971 = 971;
+ private static int testField0972 = 972;
+ private static int testField0973 = 973;
+ private static int testField0974 = 974;
+ private static int testField0975 = 975;
+ private static int testField0976 = 976;
+ private static int testField0977 = 977;
+ private static int testField0978 = 978;
+ private static int testField0979 = 979;
+ private static int testField0980 = 980;
+ private static int testField0981 = 981;
+ private static int testField0982 = 982;
+ private static int testField0983 = 983;
+ private static int testField0984 = 984;
+ private static int testField0985 = 985;
+ private static int testField0986 = 986;
+ private static int testField0987 = 987;
+ private static int testField0988 = 988;
+ private static int testField0989 = 989;
+ private static int testField0990 = 990;
+ private static int testField0991 = 991;
+ private static int testField0992 = 992;
+ private static int testField0993 = 993;
+ private static int testField0994 = 994;
+ private static int testField0995 = 995;
+ private static int testField0996 = 996;
+ private static int testField0997 = 997;
+ private static int testField0998 = 998;
+ private static int testField0999 = 999;
+ private static int testField1000 = 1000;
+ private static int testField1001 = 1001;
+ private static int testField1002 = 1002;
+ private static int testField1003 = 1003;
+ private static int testField1004 = 1004;
+ private static int testField1005 = 1005;
+ private static int testField1006 = 1006;
+ private static int testField1007 = 1007;
+ private static int testField1008 = 1008;
+ private static int testField1009 = 1009;
+ private static int testField1010 = 1010;
+ private static int testField1011 = 1011;
+ private static int testField1012 = 1012;
+ private static int testField1013 = 1013;
+ private static int testField1014 = 1014;
+ private static int testField1015 = 1015;
+ private static int testField1016 = 1016;
+ private static int testField1017 = 1017;
+ private static int testField1018 = 1018;
+ private static int testField1019 = 1019;
+ private static int testField1020 = 1020;
+ private static int testField1021 = 1021;
+ private static int testField1022 = 1022;
+ private static int testField1023 = 1023;
+}
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
index 65486e9..330aa74 100644
--- a/test/476-checker-ctor-memory-barrier/src/Main.java
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -37,6 +37,7 @@
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
public ClassWithFinals() {
+ // Exactly one constructor barrier.
x = 0;
}
@@ -45,7 +46,7 @@
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
public ClassWithFinals(int x) {
- // This should have two barriers:
+ // This should have exactly two barriers:
// - one for the constructor
// - one for the `new` which should be inlined.
obj = new ClassWithFinals();
@@ -62,6 +63,8 @@
/// CHECK-NOT: InvokeStaticOrDirect
public InheritFromClassWithFinals() {
// Should inline the super constructor.
+ //
+ // Exactly one constructor barrier here.
}
/// CHECK-START: void InheritFromClassWithFinals.<init>(boolean) register (after)
@@ -77,6 +80,7 @@
/// CHECK-START: void InheritFromClassWithFinals.<init>(int) register (after)
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK-NOT: MemoryBarrier kind:StoreStore
/// CHECK: ReturnVoid
/// CHECK-START: void InheritFromClassWithFinals.<init>(int) register (after)
@@ -94,12 +98,13 @@
/// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
/// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
/// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
/// CHECK-NOT: InvokeStaticOrDirect
public HaveFinalsAndInheritFromClassWithFinals() {
- // Should inline the super constructor and remove the memory barrier.
+ // Should inline the super constructor and keep the memory barrier.
y = 0;
}
@@ -117,17 +122,19 @@
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
/// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>(int) register (after)
/// CHECK-NOT: InvokeStaticOrDirect
public HaveFinalsAndInheritFromClassWithFinals(int unused) {
- // Should inline the super constructor and keep just one memory barrier.
+ // Should inline the super constructor and keep both memory barriers.
y = 0;
- // Should inline new instance and keep one barrier.
+ // Should inline new instance and keep both memory barriers.
new HaveFinalsAndInheritFromClassWithFinals();
- // Should inline new instance and keep one barrier.
+ // Should inline new instance and have exactly one barrier.
new InheritFromClassWithFinals();
}
}
@@ -166,6 +173,7 @@
/// CHECK-START: void Main.inlineNew2() register (after)
/// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
/// CHECK-START: void Main.inlineNew2() register (after)
@@ -177,6 +185,8 @@
/// CHECK-START: void Main.inlineNew3() register (after)
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
+ /// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
/// CHECK-START: void Main.inlineNew3() register (after)
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index 3de900a..a5caa7b 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -327,17 +327,17 @@
// check.
/// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (before)
- /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Const7:i\d+>> IntConstant 7
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
/// CHECK: If
// -------------- Loop
/// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
- /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Div>>]
/// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (after)
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
@@ -345,12 +345,12 @@
// -------------- Loop
/// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
/// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
- /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+ /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Div>>]
/// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN$after_arch (after)
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
@@ -358,23 +358,23 @@
// -------------- Loop
/// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
/// CHECK-NOT: IntermediateAddress
- /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+ /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Div>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE1() instruction_simplifier_arm (before)
- /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Const7:i\d+>> IntConstant 7
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
/// CHECK: If
// -------------- Loop
/// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
- /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Div>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE1() instruction_simplifier_arm (after)
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
@@ -382,12 +382,12 @@
// -------------- Loop
/// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
/// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
- /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+ /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Div>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE1() GVN$after_arch (after)
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
/// CHECK: <<Index:i\d+>> Phi
@@ -395,14 +395,14 @@
// -------------- Loop
/// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Div:i\d+>> Div [<<ArrayGet>>,<<Const7>>]
/// CHECK-NOT: IntermediateAddress
- /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+ /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Div>>]
public static int canMergeAfterBCE1() {
- int[] array = {0, 1, 2, 3};
+ int[] array = {0, 7, 14, 21};
for (int i = 0; i < array.length; i++) {
- array[i] = array[i] + 1;
+ array[i] = array[i] / 7;
}
return array[array.length - 1];
}
@@ -421,8 +421,8 @@
/// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Array>>,<<Index>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Array>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
- /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Shl>>]
// Note that we do not care that the `DataOffset` is `12`. But if we do not
// specify it and any other `IntConstant` appears before that instruction,
@@ -441,9 +441,9 @@
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
/// CHECK-DAG: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
/// CHECK: <<Address3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
- /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+ /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Shl>>]
/// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -456,8 +456,8 @@
/// CHECK-DAG: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
- /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Shl>>]
// There should be only one intermediate address computation in the loop.
@@ -475,8 +475,8 @@
/// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Array>>,<<Index>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Array>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
- /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Shl>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE2() instruction_simplifier_arm (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -490,9 +490,9 @@
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
/// CHECK-DAG: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
/// CHECK: <<Address3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
- /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+ /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Shl>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -505,17 +505,17 @@
/// CHECK-DAG: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>]
/// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>]
- /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
- /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Shl>>]
/// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK: IntermediateAddress
/// CHECK-NOT: IntermediateAddress
public static int canMergeAfterBCE2() {
- int[] array = {0, 1, 2, 3};
+ int[] array = {64, 8, 4, 2 };
for (int i = 0; i < array.length - 1; i++) {
- array[i + 1] = array[i] + array[i + 1];
+ array[i + 1] = array[i] << array[i + 1];
}
return array[array.length - 1];
}
@@ -571,8 +571,8 @@
accrossGC(array, 0);
assertIntEquals(125, array[0]);
- assertIntEquals(4, canMergeAfterBCE1());
- assertIntEquals(6, canMergeAfterBCE2());
+ assertIntEquals(3, canMergeAfterBCE1());
+ assertIntEquals(1048576, canMergeAfterBCE2());
assertIntEquals(18, checkLongFloatDouble());
}
diff --git a/test/616-cha-abstract/src/Main.java b/test/616-cha-abstract/src/Main.java
index e1d7db1..b33f575 100644
--- a/test/616-cha-abstract/src/Main.java
+++ b/test/616-cha-abstract/src/Main.java
@@ -39,8 +39,8 @@
}
public class Main {
- static Main1 sMain1;
- static Main1 sMain2;
+ static Base sMain1;
+ static Base sMain2;
static boolean sIsOptimizing = true;
static boolean sHasJIT = true;
diff --git a/test/616-cha-interface-default/expected.txt b/test/616-cha-interface-default/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-interface-default/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-interface-default/info.txt b/test/616-cha-interface-default/info.txt
new file mode 100644
index 0000000..11baa1f
--- /dev/null
+++ b/test/616-cha-interface-default/info.txt
@@ -0,0 +1,2 @@
+Test for Class Hierarchy Analysis (CHA) on interface method.
+Test it under multidex configuration to check cross-dex inlining.
diff --git a/test/616-cha-interface-default/multidex.jpp b/test/616-cha-interface-default/multidex.jpp
new file mode 100644
index 0000000..b0d200e
--- /dev/null
+++ b/test/616-cha-interface-default/multidex.jpp
@@ -0,0 +1,3 @@
+Main:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Main
diff --git a/test/616-cha-interface-default/run b/test/616-cha-interface-default/run
new file mode 100644
index 0000000..d8b4f0d
--- /dev/null
+++ b/test/616-cha-interface-default/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run without an app image to prevent the classes to be loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/616-cha-interface-default/src-multidex/Base.java b/test/616-cha-interface-default/src-multidex/Base.java
new file mode 100644
index 0000000..2cbcb50
--- /dev/null
+++ b/test/616-cha-interface-default/src-multidex/Base.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Base {
+ default public int foo(int i) {
+ if (i != 1) {
+ return -2;
+ }
+ return i + 10;
+ }
+
+ // Test default method that's not inlined.
+ default public int $noinline$bar() {
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ return -1;
+ }
+
+ default void printError(String msg) {
+ System.out.println(msg);
+ }
+}
diff --git a/test/616-cha-interface-default/src/Main.java b/test/616-cha-interface-default/src/Main.java
new file mode 100644
index 0000000..951607d
--- /dev/null
+++ b/test/616-cha-interface-default/src/Main.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main1 implements Base {
+}
+
+class Main2 extends Main1 {
+ public void foobar() {}
+}
+
+class Main3 implements Base {
+ public int foo(int i) {
+ if (i != 3) {
+ printError("error3");
+ }
+ return -(i + 10);
+ }
+}
+
+public class Main {
+ static Base sMain1;
+ static Base sMain2;
+ static Base sMain3;
+
+ static boolean sIsOptimizing = true;
+ static boolean sHasJIT = true;
+ static volatile boolean sOtherThreadStarted;
+
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+ System.out.println(clazz + "." + method_name +
+ " doesn't have single implementation value of " + b);
+ }
+ }
+
+ static int getValue(Class<?> cls) {
+ if (cls == Main1.class || cls == Main2.class) {
+ return 1;
+ }
+ return 3;
+ }
+
+ // sMain1.foo()/sMain2.foo() will always be Base.foo() before Main3 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Base.foo() and be inlined.
+ // After Dummy.createMain3() which links in Main3, live testImplement() on stack
+ // should be deoptimized.
+ static void testImplement(boolean createMain3, boolean wait, boolean setHasJIT) {
+ if (setHasJIT) {
+ if (isInterpreted()) {
+ sHasJIT = false;
+ }
+ return;
+ }
+
+ if (createMain3 && (sIsOptimizing || sHasJIT)) {
+ assertIsManaged();
+ }
+
+ if (sMain1.foo(getValue(sMain1.getClass())) != 11) {
+ System.out.println("11 expected.");
+ }
+ if (sMain1.$noinline$bar() != -1) {
+ System.out.println("-1 expected.");
+ }
+ if (sMain2.foo(getValue(sMain2.getClass())) != 11) {
+ System.out.println("11 expected.");
+ }
+
+ if (createMain3) {
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+ // Create a Main3 instance and assign it to sMain3.
+ // sMain1 is kept the same.
+ sMain3 = Dummy.createMain3();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+ } else if (wait) {
+ // This is the other thread.
+ synchronized(Main.class) {
+ sOtherThreadStarted = true;
+ // Wait for Main3 to be linked and deoptimization is triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ // There should be a deoptimization here right after Main3 is linked by
+ // calling Dummy.createMain3(), even though sMain1 didn't change.
+ // The behavior here would be different if inline-cache is used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ if (sMain1.foo(getValue(sMain1.getClass())) != 11) {
+ System.out.println("11 expected.");
+ }
+ if ((createMain3 || wait) && sHasJIT && !sIsOptimizing) {
+ // This method should be deoptimized right after Main3 is created.
+ assertIsInterpreted();
+ }
+
+ if (sMain3 != null) {
+ if (sMain3.foo(getValue(sMain3.getClass())) != -13) {
+ System.out.println("-13 expected.");
+ }
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that implements a method can invalidate compiled code.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ if (isInterpreted()) {
+ sIsOptimizing = false;
+ }
+
+ // sMain1 is an instance of Main1.
+ // sMain2 is an instance of Main2.
+ // Neither Main1 nor Main2 override default method Base.foo().
+ // Main3 hasn't been loaded yet.
+ sMain1 = new Main1();
+ sMain2 = new Main2();
+
+ ensureJitCompiled(Main.class, "testImplement");
+ testImplement(false, false, true);
+
+ if (sHasJIT && !sIsOptimizing) {
+ assertSingleImplementation(Base.class, "foo", true);
+ assertSingleImplementation(Main1.class, "foo", true);
+ } else {
+ // Main3 is verified ahead-of-time so it's linked in already.
+ }
+
+ // Create another thread that also calls sMain1.foo().
+ // Try to test suspend and deopt another thread.
+ new Thread() {
+ public void run() {
+ testImplement(false, true, false);
+ }
+ }.start();
+
+ // This will create Main3 instance in the middle of testImplement().
+ testImplement(true, false, false);
+ assertSingleImplementation(Base.class, "foo", false);
+ assertSingleImplementation(Main1.class, "foo", true);
+ assertSingleImplementation(sMain3.getClass(), "foo", true);
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ private static native void assertIsInterpreted();
+ private static native void assertIsManaged();
+ private static native boolean isInterpreted();
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+}
+
+// Put createMain3() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Base createMain3() {
+ return new Main3();
+ }
+}
diff --git a/test/616-cha-interface/expected.txt b/test/616-cha-interface/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-interface/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-interface/info.txt b/test/616-cha-interface/info.txt
new file mode 100644
index 0000000..1fd330a
--- /dev/null
+++ b/test/616-cha-interface/info.txt
@@ -0,0 +1 @@
+Test for Class Hierarchy Analysis (CHA) on interface method.
diff --git a/test/616-cha-interface/run b/test/616-cha-interface/run
new file mode 100644
index 0000000..d8b4f0d
--- /dev/null
+++ b/test/616-cha-interface/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run without an app image to prevent the classes to be loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/616-cha-interface/src/Main.java b/test/616-cha-interface/src/Main.java
new file mode 100644
index 0000000..3c93496
--- /dev/null
+++ b/test/616-cha-interface/src/Main.java
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Base {
+ void foo(int i);
+ void $noinline$bar();
+}
+
+class Main1 implements Base {
+ public void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+
+ // Test rewriting invoke-interface into invoke-virtual when inlining fails.
+ public void $noinline$bar() {
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ System.out.print("");
+ }
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+}
+
+class Main2 extends Main1 {
+ public void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+public class Main {
+ static Base sMain1;
+ static Base sMain2;
+
+ static boolean sIsOptimizing = true;
+ static boolean sHasJIT = true;
+ static volatile boolean sOtherThreadStarted;
+
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+ System.out.println(clazz + "." + method_name +
+ " doesn't have single implementation value of " + b);
+ }
+ }
+
+  // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2() which links in Main2, live testImplement() on stack
+ // should be deoptimized.
+ static void testImplement(boolean createMain2, boolean wait, boolean setHasJIT) {
+ if (setHasJIT) {
+ if (isInterpreted()) {
+ sHasJIT = false;
+ }
+ return;
+ }
+
+ if (createMain2 && (sIsOptimizing || sHasJIT)) {
+ assertIsManaged();
+ }
+
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ sMain1.$noinline$bar();
+
+ if (createMain2) {
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+      // Create a Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+ } else if (wait) {
+ // This is the other thread.
+ synchronized(Main.class) {
+ sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization is triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different if inline-cache is used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if ((createMain2 || wait) && sHasJIT && !sIsOptimizing) {
+ // This method should be deoptimized right after Main2 is created.
+ assertIsInterpreted();
+ }
+
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that overrides a method can invalidate compiled code.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ if (isInterpreted()) {
+ sIsOptimizing = false;
+ }
+
+    // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ ensureJitCompiled(Main.class, "testImplement");
+ testImplement(false, false, true);
+
+ if (sHasJIT && !sIsOptimizing) {
+ assertSingleImplementation(Base.class, "foo", true);
+ assertSingleImplementation(Main1.class, "foo", true);
+ } else {
+ // Main2 is verified ahead-of-time so it's linked in already.
+ }
+
+ // Create another thread that also calls sMain1.foo().
+ // Try to test suspend and deopt another thread.
+ new Thread() {
+ public void run() {
+ testImplement(false, true, false);
+ }
+ }.start();
+
+    // This will create a Main2 instance in the middle of testImplement().
+ testImplement(true, false, false);
+ assertSingleImplementation(Base.class, "foo", false);
+ assertSingleImplementation(Main1.class, "foo", false);
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ private static native void assertIsInterpreted();
+ private static native void assertIsManaged();
+ private static native boolean isInterpreted();
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+}
+
+// Put createMain2() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+}
diff --git a/test/616-cha-miranda/expected.txt b/test/616-cha-miranda/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-miranda/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-miranda/info.txt b/test/616-cha-miranda/info.txt
new file mode 100644
index 0000000..c46f33f
--- /dev/null
+++ b/test/616-cha-miranda/info.txt
@@ -0,0 +1 @@
+Test for Class Hierarchy Analysis (CHA) on miranda method.
diff --git a/test/616-cha-miranda/run b/test/616-cha-miranda/run
new file mode 100644
index 0000000..d8b4f0d
--- /dev/null
+++ b/test/616-cha-miranda/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run without an app image to prevent the classes from being loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/616-cha-miranda/src/Main.java b/test/616-cha-miranda/src/Main.java
new file mode 100644
index 0000000..e548482
--- /dev/null
+++ b/test/616-cha-miranda/src/Main.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Iface {
+ public void foo(int i);
+}
+
+abstract class Base implements Iface {
+ // Iface.foo(int) will be added as a miranda method.
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+}
+
+class Main1 extends Base {
+ public void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+}
+
+class Main2 extends Main1 {
+ public void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+public class Main {
+ static Base sMain1;
+ static Base sMain2;
+
+ static boolean sIsOptimizing = true;
+ static boolean sHasJIT = true;
+ static volatile boolean sOtherThreadStarted;
+
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+ System.out.println(clazz + "." + method_name +
+ " doesn't have single implementation value of " + b);
+ }
+ }
+
+  // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2() which links in Main2, live testOverride() on stack
+ // should be deoptimized.
+ static void testOverride(boolean createMain2, boolean wait, boolean setHasJIT) {
+ if (setHasJIT) {
+ if (isInterpreted()) {
+ sHasJIT = false;
+ }
+ return;
+ }
+
+ if (createMain2 && (sIsOptimizing || sHasJIT)) {
+ assertIsManaged();
+ }
+
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+
+ if (createMain2) {
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+      // Create a Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+ } else if (wait) {
+ // This is the other thread.
+ synchronized(Main.class) {
+ sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization is triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different if inline-cache is used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if ((createMain2 || wait) && sHasJIT && !sIsOptimizing) {
+ // This method should be deoptimized right after Main2 is created.
+ assertIsInterpreted();
+ }
+
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that overrides a method can invalidate compiled code.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ if (isInterpreted()) {
+ sIsOptimizing = false;
+ }
+
+    // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ ensureJitCompiled(Main.class, "testOverride");
+ testOverride(false, false, true);
+
+ if (sHasJIT && !sIsOptimizing) {
+ assertSingleImplementation(Base.class, "foo", true);
+ assertSingleImplementation(Main1.class, "foo", true);
+ } else {
+ // Main2 is verified ahead-of-time so it's linked in already.
+ }
+
+ // Create another thread that also calls sMain1.foo().
+ // Try to test suspend and deopt another thread.
+ new Thread() {
+ public void run() {
+ testOverride(false, true, false);
+ }
+ }.start();
+
+    // This will create a Main2 instance in the middle of testOverride().
+ testOverride(true, false, false);
+ assertSingleImplementation(Base.class, "foo", false);
+ assertSingleImplementation(Main1.class, "foo", false);
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ private static native void assertIsInterpreted();
+ private static native void assertIsManaged();
+ private static native boolean isInterpreted();
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+}
+
+// Put createMain2() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+}
diff --git a/test/616-cha-proxy-method-inline/expected.txt b/test/616-cha-proxy-method-inline/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-proxy-method-inline/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-proxy-method-inline/info.txt b/test/616-cha-proxy-method-inline/info.txt
new file mode 100644
index 0000000..0126855
--- /dev/null
+++ b/test/616-cha-proxy-method-inline/info.txt
@@ -0,0 +1 @@
+Test for Class Hierarchy Analysis (CHA) on inlining a cross-dex proxy method.
diff --git a/test/616-cha-proxy-method-inline/multidex.jpp b/test/616-cha-proxy-method-inline/multidex.jpp
new file mode 100644
index 0000000..b0d200e
--- /dev/null
+++ b/test/616-cha-proxy-method-inline/multidex.jpp
@@ -0,0 +1,3 @@
+Main:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Main
diff --git a/test/616-cha-proxy-method-inline/run b/test/616-cha-proxy-method-inline/run
new file mode 100644
index 0000000..d8b4f0d
--- /dev/null
+++ b/test/616-cha-proxy-method-inline/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run without an app image to prevent the classes from being loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/ti-agent/common_load.h b/test/616-cha-proxy-method-inline/src-multidex/Foo.java
similarity index 67%
copy from test/ti-agent/common_load.h
copy to test/616-cha-proxy-method-inline/src-multidex/Foo.java
index e79a006..9deca3e 100644
--- a/test/ti-agent/common_load.h
+++ b/test/616-cha-proxy-method-inline/src-multidex/Foo.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,6 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
-
-#include "jvmti.h"
-
-namespace art {
-
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+interface Foo {
+ public Object bar(Object obj);
+}
diff --git a/test/616-cha-proxy-method-inline/src/Main.java b/test/616-cha-proxy-method-inline/src/Main.java
new file mode 100644
index 0000000..be7bc82
--- /dev/null
+++ b/test/616-cha-proxy-method-inline/src/Main.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+class DebugProxy implements java.lang.reflect.InvocationHandler {
+ private Object obj;
+ static Class<?>[] interfaces = {Foo.class};
+
+ public static Object newInstance(Object obj) {
+ return java.lang.reflect.Proxy.newProxyInstance(
+ Foo.class.getClassLoader(),
+ interfaces,
+ new DebugProxy(obj));
+ }
+
+ private DebugProxy(Object obj) {
+ this.obj = obj;
+ }
+
+ public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
+ Object result;
+ if (obj == null) {
+ return null;
+ }
+ try {
+ System.out.println("before invoking method " + m.getName());
+ result = m.invoke(obj, args);
+ } catch (InvocationTargetException e) {
+ throw e.getTargetException();
+ } catch (Exception e) {
+ throw new RuntimeException("unexpected invocation exception: " + e.getMessage());
+ } finally {
+ System.out.println("after invoking method " + m.getName());
+ }
+ return result;
+ }
+}
+
+public class Main {
+ public static void call(Foo foo) {
+ if (foo == null) {
+ return;
+ }
+ foo.bar(null);
+ }
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ Foo foo = (Foo)DebugProxy.newInstance(null);
+ ensureJitCompiled(Main.class, "call");
+ call(foo);
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+}
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index eee90ab..f0b3278 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -270,6 +270,24 @@
}
}
+ // If vectorized, invariant stride should be recognized
+ // as a reduction, not a unit stride in outer loop.
+ static void reduc(int[] xx, int[] yy) {
+ for (int i0 = 0; i0 < 2; i0++) {
+ for (int i1 = 0; i1 < 469; i1++) {
+ xx[i0] -= (++yy[i1]);
+ }
+ }
+ }
+
+ // If vectorized, string encoding should be dealt with.
+ private static void string2Bytes(char[] a, String b) {
+ int min = Math.min(a.length, b.length());
+ for (int i = 0; i < min; i++) {
+ a[i] = b.charAt(i);
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -335,6 +353,22 @@
expectEquals(2.0f, a[i]);
}
+ int[] xx = new int[2];
+ int[] yy = new int[469];
+ reduc(xx, yy);
+ expectEquals(-469, xx[0]);
+ expectEquals(-938, xx[1]);
+ for (int i = 0; i < 469; i++) {
+ expectEquals(2, yy[i]);
+ }
+
+ char[] aa = new char[23];
+ String bb = "hello world how are you";
+ string2Bytes(aa, bb);
+ for (int i = 0; i < aa.length; i++) {
+ expectEquals(aa[i], bb.charAt(i));
+ }
+
System.out.println("passed");
}
diff --git a/test/638-checker-inline-caches/src/Main.java b/test/638-checker-inline-caches/src/Main.java
index 2cee47e..680bd14 100644
--- a/test/638-checker-inline-caches/src/Main.java
+++ b/test/638-checker-inline-caches/src/Main.java
@@ -40,10 +40,11 @@
/// CHECK-START: int Main.inlineMonomorphicSubA(Super) inliner (after)
/// CHECK: <<SubARet:i\d+>> IntConstant 42
- /// CHECK: <<ObjClass:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK: <<Obj:l\d+>> NullCheck
+ /// CHECK: <<ObjClass:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClass:l\d+>> LoadClass class_name:SubA
/// CHECK: <<Test:z\d+>> NotEqual [<<InlineClass>>,<<ObjClass>>]
- /// CHECK: Deoptimize [<<Test>>]
+ /// CHECK: Deoptimize [<<Test>>,<<Obj>>]
/// CHECK: Return [<<SubARet>>]
public static int inlineMonomorphicSubA(Super a) {
return a.getValue();
@@ -60,7 +61,8 @@
/// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubBRet:i\d+>> IntConstant 38
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK: <<Obj:l\d+>> NullCheck
+ /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
/// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
/// CHECK: If [<<TestSubA>>]
@@ -68,7 +70,7 @@
/// CHECK: <<ObjClassSubB:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClassSubB:l\d+>> LoadClass class_name:SubB
/// CHECK: <<TestSubB:z\d+>> NotEqual [<<InlineClassSubB>>,<<ObjClassSubB>>]
- /// CHECK: Deoptimize [<<TestSubB>>]
+ /// CHECK: Deoptimize [<<TestSubB>>,<<Obj>>]
/// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubBRet>>]
/// CHECK: Return [<<Ret>>]
@@ -87,7 +89,8 @@
/// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubCRet:i\d+>> IntConstant 24
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK: <<Obj:l\d+>> NullCheck
+ /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
/// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
/// CHECK: If [<<TestSubA>>]
@@ -95,7 +98,7 @@
/// CHECK: <<ObjClassSubC:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClassSubC:l\d+>> LoadClass class_name:SubC
/// CHECK: <<TestSubC:z\d+>> NotEqual [<<InlineClassSubC>>,<<ObjClassSubC>>]
- /// CHECK: Deoptimize [<<TestSubC>>]
+ /// CHECK: Deoptimize [<<TestSubC>>,<<Obj>>]
/// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubCRet>>]
/// CHECK: Return [<<Ret>>]
diff --git a/test/640-checker-boolean-simd/expected.txt b/test/640-checker-boolean-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-boolean-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-boolean-simd/info.txt b/test/640-checker-boolean-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-boolean-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-boolean-simd/src/Main.java b/test/640-checker-boolean-simd/src/Main.java
new file mode 100644
index 0000000..f8239fa
--- /dev/null
+++ b/test/640-checker-boolean-simd/src/Main.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static boolean[] a;
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.and(boolean) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.and(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void and(boolean x) {
+ for (int i = 0; i < 128; i++)
+ a[i] &= x; // NOTE: bitwise and, not the common &&
+ }
+
+ /// CHECK-START: void Main.or(boolean) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.or(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void or(boolean x) {
+ for (int i = 0; i < 128; i++)
+ a[i] |= x; // NOTE: bitwise or, not the common ||
+ }
+
+ /// CHECK-START: void Main.xor(boolean) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.xor(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void xor(boolean x) {
+ for (int i = 0; i < 128; i++)
+ a[i] ^= x; // NOTE: bitwise xor
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = !a[i];
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new boolean[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = (i & 1) == 0;
+ }
+ // Arithmetic operations.
+ and(true);
+ for (int i = 0; i < 128; i++) {
+ expectEquals((i & 1) == 0, a[i], "and-true");
+ }
+ xor(true);
+ for (int i = 0; i < 128; i++) {
+ expectEquals((i & 1) != 0, a[i], "xor-true");
+ }
+ xor(false);
+ for (int i = 0; i < 128; i++) {
+ expectEquals((i & 1) != 0, a[i], "xor-false");
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((i & 1) == 0, a[i], "not");
+ }
+ or(true);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(true, a[i], "or-true");
+ }
+ and(false);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(false, a[i], "and-false");
+ }
+ or(false);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(false, a[i], "or-false");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(boolean expected, boolean result, String action) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-byte-simd/expected.txt b/test/640-checker-byte-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-byte-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-byte-simd/info.txt b/test/640-checker-byte-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-byte-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
new file mode 100644
index 0000000..0f7452b
--- /dev/null
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static byte[] a;
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.div(int) loop_optimization (after)
+ //
+ // Not supported on any architecture.
+ //
+ static void div(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (byte) -a[i];
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (byte) ~a[i];
+ }
+
+ /// CHECK-START: void Main.shl4() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void shl4() {
+ for (int i = 0; i < 128; i++)
+ a[i] <<= 4;
+ }
+
+ /// CHECK-START: void Main.sar2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void sar2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 2;
+ }
+
+ /// CHECK-START: void Main.shr2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void shr2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 2;
+ }
+
+ //
+ // Shift sanity.
+ //
+
+ static void sar31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 31;
+ }
+
+ static void shr31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 31;
+ }
+
+ static void shr32() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 32; // 0, since & 31
+ }
+
+ static void shr33() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 33; // 1, since & 31
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++)
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new byte[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = (byte) i;
+ }
+ // Arithmetic operations.
+ add(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte)(i + 2), a[i], "add");
+ }
+ sub(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte)(i + i), a[i], "mul");
+ }
+ div(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(((byte)(i + i)) >> 1, a[i], "div");
+ a[i] = (byte) i; // undo arithmetic wrap-around effects
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Shifts.
+ for (int i = 0; i < 128; i++) {
+ a[i] = (byte) 0xff;
+ }
+ shl4();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0xf0, a[i], "shl4");
+ }
+ sar2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0xfc, a[i], "sar2");
+ }
+ shr2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0xff, a[i], "shr2"); // sic!
+ }
+ sar31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0xff, a[i], "sar31");
+ }
+ shr31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x01, a[i], "shr31");
+ a[i] = (byte) 0x12; // reset
+ }
+ shr32();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0x12, a[i], "shr32");
+ }
+ shr33();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0x09, a[i], "shr33");
+ a[i] = (byte) 0xf0; // reset
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((byte) 0x0f, a[i], "not");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result, String action) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-char-simd/expected.txt b/test/640-checker-char-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-char-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-char-simd/info.txt b/test/640-checker-char-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-char-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
new file mode 100644
index 0000000..0628b36
--- /dev/null
+++ b/test/640-checker-char-simd/src/Main.java
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static char[] a; // shared 128-element array under test; reinitialized between test phases
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.div(int) loop_optimization (after)
+ //
+ // Not supported on any architecture.
+ //
+ static void div(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (char) -a[i]; // negation widens to int; the cast truncates back to 16 bits
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (char) ~a[i]; // complement widens to int; the cast truncates back to 16 bits
+ }
+
+ /// CHECK-START: void Main.shl4() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void shl4() {
+ for (int i = 0; i < 128; i++)
+ a[i] <<= 4;
+ }
+
+ /// CHECK-START: void Main.sar2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void sar2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 2; // arithmetic shift on the zero-extended int value of the char
+ }
+
+ /// CHECK-START: void Main.shr2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void shr2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 2;
+ }
+
+ //
+ // Shift sanity.
+ //
+
+ static void sar31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 31; // always 0: char zero-extends to int, so the sign bit is never set
+ }
+
+ static void shr31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 31; // always 0 for 16-bit char values
+ }
+
+ static void shr32() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 32; // 0, since & 31
+ }
+
+ static void shr33() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 33; // 1, since & 31
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++) // interior elements only; a[0] and a[127] stay untouched
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new char[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = (char) i;
+ }
+ // Arithmetic operations.
+ add(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char)-i, a[i], "neg"); // e.g. -1 wraps to 0xffff as char
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals((char)(11 - i), a[i], "bounds");
+ }
+ expectEquals((char)-127, a[127], "bounds127");
+ // Shifts.
+ for (int i = 0; i < 128; i++) {
+ a[i] = (char) 0xffff;
+ }
+ shl4();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0xfff0, a[i], "shl4");
+ }
+ sar2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0x3ffc, a[i], "sar2"); // 0x0000fff0 >> 2, truncated to char
+ }
+ shr2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0x0fff, a[i], "shr2");
+ a[i] = (char) 0xffff; // reset
+ }
+ sar31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0, a[i], "sar31");
+ a[i] = (char) 0xffff; // reset
+ }
+ shr31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0, a[i], "shr31");
+ a[i] = (char) 0x1200; // reset
+ }
+ shr32();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0x1200, a[i], "shr32"); // unchanged: shift count 32 & 31 == 0
+ }
+ shr33();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0x0900, a[i], "shr33"); // effective shift of 1 (33 & 31)
+ a[i] = (char) 0xf1f0; // reset
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((char) 0x0e0f, a[i], "not");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result, String action) { // asserts exact equality; "action" names the failing operation
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-double-simd/expected.txt b/test/640-checker-double-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-double-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-double-simd/info.txt b/test/640-checker-double-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-double-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
new file mode 100644
index 0000000..0d4f87a
--- /dev/null
+++ b/test/640-checker-double-simd/src/Main.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization. Note that this class provides a mere
+ * functional test, not a precise numerical verifier.
+ */
+public class Main {
+
+ static double[] a; // shared 128-element array under test
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(double) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(double x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(double) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(double x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(double) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(double x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(double) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.div(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void div(double x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = -a[i];
+ }
+
+ /// CHECK-START: void Main.abs() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void abs() {
+ for (int i = 0; i < 128; i++)
+ a[i] = Math.abs(a[i]);
+ }
+
+ /// CHECK-START: void Main.conv(long[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.conv(long[]) loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void conv(long[] b) {
+ for (int i = 0; i < 128; i++)
+ a[i] = b[i]; // implicit widening long-to-double conversion
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++) // interior elements only; a[0] and a[127] stay untouched
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new double[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = i;
+ }
+ // Arithmetic operations.
+ add(2.0);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2.0);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2.0);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2.0);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Abs.
+ abs();
+ expectEquals(0, a[0], "abs0");
+ for (int i = 1; i <= 11; i++) {
+ expectEquals(11 - i, a[i], "abs_lo"); // values 11-i were still non-negative
+ }
+ for (int i = 12; i < 127; i++) {
+ expectEquals(i - 11, a[i], "abs_hi"); // values 11-i were negative, now flipped
+ }
+ expectEquals(127, a[127], "abs127");
+ // Conversion.
+ long[] b = new long[128];
+ for (int i = 0; i < 128; i++) {
+ b[i] = 1000 * i;
+ }
+ conv(b);
+ for (int i = 1; i < 127; i++) {
+ expectEquals(1000.0 * i, a[i], "conv"); // exactly representable in double, so == is safe
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(double expected, double result, String action) { // exact comparison is intentional: all test values are exactly representable
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-float-simd/expected.txt b/test/640-checker-float-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-float-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-float-simd/info.txt b/test/640-checker-float-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-float-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
new file mode 100644
index 0000000..4bcb7e2
--- /dev/null
+++ b/test/640-checker-float-simd/src/Main.java
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization. Note that this class provides a mere
+ * functional test, not a precise numerical verifier.
+ */
+public class Main {
+
+ static float[] a; // shared 128-element array under test
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(float) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(float x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(float) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(float x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(float) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(float x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(float) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.div(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void div(float x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = -a[i];
+ }
+
+ /// CHECK-START: void Main.abs() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void abs() {
+ for (int i = 0; i < 128; i++)
+ a[i] = Math.abs(a[i]);
+ }
+
+ /// CHECK-START: void Main.conv(int[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.conv(int[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void conv(int[] b) {
+ for (int i = 0; i < 128; i++)
+ a[i] = b[i]; // implicit widening int-to-float conversion
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++) // interior elements only; a[0] and a[127] stay untouched
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new float[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = i;
+ }
+ // Arithmetic operations.
+ add(2.0f);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2.0f);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2.0f);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2.0f);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Abs.
+ abs();
+ expectEquals(0, a[0], "abs0");
+ for (int i = 1; i <= 11; i++) {
+ expectEquals(11 - i, a[i], "abs_lo"); // values 11-i were still non-negative
+ }
+ for (int i = 12; i < 127; i++) {
+ expectEquals(i - 11, a[i], "abs_hi"); // values 11-i were negative, now flipped
+ }
+ expectEquals(127, a[127], "abs127");
+ // Conversion.
+ int[] b = new int[128];
+ for (int i = 0; i < 128; i++) {
+ b[i] = 1000 * i;
+ }
+ conv(b);
+ for (int i = 1; i < 127; i++) {
+ expectEquals(1000.0f * i, a[i], "conv"); // exactly representable in float, so == is safe
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(float expected, float result, String action) { // exact comparison is intentional: all test values are exactly representable
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-int-simd/expected.txt b/test/640-checker-int-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-int-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-int-simd/info.txt b/test/640-checker-int-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-int-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
new file mode 100644
index 0000000..ba1e142
--- /dev/null
+++ b/test/640-checker-int-simd/src/Main.java
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static int[] a; // shared 128-element array under test
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.div(int) loop_optimization (after)
+ //
+ // Not supported on any architecture.
+ //
+ static void div(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = -a[i];
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = ~a[i];
+ }
+
+ /// CHECK-START: void Main.shl4() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void shl4() {
+ for (int i = 0; i < 128; i++)
+ a[i] <<= 4;
+ }
+
+ /// CHECK-START: void Main.sar2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void sar2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 2; // arithmetic (sign-propagating) shift
+ }
+
+ /// CHECK-START: void Main.shr2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void shr2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 2; // logical (zero-filling) shift
+ }
+
+ //
+ // Shift sanity.
+ //
+
+ static void shr32() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 32; // 0, since & 31
+ }
+
+ static void shr33() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 33; // 1, since & 31
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++) // interior elements only; a[0] and a[127] stay untouched
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new int[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = i;
+ }
+ // Arithmetic operations.
+ add(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Shifts.
+ for (int i = 0; i < 128; i++) {
+ a[i] = 0xffffffff;
+ }
+ shl4();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xfffffff0, a[i], "shl4");
+ }
+ sar2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xfffffffc, a[i], "sar2"); // sign bit propagated into the top two bits
+ }
+ shr2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x3fffffff, a[i], "shr2"); // zeros shifted into the top two bits
+ }
+ shr32();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x3fffffff, a[i], "shr32"); // unchanged: shift count 32 & 31 == 0
+ }
+ shr33();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x1fffffff, a[i], "shr33"); // effective shift of 1 (33 & 31)
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xe0000000, a[i], "not");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result, String action) { // asserts exact equality; "action" names the failing operation
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-long-simd/expected.txt b/test/640-checker-long-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-long-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-long-simd/info.txt b/test/640-checker-long-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-long-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
new file mode 100644
index 0000000..5641182
--- /dev/null
+++ b/test/640-checker-long-simd/src/Main.java
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static long[] a;
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(long) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(long x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(long) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(long x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ // Not supported for longs.
+ /// CHECK-START-ARM64: void Main.mul(long) loop_optimization (after)
+ /// CHECK-NOT: VecMul
+ static void mul(long x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.div(long) loop_optimization (after)
+ //
+ // Not supported on any architecture.
+ //
+ static void div(long x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = -a[i];
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = ~a[i];
+ }
+
+ /// CHECK-START: void Main.shl4() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void shl4() {
+ for (int i = 0; i < 128; i++)
+ a[i] <<= 4;
+ }
+
+ /// CHECK-START: void Main.sar2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void sar2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 2;
+ }
+
+ /// CHECK-START: void Main.shr2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void shr2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 2;
+ }
+
+ //
+ // Shift sanity.
+ //
+
+ static void shr64() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 64; // 0, since & 63
+ }
+
+ static void shr65() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 65; // 1, since & 63
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void bounds() {
+ for (int i = 1; i < 127; i++)
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new long[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = i;
+ }
+ // Arithmetic operations.
+ add(2L);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2L);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2L);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2L);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ bounds();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Shifts.
+ for (int i = 0; i < 128; i++) {
+ a[i] = 0xffffffffffffffffL;
+ }
+ shl4();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xfffffffffffffff0L, a[i], "shl4");
+ }
+ sar2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xfffffffffffffffcL, a[i], "sar2");
+ }
+ shr2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x3fffffffffffffffL, a[i], "shr2");
+ }
+ shr64();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x3fffffffffffffffL, a[i], "shr64");
+ }
+ shr65();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x1fffffffffffffffL, a[i], "shr65");
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0xe000000000000000L, a[i], "not");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(long expected, long result, String action) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/640-checker-short-simd/expected.txt b/test/640-checker-short-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/640-checker-short-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/640-checker-short-simd/info.txt b/test/640-checker-short-simd/info.txt
new file mode 100644
index 0000000..c9c6d5e
--- /dev/null
+++ b/test/640-checker-short-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on SIMD vectorization.
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
new file mode 100644
index 0000000..241f8e6
--- /dev/null
+++ b/test/640-checker-short-simd/src/Main.java
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class Main {
+
+ static short[] a;
+
+ //
+ // Arithmetic operations.
+ //
+
+ /// CHECK-START: void Main.add(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void add(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] += x;
+ }
+
+ /// CHECK-START: void Main.sub(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void sub(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] -= x;
+ }
+
+ /// CHECK-START: void Main.mul(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void mul(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] *= x;
+ }
+
+ /// CHECK-START: void Main.div(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.div(int) loop_optimization (after)
+ //
+ // Not supported on any architecture.
+ //
+ static void div(int x) {
+ for (int i = 0; i < 128; i++)
+ a[i] /= x;
+ }
+
+ /// CHECK-START: void Main.neg() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void neg() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (short) -a[i];
+ }
+
+ /// CHECK-START: void Main.not() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void not() {
+ for (int i = 0; i < 128; i++)
+ a[i] = (short) ~a[i];
+ }
+
+ /// CHECK-START: void Main.shl4() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ static void shl4() {
+ for (int i = 0; i < 128; i++)
+ a[i] <<= 4;
+ }
+
+ /// CHECK-START: void Main.sar2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void sar2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 2;
+ }
+
+ /// CHECK-START: void Main.shr2() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
+ static void shr2() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 2;
+ }
+
+ //
+ // Shift sanity.
+ //
+
+ static void sar31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>= 31;
+ }
+
+ static void shr31() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 31;
+ }
+
+ static void shr32() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 32; // 0, since & 31
+ }
+
+
+ static void shr33() {
+ for (int i = 0; i < 128; i++)
+ a[i] >>>= 33; // 1, since & 31
+ }
+
+ //
+ // Loop bounds.
+ //
+
+ static void add() {
+ for (int i = 1; i < 127; i++)
+ a[i] += 11;
+ }
+
+ //
+ // Test Driver.
+ //
+
+ public static void main(String[] args) {
+ // Set up.
+ a = new short[128];
+ for (int i = 0; i < 128; i++) {
+ a[i] = (short) i;
+ }
+ // Arithmetic operations.
+ add(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + 2, a[i], "add");
+ }
+ sub(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "sub");
+ }
+ mul(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i + i, a[i], "mul");
+ }
+ div(2);
+ for (int i = 0; i < 128; i++) {
+ expectEquals(i, a[i], "div");
+ }
+ neg();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(-i, a[i], "neg");
+ }
+ // Loop bounds.
+ add();
+ expectEquals(0, a[0], "bounds0");
+ for (int i = 1; i < 127; i++) {
+ expectEquals(11 - i, a[i], "bounds");
+ }
+ expectEquals(-127, a[127], "bounds127");
+ // Shifts.
+ for (int i = 0; i < 128; i++) {
+ a[i] = (short) 0xffff;
+ }
+ shl4();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0xfff0, a[i], "shl4");
+ }
+ sar2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0xfffc, a[i], "sar2");
+ }
+ shr2();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0xffff, a[i], "shr2"); // sic!
+ }
+ sar31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0xffff, a[i], "sar31");
+ }
+ shr31();
+ for (int i = 0; i < 128; i++) {
+ expectEquals(0x0001, a[i], "shr31");
+ a[i] = (short) 0x1200; // reset
+ }
+ shr32();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0x1200, a[i], "shr32");
+ }
+ shr33();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0x0900, a[i], "shr33");
+ a[i] = (short) 0xf0f1; // reset
+ }
+ not();
+ for (int i = 0; i < 128; i++) {
+ expectEquals((short) 0x0f0e, a[i], "not");
+ }
+ // Done.
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result, String action) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+ }
+ }
+}
diff --git a/test/643-checker-bogus-ic/expected.txt b/test/643-checker-bogus-ic/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/643-checker-bogus-ic/expected.txt
diff --git a/test/643-checker-bogus-ic/info.txt b/test/643-checker-bogus-ic/info.txt
new file mode 100644
index 0000000..d5dfff4
--- /dev/null
+++ b/test/643-checker-bogus-ic/info.txt
@@ -0,0 +1 @@
+Verify the compiler can handle a bogus inline cache in a profile.
diff --git a/test/643-checker-bogus-ic/profile b/test/643-checker-bogus-ic/profile
new file mode 100644
index 0000000..cbf7796
--- /dev/null
+++ b/test/643-checker-bogus-ic/profile
@@ -0,0 +1,2 @@
+LMain;->inlineMonomorphic(LMain;)I+LUnrelated;
+LMain;->inlinePolymorphic(LMain;)I+LUnrelated;,LMain;
diff --git a/test/643-checker-bogus-ic/run b/test/643-checker-bogus-ic/run
new file mode 100644
index 0000000..146e180
--- /dev/null
+++ b/test/643-checker-bogus-ic/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} "$@" --profile -Xcompiler-option --compiler-filter=speed-profile
diff --git a/test/643-checker-bogus-ic/src/Main.java b/test/643-checker-bogus-ic/src/Main.java
new file mode 100644
index 0000000..0aa8477
--- /dev/null
+++ b/test/643-checker-bogus-ic/src/Main.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Unrelated {
+}
+
+public class Main {
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (before)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (after)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+
+ public static int inlineMonomorphic(Main a) {
+ return a.getValue();
+ }
+
+ /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (before)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+
+ /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (after)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+ public static int inlinePolymorphic(Main a) {
+ return a.getValue();
+ }
+
+ public int getValue() {
+ return 42;
+ }
+
+ public static void main(String[] args) {
+ inlineMonomorphic(new Main());
+ }
+
+}
diff --git a/test/644-checker-deopt/expected.txt b/test/644-checker-deopt/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/644-checker-deopt/expected.txt
diff --git a/test/644-checker-deopt/info.txt b/test/644-checker-deopt/info.txt
new file mode 100644
index 0000000..c5fb12c
--- /dev/null
+++ b/test/644-checker-deopt/info.txt
@@ -0,0 +1,2 @@
+Regression test for making sure HDeoptimize is executed before
+the code it should have prevented executing.
diff --git a/test/644-checker-deopt/profile b/test/644-checker-deopt/profile
new file mode 100644
index 0000000..cb261cc
--- /dev/null
+++ b/test/644-checker-deopt/profile
@@ -0,0 +1,2 @@
+LMain;->inlineMonomorphic(LMain;)I+LMain;
+LMain;->inlinePolymorphic(LMain;)I+LMain;,LSubMain;
diff --git a/test/644-checker-deopt/run b/test/644-checker-deopt/run
new file mode 100644
index 0000000..146e180
--- /dev/null
+++ b/test/644-checker-deopt/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} "$@" --profile -Xcompiler-option --compiler-filter=speed-profile
diff --git a/test/644-checker-deopt/src/Main.java b/test/644-checker-deopt/src/Main.java
new file mode 100644
index 0000000..17c80a6
--- /dev/null
+++ b/test/644-checker-deopt/src/Main.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (before)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (after)
+ /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) licm (before)
+ /// CHECK: <<Deopt:l\d+>> Deoptimize
+ /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
+
+ /// CHECK-START: int Main.inlineMonomorphic(Main) licm (after)
+ /// CHECK: <<Deopt:l\d+>> Deoptimize
+ /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
+
+ public static int inlineMonomorphic(Main a) {
+ if (a == null) {
+ return 42;
+ }
+ int i = 0;
+ while (i < 100) {
+ i += a.getValue();
+ }
+ return i;
+ }
+
+ /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (before)
+ /// CHECK: InvokeVirtual method_name:Main.getValue
+
+ /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (after)
+ /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
+
+  /// CHECK-START: int Main.inlinePolymorphic(Main) licm (before)
+  /// CHECK: <<Deopt:l\d+>> Deoptimize
+  /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
+
+  /// CHECK-START: int Main.inlinePolymorphic(Main) licm (after)
+  /// CHECK: <<Deopt:l\d+>> Deoptimize
+  /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
+ public static int inlinePolymorphic(Main a) {
+ return a.getValue();
+ }
+
+ public int getValue() {
+ return value;
+ }
+
+ public static void main(String[] args) {
+ inlineMonomorphic(new Main());
+ }
+
+ int value = 1;
+}
+
+// Add a subclass of 'Main' to write the polymorphic inline cache in the profile.
+class SubMain extends Main {
+}
diff --git a/test/645-checker-abs-simd/expected.txt b/test/645-checker-abs-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/645-checker-abs-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/645-checker-abs-simd/info.txt b/test/645-checker-abs-simd/info.txt
new file mode 100644
index 0000000..8fa4066
--- /dev/null
+++ b/test/645-checker-abs-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on abs SIMD vectorization.
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
new file mode 100644
index 0000000..76850ab
--- /dev/null
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for ABS vectorization.
+ */
+public class Main {
+
+ private static final int SPQUIET = 1 << 22;
+ private static final long DPQUIET = 1L << 51;
+
+ /// CHECK-START: void Main.doitInt(int[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ private static void doitInt(int[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitLong(long[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
+ //
+ // TODO: Not supported yet.
+ private static void doitLong(long[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitFloat(float[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitFloat(float[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ private static void doitFloat(float[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitDouble(double[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
+ //
+ // TODO: Not supported yet.
+ private static void doitDouble(double[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Set up minint32, maxint32 and some others.
+ int[] xi = new int[8];
+ xi[0] = 0x80000000;
+ xi[1] = 0x7fffffff;
+ xi[2] = 0x80000001;
+ xi[3] = -13;
+ xi[4] = -1;
+ xi[5] = 0;
+ xi[6] = 1;
+ xi[7] = 999;
+ doitInt(xi);
+ expectEquals32(0x80000000, xi[0]);
+ expectEquals32(0x7fffffff, xi[1]);
+ expectEquals32(0x7fffffff, xi[2]);
+ expectEquals32(13, xi[3]);
+ expectEquals32(1, xi[4]);
+ expectEquals32(0, xi[5]);
+ expectEquals32(1, xi[6]);
+ expectEquals32(999, xi[7]);
+
+ // Set up minint64, maxint64 and some others.
+ long[] xl = new long[8];
+ xl[0] = 0x8000000000000000L;
+ xl[1] = 0x7fffffffffffffffL;
+ xl[2] = 0x8000000000000001L;
+ xl[3] = -13;
+ xl[4] = -1;
+ xl[5] = 0;
+ xl[6] = 1;
+ xl[7] = 999;
+ doitLong(xl);
+ expectEquals64(0x8000000000000000L, xl[0]);
+ expectEquals64(0x7fffffffffffffffL, xl[1]);
+ expectEquals64(0x7fffffffffffffffL, xl[2]);
+ expectEquals64(13, xl[3]);
+ expectEquals64(1, xl[4]);
+ expectEquals64(0, xl[5]);
+ expectEquals64(1, xl[6]);
+ expectEquals64(999, xl[7]);
+
+ // Set up float NaN and some others.
+ float[] xf = new float[16];
+ xf[0] = Float.intBitsToFloat(0x7f800001);
+ xf[1] = Float.intBitsToFloat(0x7fa00000);
+ xf[2] = Float.intBitsToFloat(0x7fc00000);
+ xf[3] = Float.intBitsToFloat(0x7fffffff);
+ xf[4] = Float.intBitsToFloat(0xff800001);
+ xf[5] = Float.intBitsToFloat(0xffa00000);
+ xf[6] = Float.intBitsToFloat(0xffc00000);
+ xf[7] = Float.intBitsToFloat(0xffffffff);
+ xf[8] = Float.NEGATIVE_INFINITY;
+ xf[9] = -99.2f;
+ xf[10] = -1.0f;
+ xf[11] = -0.0f;
+ xf[12] = +0.0f;
+ xf[13] = +1.0f;
+ xf[14] = +99.2f;
+ xf[15] = Float.POSITIVE_INFINITY;
+ doitFloat(xf);
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[0]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[1]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[2]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[3]));
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[4]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[5]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[6]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[7]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[8]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[9]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[10]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[11]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[12]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[13]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[14]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[15]));
+
+ // Set up double NaN and some others.
+ double[] xd = new double[16];
+ xd[0] = Double.longBitsToDouble(0x7ff0000000000001L);
+ xd[1] = Double.longBitsToDouble(0x7ff4000000000000L);
+ xd[2] = Double.longBitsToDouble(0x7ff8000000000000L);
+ xd[3] = Double.longBitsToDouble(0x7fffffffffffffffL);
+ xd[4] = Double.longBitsToDouble(0xfff0000000000001L);
+ xd[5] = Double.longBitsToDouble(0xfff4000000000000L);
+ xd[6] = Double.longBitsToDouble(0xfff8000000000000L);
+ xd[7] = Double.longBitsToDouble(0xffffffffffffffffL);
+ xd[8] = Double.NEGATIVE_INFINITY;
+ xd[9] = -99.2f;
+ xd[10] = -1.0f;
+ xd[11] = -0.0f;
+ xd[12] = +0.0f;
+ xd[13] = +1.0f;
+ xd[14] = +99.2f;
+ xd[15] = Double.POSITIVE_INFINITY;
+ doitDouble(xd);
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[0]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[1]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[2]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[3]));
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[4]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[5]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[6]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[7]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[8]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[9]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[10]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[11]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[12]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[13]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[14]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[15]));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals32(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals64(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN32(int expected, int result) {
+ if (expected != result && (expected | SPQUIET) != result) {
+ throw new Error("Expected: 0x" + Integer.toHexString(expected)
+ + ", found: 0x" + Integer.toHexString(result));
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN64(long expected, long result) {
+ if (expected != result && (expected | DPQUIET) != result) {
+ throw new Error("Expected: 0x" + Long.toHexString(expected)
+ + ", found: 0x" + Long.toHexString(result));
+ }
+ }
+}
diff --git a/test/900-hello-plugin/expected.txt b/test/900-hello-plugin/expected.txt
index 43db31c..c160f65 100644
--- a/test/900-hello-plugin/expected.txt
+++ b/test/900-hello-plugin/expected.txt
@@ -3,6 +3,8 @@
GetEnvHandler called in test 900
GetEnvHandler called with version 0x900fffff
GetEnv returned '900' environment!
+Agent_OnLoad called with options "test_900_round_2"
Hello, world!
Agent_OnUnload called
+Agent_OnUnload called
ArtPlugin_Deinitialize called in test 900
diff --git a/test/900-hello-plugin/load_unload.cc b/test/900-hello-plugin/load_unload.cc
index a38cc3d..290997a 100644
--- a/test/900-hello-plugin/load_unload.cc
+++ b/test/900-hello-plugin/load_unload.cc
@@ -52,6 +52,9 @@
char* options,
void* reserved ATTRIBUTE_UNUSED) {
printf("Agent_OnLoad called with options \"%s\"\n", options);
+ if (strcmp("test_900_round_2", options) == 0) {
+ return 0;
+ }
uintptr_t env = 0;
jint res = vm->GetEnv(reinterpret_cast<void**>(&env), TEST_900_ENV_VERSION_NUMBER);
if (res != JNI_OK) {
diff --git a/test/900-hello-plugin/run b/test/900-hello-plugin/run
index 50835f8..c633f6d 100755
--- a/test/900-hello-plugin/run
+++ b/test/900-hello-plugin/run
@@ -19,4 +19,5 @@
plugin=libartagent.so
fi
./default-run "$@" --runtime-option -agentpath:${plugin}=test_900 \
+ --runtime-option -agentpath:${plugin}=test_900_round_2 \
--android-runtime-option -Xplugin:${plugin}
diff --git a/test/901-hello-ti-agent/basics.cc b/test/901-hello-ti-agent/basics.cc
index 9166277..8695e0c 100644
--- a/test/901-hello-ti-agent/basics.cc
+++ b/test/901-hello-ti-agent/basics.cc
@@ -16,14 +16,17 @@
#include "901-hello-ti-agent/basics.h"
+#include <thread>
+
#include <jni.h>
#include <stdio.h>
#include <string.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test901HelloTi {
@@ -142,22 +145,36 @@
return JNI_OK;
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setVerboseFlag(
+extern "C" JNIEXPORT void JNICALL Java_art_Test901_setVerboseFlag(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jint iflag, jboolean val) {
jvmtiVerboseFlag flag = static_cast<jvmtiVerboseFlag>(iflag);
jvmtiError result = jvmti_env->SetVerboseFlag(flag, val);
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkLivePhase(
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test901_checkLivePhase(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jvmtiPhase current_phase;
  jvmtiError phase_result = jvmti_env->GetPhase(&current_phase);
- if (JvmtiErrorToException(env, phase_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, phase_result)) {
return JNI_FALSE;
}
return (current_phase == JVMTI_PHASE_LIVE) ? JNI_TRUE : JNI_FALSE;
}
+static void CallJvmtiFunction(jvmtiEnv* env, jclass klass, jvmtiError* err) {
+ jint n;
+ jmethodID* methods = nullptr;
+ *err = env->GetClassMethods(klass, &n, &methods);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test901_checkUnattached(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass Main_klass) {
+ jvmtiError res = JVMTI_ERROR_NONE;
+ std::thread t1(CallJvmtiFunction, jvmti_env, Main_klass, &res);
+ t1.join();
+ return res == JVMTI_ERROR_UNATTACHED_THREAD;
+}
+
} // namespace Test901HelloTi
} // namespace art
diff --git a/test/901-hello-ti-agent/expected.txt b/test/901-hello-ti-agent/expected.txt
index c4b24cb..eb5b6a2 100644
--- a/test/901-hello-ti-agent/expected.txt
+++ b/test/901-hello-ti-agent/expected.txt
@@ -3,6 +3,7 @@
VMInit
Hello, world!
Agent in live phase.
+Received expected error for unattached JVMTI calls
0
1
2
diff --git a/test/901-hello-ti-agent/src/Main.java b/test/901-hello-ti-agent/src/Main.java
index 4d62ed3..7441b10 100644
--- a/test/901-hello-ti-agent/src/Main.java
+++ b/test/901-hello-ti-agent/src/Main.java
@@ -16,29 +16,6 @@
public class Main {
public static void main(String[] args) {
- System.out.println("Hello, world!");
-
- if (checkLivePhase()) {
- System.out.println("Agent in live phase.");
- }
-
- set(0); // OTHER
- set(1); // GC
- set(2); // CLASS
- set(4); // JNI
- set(8); // Error.
+ art.Test901.run();
}
-
- private static void set(int i) {
- System.out.println(i);
- try {
- setVerboseFlag(i, true);
- setVerboseFlag(i, false);
- } catch (RuntimeException e) {
- System.out.println(e.getMessage());
- }
- }
-
- private static native boolean checkLivePhase();
- private static native void setVerboseFlag(int flag, boolean value);
}
diff --git a/test/901-hello-ti-agent/src/art/Main.java b/test/901-hello-ti-agent/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/901-hello-ti-agent/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/901-hello-ti-agent/src/art/Test901.java b/test/901-hello-ti-agent/src/art/Test901.java
new file mode 100644
index 0000000..26f7399
--- /dev/null
+++ b/test/901-hello-ti-agent/src/art/Test901.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test901 {
+ public static void run() {
+ Main.bindAgentJNIForClass(Test901.class);
+
+ System.out.println("Hello, world!");
+
+ if (checkLivePhase()) {
+ System.out.println("Agent in live phase.");
+ }
+ if (checkUnattached()) {
+ System.out.println("Received expected error for unattached JVMTI calls");
+ }
+
+ set(0); // OTHER
+ set(1); // GC
+ set(2); // CLASS
+ set(4); // JNI
+ set(8); // Error.
+ }
+
+ private static void set(int i) {
+ System.out.println(i);
+ try {
+ setVerboseFlag(i, true);
+ setVerboseFlag(i, false);
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ private static native boolean checkLivePhase();
+ private static native void setVerboseFlag(int flag, boolean value);
+ private static native boolean checkUnattached();
+}
diff --git a/test/902-hello-transformation/src/Main.java b/test/902-hello-transformation/src/Main.java
index 471c82b..ed8a500 100644
--- a/test/902-hello-transformation/src/Main.java
+++ b/test/902-hello-transformation/src/Main.java
@@ -49,6 +49,7 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/902-hello-transformation/src/art/Main.java b/test/902-hello-transformation/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/902-hello-transformation/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index 48896b2..b98d081 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -14,164 +14,8 @@
* limitations under the License.
*/
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) {
- doTest();
- testGetTaggedObjects();
- testTags();
+ art.Test903.run();
}
-
- public static void doTest() {
- WeakReference<Object> weak = test();
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- if (weak.get() != null) {
- throw new RuntimeException("WeakReference not cleared");
- }
- }
-
- public static void testTags() {
- Object o = new Object();
- long[] res = testTagsInDifferentEnvs(o, 100, 10);
- System.out.println(Arrays.toString(res));
- }
-
- private static WeakReference<Object> test() {
- Object o1 = new Object();
- setTag(o1, 1);
-
- Object o2 = new Object();
- setTag(o2, 2);
-
- checkTag(o1, 1);
- checkTag(o2, 2);
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- checkTag(o1, 1);
- checkTag(o2, 2);
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- setTag(o1, 10);
- setTag(o2, 20);
-
- checkTag(o1, 10);
- checkTag(o2, 20);
-
- return new WeakReference<Object>(o1);
- }
-
- private static void checkTag(Object o, long expectedTag) {
- long tag = getTag(o);
- if (expectedTag != tag) {
- throw new RuntimeException("Unexpected tag " + tag + ", expected " + expectedTag);
- }
- }
-
- private static void testGetTaggedObjects() {
- // Use an array list to ensure that the objects stay live for a bit. Also gives us a source
- // to compare to. We use index % 10 as the tag.
- ArrayList<Object> l = new ArrayList<>();
-
- for (int i = 0; i < 20; i++) {
- Integer o = new Integer(i);
- l.add(o);
- if (i % 10 != 0) {
- setTag(o, i % 10);
- }
- }
-
- testGetTaggedObjectsRun(l, null, false, false);
- testGetTaggedObjectsRun(l, null, true, true);
- testGetTaggedObjectsRun(l, new long[] { 2, 5 }, true, true);
- testGetTaggedObjectsRun(l, null, false, true);
- testGetTaggedObjectsRun(l, null, true, false);
- }
-
- private static void testGetTaggedObjectsRun(ArrayList<Object> l, long[] searchTags,
- boolean returnObjects, boolean returnTags) {
- Object[] result = getTaggedObjects(searchTags, returnObjects, returnTags);
-
- Object[] objects = (Object[])result[0];
- long[] tags = (long[])result[1];
- int count = (int)result[2];
-
- System.out.println(count);
- printArraysSorted(objects, tags);
- }
-
- private static void printArraysSorted(Object[] objects, long[] tags) {
- if (objects == null && tags == null) {
- System.out.println("<nothing>");
- return;
- }
-
- int l1 = objects == null ? 0 : objects.length;
- int l2 = tags == null ? 0 : tags.length;
- int l = Math.max(l1, l2);
- Pair[] tmp = new Pair[l];
- for (int i = 0; i < l; i++) {
- tmp[i] = new Pair(objects == null ? null : objects[i], tags == null ? 0 : tags[i]);
- }
-
- Arrays.sort(tmp);
-
- System.out.println(Arrays.toString(tmp));
- }
-
- private static class Pair implements Comparable<Pair> {
- Object obj;
- long tag;
- public Pair(Object o, long t) {
- obj = o;
- tag = t;
- }
-
- public int compareTo(Pair p) {
- if (tag != p.tag) {
- return Long.compare(tag, p.tag);
- }
-
- if ((obj instanceof Comparable) && (p.obj instanceof Comparable)) {
- // It's not really correct, but w/e, best effort.
- int result = ((Comparable<Object>)obj).compareTo(p.obj);
- if (result != 0) {
- return result;
- }
- }
-
- if (obj != null && p.obj != null) {
- return obj.hashCode() - p.obj.hashCode();
- }
-
- if (obj != null) {
- return 1;
- }
-
- if (p.obj != null) {
- return -1;
- }
-
- return hashCode() - p.hashCode();
- }
-
- public String toString() {
- return "<" + obj + ";" + tag + ">";
- }
- }
-
- private static native void setTag(Object o, long tag);
- private static native long getTag(Object o);
- private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
- boolean returnTags);
- private static native long[] testTagsInDifferentEnvs(Object o, long baseTag, int n);
}
diff --git a/test/903-hello-tagging/src/art/Main.java b/test/903-hello-tagging/src/art/Main.java
new file mode 100644
index 0000000..aa5498b
--- /dev/null
+++ b/test/903-hello-tagging/src/art/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+
+ // Common infrastructure.
+ public static native void setTag(Object o, long tag);
+ public static native long getTag(Object o);
+}
diff --git a/test/903-hello-tagging/src/art/Test903.java b/test/903-hello-tagging/src/art/Test903.java
new file mode 100644
index 0000000..1691a94
--- /dev/null
+++ b/test/903-hello-tagging/src/art/Test903.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class Test903 {
+ public static void run() {
+ Main.bindAgentJNIForClass(Test903.class);
+
+ doTest();
+ testGetTaggedObjects();
+ testTags();
+ }
+
+ public static void doTest() {
+ WeakReference<Object> weak = test();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ if (weak.get() != null) {
+ throw new RuntimeException("WeakReference not cleared");
+ }
+ }
+
+ public static void testTags() {
+ Object o = new Object();
+ long[] res = testTagsInDifferentEnvs(o, 100, 10);
+ System.out.println(Arrays.toString(res));
+ }
+
+ private static WeakReference<Object> test() {
+ Object o1 = new Object();
+ Main.setTag(o1, 1);
+
+ Object o2 = new Object();
+ Main.setTag(o2, 2);
+
+ checkTag(o1, 1);
+ checkTag(o2, 2);
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ checkTag(o1, 1);
+ checkTag(o2, 2);
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ Main.setTag(o1, 10);
+ Main.setTag(o2, 20);
+
+ checkTag(o1, 10);
+ checkTag(o2, 20);
+
+ return new WeakReference<Object>(o1);
+ }
+
+ private static void checkTag(Object o, long expectedTag) {
+ long tag = Main.getTag(o);
+ if (expectedTag != tag) {
+ throw new RuntimeException("Unexpected tag " + tag + ", expected " + expectedTag);
+ }
+ }
+
+ private static void testGetTaggedObjects() {
+ // Use an array list to ensure that the objects stay live for a bit. Also gives us a source
+ // to compare to. We use index % 10 as the tag.
+ ArrayList<Object> l = new ArrayList<>();
+
+ for (int i = 0; i < 20; i++) {
+ Integer o = new Integer(i);
+ l.add(o);
+ if (i % 10 != 0) {
+ Main.setTag(o, i % 10);
+ }
+ }
+
+ testGetTaggedObjectsRun(l, null, false, false);
+ testGetTaggedObjectsRun(l, null, true, true);
+ testGetTaggedObjectsRun(l, new long[] { 2, 5 }, true, true);
+ testGetTaggedObjectsRun(l, null, false, true);
+ testGetTaggedObjectsRun(l, null, true, false);
+ }
+
+ private static void testGetTaggedObjectsRun(ArrayList<Object> l, long[] searchTags,
+ boolean returnObjects, boolean returnTags) {
+ Object[] result = getTaggedObjects(searchTags, returnObjects, returnTags);
+
+ Object[] objects = (Object[])result[0];
+ long[] tags = (long[])result[1];
+ int count = (int)result[2];
+
+ System.out.println(count);
+ printArraysSorted(objects, tags);
+ }
+
+ private static void printArraysSorted(Object[] objects, long[] tags) {
+ if (objects == null && tags == null) {
+ System.out.println("<nothing>");
+ return;
+ }
+
+ int l1 = objects == null ? 0 : objects.length;
+ int l2 = tags == null ? 0 : tags.length;
+ int l = Math.max(l1, l2);
+ Pair[] tmp = new Pair[l];
+ for (int i = 0; i < l; i++) {
+ tmp[i] = new Pair(objects == null ? null : objects[i], tags == null ? 0 : tags[i]);
+ }
+
+ Arrays.sort(tmp);
+
+ System.out.println(Arrays.toString(tmp));
+ }
+
+ private static class Pair implements Comparable<Pair> {
+ Object obj;
+ long tag;
+ public Pair(Object o, long t) {
+ obj = o;
+ tag = t;
+ }
+
+ public int compareTo(Pair p) {
+ if (tag != p.tag) {
+ return Long.compare(tag, p.tag);
+ }
+
+ if ((obj instanceof Comparable) && (p.obj instanceof Comparable)) {
+ // It's not really correct, but w/e, best effort.
+ int result = ((Comparable<Object>)obj).compareTo(p.obj);
+ if (result != 0) {
+ return result;
+ }
+ }
+
+ if (obj != null && p.obj != null) {
+ return obj.hashCode() - p.obj.hashCode();
+ }
+
+ if (obj != null) {
+ return 1;
+ }
+
+ if (p.obj != null) {
+ return -1;
+ }
+
+ return hashCode() - p.hashCode();
+ }
+
+ public String toString() {
+ return "<" + obj + ";" + tag + ">";
+ }
+ }
+
+ private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
+ boolean returnTags);
+ private static native long[] testTagsInDifferentEnvs(Object o, long baseTag, int n);
+}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index b85ed48..a2694e7 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -19,52 +19,22 @@
#include <stdio.h>
#include <vector>
+#include "android-base/logging.h"
#include "jni.h"
-#include "ScopedLocalRef.h"
-#include "ScopedPrimitiveArray.h"
+#include "scoped_local_ref.h"
+#include "scoped_primitive_array.h"
-#include "art_method-inl.h"
-#include "base/logging.h"
#include "jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
-#include "utils.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test903HelloTagging {
-extern "C" JNIEXPORT void JNICALL Java_Main_setTag(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass,
- jobject obj,
- jlong tag) {
- jvmtiError ret = jvmti_env->SetTag(obj, tag);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error setting tag: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
-}
-
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getTag(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass,
- jobject obj) {
- jlong tag = 0;
- jvmtiError ret = jvmti_env->GetTag(obj, &tag);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error getting tag: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
- return tag;
-}
-
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env,
- jclass,
- jlongArray searchTags,
- jboolean returnObjects,
- jboolean returnTags) {
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test903_getTaggedObjects(
+ JNIEnv* env, jclass, jlongArray searchTags, jboolean returnObjects, jboolean returnTags) {
ScopedLongArrayRO scoped_array(env);
if (searchTags != nullptr) {
scoped_array.reset(searchTags);
@@ -86,11 +56,7 @@
&result_count,
result_object_array_ptr,
result_tag_array_ptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Failure running GetLoadedClasses: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
@@ -167,7 +133,7 @@
return tag;
}
-extern "C" JNIEXPORT jlongArray JNICALL Java_Main_testTagsInDifferentEnvs(
+extern "C" JNIEXPORT jlongArray JNICALL Java_art_Test903_testTagsInDifferentEnvs(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jlong base_tag, jint count) {
std::unique_ptr<jvmtiEnv*[]> envs = std::unique_ptr<jvmtiEnv*[]>(new jvmtiEnv*[count]);
envs[0] = jvmti_env;
@@ -197,4 +163,3 @@
} // namespace Test903HelloTagging
} // namespace art
-
diff --git a/test/904-object-allocation/src/Main.java b/test/904-object-allocation/src/Main.java
index df59179..0affe7c 100644
--- a/test/904-object-allocation/src/Main.java
+++ b/test/904-object-allocation/src/Main.java
@@ -14,128 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-
public class Main {
public static void main(String[] args) throws Exception {
- // Use a list to ensure objects must be allocated.
- ArrayList<Object> l = new ArrayList<>(100);
-
- prefetchClassNames();
-
- doTest(l);
+ art.Test904.run();
}
-
- // Pre-resolve class names so the strings don't have to be allocated as a side effect of
- // callback printing.
- private static void prefetchClassNames() {
- Object.class.getName();
- Integer.class.getName();
- Float.class.getName();
- Short.class.getName();
- Byte.class.getName();
- Double.class.getName();
- }
-
- public static void doTest(ArrayList<Object> l) throws Exception {
- // Disable the global registration from OnLoad, to get into a known state.
- enableAllocationTracking(null, false);
-
- // Enable actual logging callback.
- setupObjectAllocCallback(true);
-
- enableAllocationTracking(null, true);
-
- l.add(new Object());
- l.add(new Integer(1));
-
- enableAllocationTracking(null, false);
-
- l.add(new Float(1.0f));
-
- enableAllocationTracking(Thread.currentThread(), true);
-
- l.add(new Short((short)0));
-
- enableAllocationTracking(Thread.currentThread(), false);
-
- l.add(new Byte((byte)0));
-
- System.out.println("Tracking on same thread");
-
- testThread(l, true, true);
-
- l.add(new Byte((byte)0));
-
- System.out.println("Tracking on same thread, not disabling tracking");
-
- testThread(l, true, false);
-
- System.out.println("Tracking on different thread");
-
- testThread(l, false, true);
-
- l.add(new Byte((byte)0));
-
- // Disable actual logging callback and re-enable tracking, so we can keep the event enabled and
- // check that shutdown works correctly.
- setupObjectAllocCallback(false);
- enableAllocationTracking(null, true);
- }
-
- private static void testThread(final ArrayList<Object> l, final boolean sameThread,
- final boolean disableTracking) throws Exception {
- final SimpleBarrier startBarrier = new SimpleBarrier(1);
- final SimpleBarrier trackBarrier = new SimpleBarrier(1);
- final SimpleBarrier disableBarrier = new SimpleBarrier(1);
-
- final Thread thisThread = Thread.currentThread();
-
- Thread t = new Thread() {
- public void run() {
- try {
- startBarrier.dec();
- trackBarrier.waitFor();
- } catch (Exception e) {
- e.printStackTrace(System.out);
- System.exit(1);
- }
-
- l.add(new Double(0.0));
-
- if (disableTracking) {
- enableAllocationTracking(sameThread ? this : thisThread, false);
- }
- }
- };
-
- t.start();
- startBarrier.waitFor();
- enableAllocationTracking(sameThread ? t : Thread.currentThread(), true);
- trackBarrier.dec();
-
- t.join();
- }
-
- private static class SimpleBarrier {
- int count;
-
- public SimpleBarrier(int i) {
- count = i;
- }
-
- public synchronized void dec() throws Exception {
- count--;
- notifyAll();
- }
-
- public synchronized void waitFor() throws Exception {
- while (count != 0) {
- wait();
- }
- }
- }
-
- private static native void setupObjectAllocCallback(boolean enable);
- private static native void enableAllocationTracking(Thread thread, boolean enable);
}
diff --git a/test/904-object-allocation/src/art/Main.java b/test/904-object-allocation/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/904-object-allocation/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/904-object-allocation/src/art/Test904.java b/test/904-object-allocation/src/art/Test904.java
new file mode 100644
index 0000000..31e0c8c
--- /dev/null
+++ b/test/904-object-allocation/src/art/Test904.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+
+public class Test904 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test904.class);
+
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ prefetchClassNames();
+
+ doTest(l);
+ }
+
+ // Pre-resolve class names so the strings don't have to be allocated as a side effect of
+ // callback printing.
+ private static void prefetchClassNames() {
+ Object.class.getName();
+ Integer.class.getName();
+ Float.class.getName();
+ Short.class.getName();
+ Byte.class.getName();
+ Double.class.getName();
+ }
+
+ public static void doTest(ArrayList<Object> l) throws Exception {
+ // Disable the global registration from OnLoad, to get into a known state.
+ enableAllocationTracking(null, false);
+
+ // Enable actual logging callback.
+ setupObjectAllocCallback(true);
+
+ enableAllocationTracking(null, true);
+
+ l.add(new Object());
+ l.add(new Integer(1));
+
+ enableAllocationTracking(null, false);
+
+ l.add(new Float(1.0f));
+
+ enableAllocationTracking(Thread.currentThread(), true);
+
+ l.add(new Short((short)0));
+
+ enableAllocationTracking(Thread.currentThread(), false);
+
+ l.add(new Byte((byte)0));
+
+ System.out.println("Tracking on same thread");
+
+ testThread(l, true, true);
+
+ l.add(new Byte((byte)0));
+
+ System.out.println("Tracking on same thread, not disabling tracking");
+
+ testThread(l, true, false);
+
+ System.out.println("Tracking on different thread");
+
+ testThread(l, false, true);
+
+ l.add(new Byte((byte)0));
+
+ // Disable actual logging callback and re-enable tracking, so we can keep the event enabled and
+ // check that shutdown works correctly.
+ setupObjectAllocCallback(false);
+ enableAllocationTracking(null, true);
+ }
+
+ private static void testThread(final ArrayList<Object> l, final boolean sameThread,
+ final boolean disableTracking) throws Exception {
+ final SimpleBarrier startBarrier = new SimpleBarrier(1);
+ final SimpleBarrier trackBarrier = new SimpleBarrier(1);
+ final SimpleBarrier disableBarrier = new SimpleBarrier(1);
+
+ final Thread thisThread = Thread.currentThread();
+
+ Thread t = new Thread() {
+ public void run() {
+ try {
+ startBarrier.dec();
+ trackBarrier.waitFor();
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ System.exit(1);
+ }
+
+ l.add(new Double(0.0));
+
+ if (disableTracking) {
+ enableAllocationTracking(sameThread ? this : thisThread, false);
+ }
+ }
+ };
+
+ t.start();
+ startBarrier.waitFor();
+ enableAllocationTracking(sameThread ? t : Thread.currentThread(), true);
+ trackBarrier.dec();
+
+ t.join();
+ }
+
+ private static class SimpleBarrier {
+ int count;
+
+ public SimpleBarrier(int i) {
+ count = i;
+ }
+
+ public synchronized void dec() throws Exception {
+ count--;
+ notifyAll();
+ }
+
+ public synchronized void waitFor() throws Exception {
+ while (count != 0) {
+ wait();
+ }
+ }
+ }
+
+ private static native void setupObjectAllocCallback(boolean enable);
+ private static native void enableAllocationTracking(Thread thread, boolean enable);
+}
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index cc6f681..8de350b 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -19,14 +19,15 @@
#include <stdio.h>
#include <vector>
-#include "base/logging.h"
+#include "android-base/logging.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
-#include "utils.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test904ObjectAllocation {
@@ -56,37 +57,24 @@
static_cast<size_t>(size));
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectAllocCallback(
- JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jboolean enable) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test904_setupObjectAllocCallback(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jboolean enable) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
callbacks.VMObjectAlloc = enable ? ObjectAllocated : nullptr;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error setting callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
+ JvmtiErrorToException(env, jvmti_env, ret);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableAllocationTracking(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass,
- jthread thread,
- jboolean enable) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test904_enableAllocationTracking(
+ JNIEnv* env, jclass, jthread thread, jboolean enable) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_VM_OBJECT_ALLOC,
thread);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling allocation tracking: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
+ JvmtiErrorToException(env, jvmti_env, ret);
}
} // namespace Test904ObjectAllocation
} // namespace art
-
diff --git a/test/905-object-free/expected.txt b/test/905-object-free/expected.txt
index 436ca11..c226df7 100644
--- a/test/905-object-free/expected.txt
+++ b/test/905-object-free/expected.txt
@@ -10,3 +10,4 @@
---
[]
---
+Free counts 100000 100000
diff --git a/test/905-object-free/src/Main.java b/test/905-object-free/src/Main.java
index e41e378..66bb08e 100644
--- a/test/905-object-free/src/Main.java
+++ b/test/905-object-free/src/Main.java
@@ -14,68 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test905.run();
}
-
- public static void doTest() throws Exception {
- // Use a list to ensure objects must be allocated.
- ArrayList<Object> l = new ArrayList<>(100);
-
- setupObjectFreeCallback();
-
- enableFreeTracking(true);
- run(l);
-
- enableFreeTracking(false);
- run(l);
- }
-
- private static void run(ArrayList<Object> l) {
- allocate(l, 1);
- l.clear();
-
- Runtime.getRuntime().gc();
-
- getAndPrintTags();
- System.out.println("---");
-
- // Note: the reporting will not depend on the heap layout (which could be unstable). Walking
- // the tag table should give us a stable output order.
- for (int i = 10; i <= 1000; i *= 10) {
- allocate(l, i);
- }
- l.clear();
-
- Runtime.getRuntime().gc();
-
- getAndPrintTags();
- System.out.println("---");
-
- Runtime.getRuntime().gc();
-
- getAndPrintTags();
- System.out.println("---");
- }
-
- private static void allocate(ArrayList<Object> l, long tag) {
- Object obj = new Object();
- l.add(obj);
- setTag(obj, tag);
- }
-
- private static void getAndPrintTags() {
- long[] freedTags = getCollectedTags();
- Arrays.sort(freedTags);
- System.out.println(Arrays.toString(freedTags));
- }
-
- private static native void setupObjectFreeCallback();
- private static native void enableFreeTracking(boolean enable);
- private static native void setTag(Object o, long tag);
- private static native long[] getCollectedTags();
}
diff --git a/test/905-object-free/src/art/Main.java b/test/905-object-free/src/art/Main.java
new file mode 100644
index 0000000..aa5498b
--- /dev/null
+++ b/test/905-object-free/src/art/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+
+ // Common infrastructure.
+ public static native void setTag(Object o, long tag);
+ public static native long getTag(Object o);
+}
diff --git a/test/905-object-free/src/art/Test905.java b/test/905-object-free/src/art/Test905.java
new file mode 100644
index 0000000..1ed7a0e
--- /dev/null
+++ b/test/905-object-free/src/art/Test905.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class Test905 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test905.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ setupObjectFreeCallback();
+
+ enableFreeTracking(true);
+ run(l);
+
+ enableFreeTracking(false);
+ run(l);
+
+ enableFreeTracking(true);
+ stress();
+ }
+
+ private static void run(ArrayList<Object> l) {
+ allocate(l, 1);
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ getAndPrintTags();
+ System.out.println("---");
+
+ // Note: the reporting will not depend on the heap layout (which could be unstable). Walking
+ // the tag table should give us a stable output order.
+ for (int i = 10; i <= 1000; i *= 10) {
+ allocate(l, i);
+ }
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ getAndPrintTags();
+ System.out.println("---");
+
+ Runtime.getRuntime().gc();
+
+ getAndPrintTags();
+ System.out.println("---");
+ }
+
+ private static void stressAllocate(int i) {
+ Object obj = new Object();
+ Main.setTag(obj, i);
+ setTag2(obj, i + 1);
+ }
+
+ private static void stress() {
+ getCollectedTags(0);
+ getCollectedTags(1);
+ // Allocate objects.
+ for (int i = 1; i <= 100000; ++i) {
+ stressAllocate(i);
+ }
+ Runtime.getRuntime().gc();
+ long[] freedTags1 = getCollectedTags(0);
+ long[] freedTags2 = getCollectedTags(1);
+ System.out.println("Free counts " + freedTags1.length + " " + freedTags2.length);
+ for (int i = 0; i < freedTags1.length; ++i) {
+ if (freedTags1[i] + 1 != freedTags2[i]) {
+ System.out.println("Mismatched tags " + freedTags1[i] + " " + freedTags2[i]);
+ }
+ }
+ }
+
+ private static void allocate(ArrayList<Object> l, long tag) {
+ Object obj = new Object();
+ l.add(obj);
+ Main.setTag(obj, tag);
+ }
+
+ private static void getAndPrintTags() {
+ long[] freedTags = getCollectedTags(0);
+ Arrays.sort(freedTags);
+ System.out.println(Arrays.toString(freedTags));
+ }
+
+ private static native void setupObjectFreeCallback();
+ private static native void enableFreeTracking(boolean enable);
+ private static native long[] getCollectedTags(int index);
+ private static native void setTag2(Object o, long tag);
+}
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index 5eed472..998194a 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -19,66 +19,87 @@
#include <stdio.h>
#include <vector>
-#include "base/logging.h"
+#include "android-base/logging.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
-#include "utils.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test905ObjectFree {
-static std::vector<jlong> collected_tags;
+static std::vector<jlong> collected_tags1;
+static std::vector<jlong> collected_tags2;
-static void JNICALL ObjectFree(jvmtiEnv* ti_env ATTRIBUTE_UNUSED, jlong tag) {
- collected_tags.push_back(tag);
+jvmtiEnv* jvmti_env2;
+
+static void JNICALL ObjectFree1(jvmtiEnv* ti_env, jlong tag) {
+ CHECK_EQ(ti_env, jvmti_env);
+ collected_tags1.push_back(tag);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectFreeCallback(
- JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+static void JNICALL ObjectFree2(jvmtiEnv* ti_env, jlong tag) {
+ CHECK_EQ(ti_env, jvmti_env2);
+ collected_tags2.push_back(tag);
+}
+
+static void setupObjectFreeCallback(JNIEnv* env, jvmtiEnv* jenv, jvmtiEventObjectFree callback) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
- callbacks.ObjectFree = ObjectFree;
-
- jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error setting callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
+ callbacks.ObjectFree = callback;
+ jvmtiError ret = jenv->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ JvmtiErrorToException(env, jenv, ret);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableFreeTracking(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED,
- jboolean enable) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test905_setupObjectFreeCallback(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ setupObjectFreeCallback(env, jvmti_env, ObjectFree1);
+ JavaVM* jvm = nullptr;
+ env->GetJavaVM(&jvm);
+ CHECK_EQ(jvm->GetEnv(reinterpret_cast<void**>(&jvmti_env2), JVMTI_VERSION_1_2), 0);
+ SetAllCapabilities(jvmti_env2);
+ setupObjectFreeCallback(env, jvmti_env2, ObjectFree2);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test905_enableFreeTracking(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jboolean enable) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_OBJECT_FREE,
nullptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling object-free callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
+ return;
}
+ ret = jvmti_env2->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_OBJECT_FREE,
+ nullptr);
+ JvmtiErrorToException(env, jvmti_env, ret);
}
-extern "C" JNIEXPORT jlongArray JNICALL Java_Main_getCollectedTags(JNIEnv* env,
- jclass klass ATTRIBUTE_UNUSED) {
- jlongArray ret = env->NewLongArray(collected_tags.size());
+extern "C" JNIEXPORT jlongArray JNICALL Java_art_Test905_getCollectedTags(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint index) {
+ std::vector<jlong>& tags = (index == 0) ? collected_tags1 : collected_tags2;
+ jlongArray ret = env->NewLongArray(tags.size());
if (ret == nullptr) {
return ret;
}
- env->SetLongArrayRegion(ret, 0, collected_tags.size(), collected_tags.data());
- collected_tags.clear();
+ env->SetLongArrayRegion(ret, 0, tags.size(), tags.data());
+ tags.clear();
return ret;
}
+extern "C" JNIEXPORT void JNICALL Java_art_Test905_setTag2(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jlong tag) {
+ jvmtiError ret = jvmti_env2->SetTag(obj, tag);
+ JvmtiErrorToException(env, jvmti_env, ret);
+}
+
} // namespace Test905ObjectFree
} // namespace art
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index f2532de..6534b4c 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -23,14 +23,18 @@
#include <stdio.h>
#include <vector>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "base/logging.h"
+
#include "jni.h"
#include "jvmti.h"
-#include "ScopedPrimitiveArray.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
-#include "utf.h"
+#include "scoped_primitive_array.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+#include "ti_utf.h"
namespace art {
namespace Test906IterateHeap {
@@ -52,7 +56,7 @@
return config->Handle(class_tag, size, tag_ptr, length);
}
-static bool Run(jint heap_filter, jclass klass_filter, IterationConfig* config) {
+static bool Run(JNIEnv* env, jint heap_filter, jclass klass_filter, IterationConfig* config) {
jvmtiHeapCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
callbacks.heap_iteration_callback = HeapIterationCallback;
@@ -61,21 +65,18 @@
klass_filter,
&callbacks,
config);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Failure running IterateThroughHeap: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return false;
}
return true;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_iterateThroughHeapCount(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED,
- jint heap_filter,
- jclass klass_filter,
- jint stop_after) {
+extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapCount(
+ JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter,
+ jint stop_after) {
class CountIterationConfig : public IterationConfig {
public:
CountIterationConfig(jint _counter, jint _stop_after)
@@ -99,7 +100,7 @@
};
CountIterationConfig config(0, stop_after);
- Run(heap_filter, klass_filter, &config);
+ Run(env, heap_filter, klass_filter, &config);
if (config.counter > config.stop_after) {
printf("Error: more objects visited than signaled.");
@@ -108,15 +109,15 @@
return config.counter;
}
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_iterateThroughHeapData(JNIEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jint heap_filter,
- jclass klass_filter,
- jlongArray class_tags,
- jlongArray sizes,
- jlongArray tags,
- jintArray lengths) {
+extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapData(
+ JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter,
+ jlongArray class_tags,
+ jlongArray sizes,
+ jlongArray tags,
+ jintArray lengths) {
class DataIterationConfig : public IterationConfig {
public:
jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
@@ -135,7 +136,7 @@
};
DataIterationConfig config;
- if (!Run(heap_filter, klass_filter, &config)) {
+ if (!Run(env, heap_filter, klass_filter, &config)) {
return -1;
}
@@ -154,10 +155,8 @@
return static_cast<jint>(config.class_tags_.size());
}
-extern "C" JNIEXPORT void JNICALL Java_Main_iterateThroughHeapAdd(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED,
- jint heap_filter,
- jclass klass_filter) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test906_iterateThroughHeapAdd(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint heap_filter, jclass klass_filter) {
class AddIterationConfig : public IterationConfig {
public:
AddIterationConfig() {}
@@ -175,10 +174,10 @@
};
AddIterationConfig config;
- Run(heap_filter, klass_filter, &config);
+ Run(env, heap_filter, klass_filter, &config);
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapString(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test906_iterateThroughHeapString(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
struct FindStringCallbacks {
explicit FindStringCallbacks(jlong t) : tag_to_find(t) {}
@@ -199,10 +198,10 @@
void* user_data) {
FindStringCallbacks* p = reinterpret_cast<FindStringCallbacks*>(user_data);
if (*tag_ptr == p->tag_to_find) {
- size_t utf_byte_count = CountUtf8Bytes(value, value_length);
+ size_t utf_byte_count = ti::CountUtf8Bytes(value, value_length);
std::unique_ptr<char[]> mod_utf(new char[utf_byte_count + 1]);
memset(mod_utf.get(), 0, utf_byte_count + 1);
- ConvertUtf16ToModifiedUtf8(mod_utf.get(), utf_byte_count, value, value_length);
+ ti::ConvertUtf16ToModifiedUtf8(mod_utf.get(), utf_byte_count, value, value_length);
if (!p->data.empty()) {
p->data += "\n";
}
@@ -228,13 +227,13 @@
FindStringCallbacks fsc(tag);
jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &fsc);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
return env->NewStringUTF(fsc.data.c_str());
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveArray(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test906_iterateThroughHeapPrimitiveArray(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
struct FindArrayCallbacks {
explicit FindArrayCallbacks(jlong t) : tag_to_find(t) {}
@@ -316,7 +315,7 @@
FindArrayCallbacks fac(tag);
jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &fac);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
return env->NewStringUTF(fac.data.c_str());
@@ -345,7 +344,7 @@
UNREACHABLE();
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveFields(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test906_iterateThroughHeapPrimitiveFields(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
struct FindFieldCallbacks {
explicit FindFieldCallbacks(jlong t) : tag_to_find(t) {}
@@ -403,7 +402,7 @@
FindFieldCallbacks ffc(tag);
jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &ffc);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
return env->NewStringUTF(ffc.data.c_str());
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
index 365ce0f..005c1ef 100644
--- a/test/906-iterate-heap/src/Main.java
+++ b/test/906-iterate-heap/src/Main.java
@@ -14,258 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-import java.util.Collections;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test906.run();
}
-
- public static void doTest() throws Exception {
- A a = new A();
- B b = new B();
- B b2 = new B();
- C c = new C();
- A[] aArray = new A[5];
- String s = "Hello World";
-
- setTag(a, 1);
- setTag(b, 2);
- setTag(b2, 3);
- setTag(aArray, 4);
- setTag(s, 5);
- setTag(B.class, 100);
-
- int all = iterateThroughHeapCount(0, null, Integer.MAX_VALUE);
- int tagged = iterateThroughHeapCount(HEAP_FILTER_OUT_UNTAGGED, null, Integer.MAX_VALUE);
- int untagged = iterateThroughHeapCount(HEAP_FILTER_OUT_TAGGED, null, Integer.MAX_VALUE);
- int taggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_UNTAGGED, null,
- Integer.MAX_VALUE);
- int untaggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_TAGGED, null,
- Integer.MAX_VALUE);
-
- if (all != tagged + untagged) {
- throw new IllegalStateException("Instances: " + all + " != " + tagged + " + " + untagged);
- }
- if (all != taggedClass + untaggedClass) {
- throw new IllegalStateException("By class: " + all + " != " + taggedClass + " + " +
- untaggedClass);
- }
- if (tagged != 6) {
- throw new IllegalStateException(tagged + " tagged objects");
- }
- if (taggedClass != 2) {
- throw new IllegalStateException(tagged + " objects with tagged class");
- }
- if (all == tagged) {
- throw new IllegalStateException("All objects tagged");
- }
- if (all == taggedClass) {
- throw new IllegalStateException("All objects have tagged class");
- }
-
- long classTags[] = new long[100];
- long sizes[] = new long[100];
- long tags[] = new long[100];
- int lengths[] = new int[100];
-
- int n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
- System.out.println(sort(n, classTags, sizes, tags, lengths));
-
- iterateThroughHeapAdd(HEAP_FILTER_OUT_UNTAGGED, null);
- n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
- System.out.println(sort(n, classTags, sizes, tags, lengths));
-
- System.out.println(iterateThroughHeapString(getTag(s)));
- System.out.println(getTag(s));
-
- boolean[] zArray = new boolean[] { false, true };
- setTag(zArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(zArray)));
- System.out.println(getTag(zArray));
-
- byte[] bArray = new byte[] { 1, 2, 3 };
- setTag(bArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(bArray)));
- System.out.println(getTag(bArray));
-
- char[] cArray = new char[] { 'A', 'Z' };
- setTag(cArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(cArray)));
- System.out.println(getTag(cArray));
-
- short[] sArray = new short[] { 1, 2, 3 };
- setTag(sArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(sArray)));
- System.out.println(getTag(sArray));
-
- int[] iArray = new int[] { 1, 2, 3 };
- setTag(iArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(iArray)));
- System.out.println(getTag(iArray));
-
- float[] fArray = new float[] { 0.0f, 1.0f };
- setTag(fArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(fArray)));
- System.out.println(getTag(fArray));
-
- long[] lArray = new long[] { 1, 2, 3 };
- setTag(lArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(lArray)));
- System.out.println(getTag(lArray));
-
- double[] dArray = new double[] { 0.0, 1.0 };
- setTag(dArray, 1);
- System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray)));
- System.out.println(getTag(dArray));
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doTestPrimitiveFieldsClasses();
-
- doTestPrimitiveFieldsIntegral();
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doTestPrimitiveFieldsFloat();
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
- }
-
- private static void doTestPrimitiveFieldsClasses() {
- setTag(IntObject.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(IntObject.class));
- setTag(IntObject.class, 0);
-
- setTag(FloatObject.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(FloatObject.class));
- setTag(FloatObject.class, 0);
-
- setTag(Inf1.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(Inf1.class));
- setTag(Inf1.class, 0);
-
- setTag(Inf2.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(Inf2.class));
- setTag(Inf2.class, 0);
- }
-
- private static void doTestPrimitiveFieldsIntegral() {
- IntObject intObject = new IntObject();
- setTag(intObject, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(intObject));
- }
-
- private static void doTestPrimitiveFieldsFloat() {
- FloatObject floatObject = new FloatObject();
- setTag(floatObject, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
- System.out.println(getTag(floatObject));
- }
-
- static class A {
- }
-
- static class B {
- }
-
- static class C {
- }
-
- static class HeapElem implements Comparable<HeapElem> {
- long classTag;
- long size;
- long tag;
- int length;
-
- public int compareTo(HeapElem other) {
- if (tag != other.tag) {
- return Long.compare(tag, other.tag);
- }
- if (classTag != other.classTag) {
- return Long.compare(classTag, other.classTag);
- }
- if (size != other.size) {
- return Long.compare(size, other.size);
- }
- return Integer.compare(length, other.length);
- }
-
- public String toString() {
- return "{tag=" + tag + ", class-tag=" + classTag + ", size=" +
- (tag >= 100 ? "<class>" : size) // Class size is dependent on 32-bit vs 64-bit,
- // so strip it.
- + ", length=" + length + "}";
- }
- }
-
- private static ArrayList<HeapElem> sort(int n, long classTags[], long sizes[], long tags[],
- int lengths[]) {
- ArrayList<HeapElem> ret = new ArrayList<HeapElem>(n);
- for (int i = 0; i < n; i++) {
- HeapElem elem = new HeapElem();
- elem.classTag = classTags[i];
- elem.size = sizes[i];
- elem.tag = tags[i];
- elem.length = lengths[i];
- ret.add(elem);
- }
- Collections.sort(ret);
- return ret;
- }
-
- private static interface Inf1 {
- public final static int A = 1;
- }
-
- private static interface Inf2 extends Inf1 {
- public final static int B = 1;
- }
-
- private static class IntObject implements Inf1 {
- byte b = (byte)1;
- char c= 'a';
- short s = (short)2;
- int i = 3;
- long l = 4;
- Object o = new Object();
- static int sI = 5;
- }
-
- private static class FloatObject extends IntObject implements Inf2 {
- float f = 1.23f;
- double d = 1.23;
- Object p = new Object();
- static int sI = 6;
- }
-
- private static native void setTag(Object o, long tag);
- private static native long getTag(Object o);
-
- private final static int HEAP_FILTER_OUT_TAGGED = 0x4;
- private final static int HEAP_FILTER_OUT_UNTAGGED = 0x8;
- private final static int HEAP_FILTER_OUT_CLASS_TAGGED = 0x10;
- private final static int HEAP_FILTER_OUT_CLASS_UNTAGGED = 0x20;
-
- private static native int iterateThroughHeapCount(int heapFilter,
- Class<?> klassFilter, int stopAfter);
- private static native int iterateThroughHeapData(int heapFilter,
- Class<?> klassFilter, long classTags[], long sizes[], long tags[], int lengths[]);
- private static native int iterateThroughHeapAdd(int heapFilter,
- Class<?> klassFilter);
- private static native String iterateThroughHeapString(long tag);
- private static native String iterateThroughHeapPrimitiveArray(long tag);
- private static native String iterateThroughHeapPrimitiveFields(long tag);
}
diff --git a/test/906-iterate-heap/src/art/Main.java b/test/906-iterate-heap/src/art/Main.java
new file mode 100644
index 0000000..aa5498b
--- /dev/null
+++ b/test/906-iterate-heap/src/art/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+
+ // Common infrastructure.
+ public static native void setTag(Object o, long tag);
+ public static native long getTag(Object o);
+}
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
new file mode 100644
index 0000000..feebf9c
--- /dev/null
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class Test906 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test906.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ A a = new A();
+ B b = new B();
+ B b2 = new B();
+ C c = new C();
+ A[] aArray = new A[5];
+ String s = "Hello World";
+
+ setTag(a, 1);
+ setTag(b, 2);
+ setTag(b2, 3);
+ setTag(aArray, 4);
+ setTag(s, 5);
+ setTag(B.class, 100);
+
+ int all = iterateThroughHeapCount(0, null, Integer.MAX_VALUE);
+ int tagged = iterateThroughHeapCount(HEAP_FILTER_OUT_UNTAGGED, null, Integer.MAX_VALUE);
+ int untagged = iterateThroughHeapCount(HEAP_FILTER_OUT_TAGGED, null, Integer.MAX_VALUE);
+ int taggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_UNTAGGED, null,
+ Integer.MAX_VALUE);
+ int untaggedClass = iterateThroughHeapCount(HEAP_FILTER_OUT_CLASS_TAGGED, null,
+ Integer.MAX_VALUE);
+
+ if (all != tagged + untagged) {
+ throw new IllegalStateException("Instances: " + all + " != " + tagged + " + " + untagged);
+ }
+ if (all != taggedClass + untaggedClass) {
+ throw new IllegalStateException("By class: " + all + " != " + taggedClass + " + " +
+ untaggedClass);
+ }
+ if (tagged != 6) {
+ throw new IllegalStateException(tagged + " tagged objects");
+ }
+ if (taggedClass != 2) {
+ throw new IllegalStateException(tagged + " objects with tagged class");
+ }
+ if (all == tagged) {
+ throw new IllegalStateException("All objects tagged");
+ }
+ if (all == taggedClass) {
+ throw new IllegalStateException("All objects have tagged class");
+ }
+
+ long classTags[] = new long[100];
+ long sizes[] = new long[100];
+ long tags[] = new long[100];
+ int lengths[] = new int[100];
+
+ int n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
+ System.out.println(sort(n, classTags, sizes, tags, lengths));
+
+ iterateThroughHeapAdd(HEAP_FILTER_OUT_UNTAGGED, null);
+ n = iterateThroughHeapData(HEAP_FILTER_OUT_UNTAGGED, null, classTags, sizes, tags, lengths);
+ System.out.println(sort(n, classTags, sizes, tags, lengths));
+
+ System.out.println(iterateThroughHeapString(getTag(s)));
+ System.out.println(getTag(s));
+
+ boolean[] zArray = new boolean[] { false, true };
+ setTag(zArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(zArray)));
+ System.out.println(getTag(zArray));
+
+ byte[] bArray = new byte[] { 1, 2, 3 };
+ setTag(bArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(bArray)));
+ System.out.println(getTag(bArray));
+
+ char[] cArray = new char[] { 'A', 'Z' };
+ setTag(cArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(cArray)));
+ System.out.println(getTag(cArray));
+
+ short[] sArray = new short[] { 1, 2, 3 };
+ setTag(sArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(sArray)));
+ System.out.println(getTag(sArray));
+
+ int[] iArray = new int[] { 1, 2, 3 };
+ setTag(iArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(iArray)));
+ System.out.println(getTag(iArray));
+
+ float[] fArray = new float[] { 0.0f, 1.0f };
+ setTag(fArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(fArray)));
+ System.out.println(getTag(fArray));
+
+ long[] lArray = new long[] { 1, 2, 3 };
+ setTag(lArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(lArray)));
+ System.out.println(getTag(lArray));
+
+ double[] dArray = new double[] { 0.0, 1.0 };
+ setTag(dArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray)));
+ System.out.println(getTag(dArray));
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private static void doTestPrimitiveFieldsClasses() {
+ setTag(IntObject.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(IntObject.class));
+ setTag(IntObject.class, 0);
+
+ setTag(FloatObject.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(FloatObject.class));
+ setTag(FloatObject.class, 0);
+
+ setTag(Inf1.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(Inf1.class));
+ setTag(Inf1.class, 0);
+
+ setTag(Inf2.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(Inf2.class));
+ setTag(Inf2.class, 0);
+ }
+
+ private static void doTestPrimitiveFieldsIntegral() {
+ IntObject intObject = new IntObject();
+ setTag(intObject, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(intObject));
+ }
+
+ private static void doTestPrimitiveFieldsFloat() {
+ FloatObject floatObject = new FloatObject();
+ setTag(floatObject, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(floatObject));
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ static class C {
+ }
+
+ static class HeapElem implements Comparable<HeapElem> {
+ long classTag;
+ long size;
+ long tag;
+ int length;
+
+ public int compareTo(HeapElem other) {
+ if (tag != other.tag) {
+ return Long.compare(tag, other.tag);
+ }
+ if (classTag != other.classTag) {
+ return Long.compare(classTag, other.classTag);
+ }
+ if (size != other.size) {
+ return Long.compare(size, other.size);
+ }
+ return Integer.compare(length, other.length);
+ }
+
+ public String toString() {
+ return "{tag=" + tag + ", class-tag=" + classTag + ", size=" +
+ (tag >= 100 ? "<class>" : size) // Class size is dependent on 32-bit vs 64-bit,
+ // so strip it.
+ + ", length=" + length + "}";
+ }
+ }
+
+ private static ArrayList<HeapElem> sort(int n, long classTags[], long sizes[], long tags[],
+ int lengths[]) {
+ ArrayList<HeapElem> ret = new ArrayList<HeapElem>(n);
+ for (int i = 0; i < n; i++) {
+ HeapElem elem = new HeapElem();
+ elem.classTag = classTags[i];
+ elem.size = sizes[i];
+ elem.tag = tags[i];
+ elem.length = lengths[i];
+ ret.add(elem);
+ }
+ Collections.sort(ret);
+ return ret;
+ }
+
+ private static interface Inf1 {
+ public final static int A = 1;
+ }
+
+ private static interface Inf2 extends Inf1 {
+ public final static int B = 1;
+ }
+
+ private static class IntObject implements Inf1 {
+ byte b = (byte)1;
+ char c= 'a';
+ short s = (short)2;
+ int i = 3;
+ long l = 4;
+ Object o = new Object();
+ static int sI = 5;
+ }
+
+ private static class FloatObject extends IntObject implements Inf2 {
+ float f = 1.23f;
+ double d = 1.23;
+ Object p = new Object();
+ static int sI = 6;
+ }
+
+ private final static int HEAP_FILTER_OUT_TAGGED = 0x4;
+ private final static int HEAP_FILTER_OUT_UNTAGGED = 0x8;
+ private final static int HEAP_FILTER_OUT_CLASS_TAGGED = 0x10;
+ private final static int HEAP_FILTER_OUT_CLASS_UNTAGGED = 0x20;
+
+ private static void setTag(Object o, long tag) {
+ Main.setTag(o, tag);
+ }
+ private static long getTag(Object o) {
+ return Main.getTag(o);
+ }
+
+ private static native int iterateThroughHeapCount(int heapFilter,
+ Class<?> klassFilter, int stopAfter);
+ private static native int iterateThroughHeapData(int heapFilter,
+ Class<?> klassFilter, long classTags[], long sizes[], long tags[], int lengths[]);
+ private static native int iterateThroughHeapAdd(int heapFilter,
+ Class<?> klassFilter);
+ private static native String iterateThroughHeapString(long tag);
+ private static native String iterateThroughHeapPrimitiveArray(long tag);
+ private static native String iterateThroughHeapPrimitiveFields(long tag);
+}
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index 48ce2e2..1eadf15 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -19,14 +19,16 @@
#include <stdio.h>
#include <vector>
-#include "base/macros.h"
+#include "android-base/macros.h"
+
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "test_env.h"
namespace art {
namespace Test907GetLoadedClasses {
@@ -37,7 +39,7 @@
return reinterpret_cast<jstring>(jni_env->CallObjectMethod(cls, mid));
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test907_getLoadedClasses(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
jint count = -1;
jclass* classes = nullptr;
diff --git a/test/907-get-loaded-classes/src/Main.java b/test/907-get-loaded-classes/src/Main.java
index 370185a..185cfa9 100644
--- a/test/907-get-loaded-classes/src/Main.java
+++ b/test/907-get-loaded-classes/src/Main.java
@@ -14,46 +14,8 @@
* limitations under the License.
*/
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test907.run();
}
-
- public static void doTest() throws Exception {
- // Ensure some classes are loaded.
- A a = new A();
- B b = new B();
- A[] aArray = new A[5];
-
- String[] classes = getLoadedClasses();
- HashSet<String> classesSet = new HashSet<>(Arrays.asList(classes));
-
- String[] shouldBeLoaded = new String[] {
- "java.lang.Object", "java.lang.Class", "java.lang.String", "Main$A", "Main$B", "[LMain$A;"
- };
-
- boolean error = false;
- for (String s : shouldBeLoaded) {
- if (!classesSet.contains(s)) {
- System.out.println("Did not find " + s);
- error = true;
- }
- }
-
- if (error) {
- System.out.println(Arrays.toString(classes));
- }
- }
-
- static class A {
- }
-
- static class B {
- }
-
- private static native String[] getLoadedClasses();
-}
+}
\ No newline at end of file
diff --git a/test/907-get-loaded-classes/src/art/Main.java b/test/907-get-loaded-classes/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/907-get-loaded-classes/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/907-get-loaded-classes/src/art/Test907.java b/test/907-get-loaded-classes/src/art/Test907.java
new file mode 100644
index 0000000..d654428
--- /dev/null
+++ b/test/907-get-loaded-classes/src/art/Test907.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+public class Test907 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test907.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Ensure some classes are loaded.
+ A a = new A();
+ B b = new B();
+ A[] aArray = new A[5];
+
+ String[] classes = getLoadedClasses();
+ HashSet<String> classesSet = new HashSet<>(Arrays.asList(classes));
+
+ String[] shouldBeLoaded = new String[] {
+ "java.lang.Object", "java.lang.Class", "java.lang.String", "art.Test907$A",
+ "art.Test907$B", "[Lart.Test907$A;"
+ };
+
+ boolean error = false;
+ for (String s : shouldBeLoaded) {
+ if (!classesSet.contains(s)) {
+ System.out.println("Did not find " + s);
+ error = true;
+ }
+ }
+
+ if (error) {
+ System.out.println(Arrays.toString(classes));
+ }
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ private static native String[] getLoadedClasses();
+}
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index 45148f8..ddd2ba7 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -17,11 +17,14 @@
#include <stdio.h>
#include <string.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
+
#include "jni.h"
#include "jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test908GcStartFinish {
@@ -37,7 +40,7 @@
starts++;
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback(
+extern "C" JNIEXPORT void JNICALL Java_art_Test908_setupGcCallback(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
@@ -45,37 +48,37 @@
callbacks.GarbageCollectionStart = GarbageCollectionStart;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jboolean enable) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test908_enableGcTracking(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jboolean enable) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_START,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getGcStarts(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED) {
+extern "C" JNIEXPORT jint JNICALL Java_art_Test908_getGcStarts(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
jint result = static_cast<jint>(starts);
starts = 0;
return result;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getGcFinishes(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED) {
+extern "C" JNIEXPORT jint JNICALL Java_art_Test908_getGcFinishes(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
jint result = static_cast<jint>(finishes);
finishes = 0;
return result;
diff --git a/test/908-gc-start-finish/src/Main.java b/test/908-gc-start-finish/src/Main.java
index 05388c9..9acfbea 100644
--- a/test/908-gc-start-finish/src/Main.java
+++ b/test/908-gc-start-finish/src/Main.java
@@ -14,64 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test908.run();
}
-
- public static void doTest() throws Exception {
- // Use a list to ensure objects must be allocated.
- ArrayList<Object> l = new ArrayList<>(100);
-
- setupGcCallback();
-
- enableGcTracking(true);
- run(l);
-
- enableGcTracking(false);
- run(l);
- }
-
- private static void run(ArrayList<Object> l) {
- allocate(l, 1);
- l.clear();
-
- Runtime.getRuntime().gc();
-
- printStats();
-
- // Note: the reporting will not depend on the heap layout (which could be unstable). Walking
- // the tag table should give us a stable output order.
- for (int i = 10; i <= 1000; i *= 10) {
- allocate(l, i);
- }
- l.clear();
-
- Runtime.getRuntime().gc();
-
- printStats();
-
- Runtime.getRuntime().gc();
-
- printStats();
- }
-
- private static void allocate(ArrayList<Object> l, long tag) {
- Object obj = new Object();
- l.add(obj);
- }
-
- private static void printStats() {
- System.out.println("---");
- int s = getGcStarts();
- int f = getGcFinishes();
- System.out.println((s > 0) + " " + (f > 0));
- }
-
- private static native void setupGcCallback();
- private static native void enableGcTracking(boolean enable);
- private static native int getGcStarts();
- private static native int getGcFinishes();
}
diff --git a/test/908-gc-start-finish/src/art/Main.java b/test/908-gc-start-finish/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/908-gc-start-finish/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/908-gc-start-finish/src/art/Test908.java b/test/908-gc-start-finish/src/art/Test908.java
new file mode 100644
index 0000000..ceaa98c
--- /dev/null
+++ b/test/908-gc-start-finish/src/art/Test908.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+
+public class Test908 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test908.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ setupGcCallback();
+
+ enableGcTracking(true);
+ run(l);
+
+ enableGcTracking(false);
+ run(l);
+ }
+
+ private static void run(ArrayList<Object> l) {
+ allocate(l, 1);
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+ // Note: the reporting will not depend on the heap layout (which could be unstable). Walking
+ // the tag table should give us a stable output order.
+ for (int i = 10; i <= 1000; i *= 10) {
+ allocate(l, i);
+ }
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+ }
+
+ private static void allocate(ArrayList<Object> l, long tag) {
+ Object obj = new Object();
+ l.add(obj);
+ }
+
+ private static void printStats() {
+ System.out.println("---");
+ int s = getGcStarts();
+ int f = getGcFinishes();
+ System.out.println((s > 0) + " " + (f > 0));
+ }
+
+ private static native void setupGcCallback();
+ private static native void enableGcTracking(boolean enable);
+ private static native int getGcStarts();
+ private static native int getGcFinishes();
+}
diff --git a/test/909-attach-agent/attach.cc b/test/909-attach-agent/attach.cc
index 67c7567..0150e09 100644
--- a/test/909-attach-agent/attach.cc
+++ b/test/909-attach-agent/attach.cc
@@ -19,7 +19,9 @@
#include <jni.h>
#include <stdio.h>
#include <string.h>
-#include "base/macros.h"
+
+#include "android-base/macros.h"
+
#include "jvmti.h"
namespace art {
diff --git a/test/909-attach-agent/src/art/Main.java b/test/909-attach-agent/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/909-attach-agent/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/910-methods/expected.txt b/test/910-methods/expected.txt
index e87929f..8e6b6e7 100644
--- a/test/910-methods/expected.txt
+++ b/test/910-methods/expected.txt
@@ -48,7 +48,7 @@
Is native: false
Is obsolete: false
Is synthetic: false
-class Main$NestedSynthetic
+class art.Test910$NestedSynthetic
4104
Max locals: 1
Argument size: 0
diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc
index fdc4cdb..9c726e1 100644
--- a/test/910-methods/methods.cc
+++ b/test/910-methods/methods.cc
@@ -16,18 +16,21 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
+
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test910Methods {
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getMethodName(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test910_getMethodName(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
@@ -35,11 +38,7 @@
char* sig;
char* gen;
jvmtiError result = jvmti_env->GetMethodName(id, &name, &sig, &gen);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetMethodName: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -67,138 +66,126 @@
// Also run GetMethodName with all parameter pointers null to check for segfaults.
jvmtiError result2 = jvmti_env->GetMethodName(id, nullptr, nullptr, nullptr);
- if (result2 != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result2, &err);
- printf("Failure running GetMethodName(null, null, null): %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, result2)) {
return nullptr;
}
return ret;
}
-extern "C" JNIEXPORT jclass JNICALL Java_Main_getMethodDeclaringClass(
+extern "C" JNIEXPORT jclass JNICALL Java_art_Test910_getMethodDeclaringClass(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jclass declaring_class;
jvmtiError result = jvmti_env->GetMethodDeclaringClass(id, &declaring_class);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetMethodDeclaringClass: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
return declaring_class;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getMethodModifiers(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test910_getMethodModifiers(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jint modifiers;
jvmtiError result = jvmti_env->GetMethodModifiers(id, &modifiers);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetMethodModifiers: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return 0;
}
return modifiers;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getMaxLocals(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test910_getMaxLocals(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jint max_locals;
jvmtiError result = jvmti_env->GetMaxLocals(id, &max_locals);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return max_locals;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getArgumentsSize(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test910_getArgumentsSize(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jint arguments;
jvmtiError result = jvmti_env->GetArgumentsSize(id, &arguments);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return arguments;
}
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getMethodLocationStart(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test910_getMethodLocationStart(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jlong start;
jlong end;
jvmtiError result = jvmti_env->GetMethodLocation(id, &start, &end);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return start;
}
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getMethodLocationEnd(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test910_getMethodLocationEnd(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jlong start;
jlong end;
jvmtiError result = jvmti_env->GetMethodLocation(id, &start, &end);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return end;
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodNative(
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test910_isMethodNative(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jboolean is_native;
jvmtiError result = jvmti_env->IsMethodNative(id, &is_native);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return JNI_FALSE;
}
return is_native;
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodObsolete(
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test910_isMethodObsolete(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jboolean is_obsolete;
jvmtiError result = jvmti_env->IsMethodObsolete(id, &is_obsolete);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return JNI_FALSE;
}
return is_obsolete;
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodSynthetic(
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test910_isMethodSynthetic(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
jmethodID id = env->FromReflectedMethod(method);
jboolean is_synthetic;
jvmtiError result = jvmti_env->IsMethodSynthetic(id, &is_synthetic);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return JNI_FALSE;
}
diff --git a/test/910-methods/src/Main.java b/test/910-methods/src/Main.java
index 932a1ea..d2238b5 100644
--- a/test/910-methods/src/Main.java
+++ b/test/910-methods/src/Main.java
@@ -14,132 +14,8 @@
* limitations under the License.
*/
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test910.run();
}
-
- public static void doTest() throws Exception {
- testMethod("java.lang.Object", "toString");
- testMethod("java.lang.String", "charAt", int.class);
- testMethod("java.lang.Math", "sqrt", double.class);
- testMethod("java.util.List", "add", Object.class);
-
- testMethod(getProxyClass(), "run");
-
- // Find a synthetic method in the dummy inner class. Do not print the name. Javac and Jack
- // disagree on the naming of synthetic accessors.
- testMethod(findSyntheticMethod(), NestedSynthetic.class, false);
- }
-
- private static Class<?> proxyClass = null;
-
- private static Class<?> getProxyClass() throws Exception {
- if (proxyClass != null) {
- return proxyClass;
- }
-
- proxyClass = Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Runnable.class });
- return proxyClass;
- }
-
- private static void testMethod(String className, String methodName, Class<?>... types)
- throws Exception {
- Class<?> base = Class.forName(className);
- testMethod(base, methodName, types);
- }
-
- private static void testMethod(Class<?> base, String methodName, Class<?>... types)
- throws Exception {
- Method m = base.getDeclaredMethod(methodName, types);
- testMethod(m, base, true);
- }
-
- private static void testMethod(Method m, Class<?> base, boolean printName) {
- String[] result = getMethodName(m);
- if (!result[0].equals(m.getName())) {
- throw new RuntimeException("Name not equal: " + m.getName() + " vs " + result[0]);
- }
- if (printName) {
- System.out.println(Arrays.toString(result));
- }
-
- Class<?> declClass = getMethodDeclaringClass(m);
- if (base != declClass) {
- throw new RuntimeException("Declaring class not equal: " + base + " vs " + declClass);
- }
- System.out.println(declClass);
-
- int modifiers = getMethodModifiers(m);
- if (modifiers != m.getModifiers()) {
- throw new RuntimeException("Modifiers not equal: " + m.getModifiers() + " vs " + modifiers);
- }
- System.out.println(modifiers);
-
- System.out.print("Max locals: ");
- try {
- System.out.println(getMaxLocals(m));
- } catch (RuntimeException e) {
- System.out.println(e.getMessage());
- }
-
- System.out.print("Argument size: ");
- try {
- System.out.println(getArgumentsSize(m));
- } catch (RuntimeException e) {
- System.out.println(e.getMessage());
- }
-
- System.out.print("Location start: ");
- try {
- System.out.println(getMethodLocationStart(m));
- } catch (RuntimeException e) {
- System.out.println(e.getMessage());
- }
-
- System.out.print("Location end: ");
- try {
- System.out.println(getMethodLocationEnd(m));
- } catch (RuntimeException e) {
- System.out.println(e.getMessage());
- }
-
- System.out.println("Is native: " + isMethodNative(m));
- System.out.println("Is obsolete: " + isMethodObsolete(m));
- System.out.println("Is synthetic: " + isMethodSynthetic(m));
- }
-
- private static class NestedSynthetic {
- // Accessing this private field will create a synthetic accessor method;
- private static String dummy;
- }
-
- private static void dummyAccess() {
- System.out.println(NestedSynthetic.dummy);
- }
-
- private static Method findSyntheticMethod() throws Exception {
- Method methods[] = NestedSynthetic.class.getDeclaredMethods();
- for (Method m : methods) {
- if (m.isSynthetic()) {
- return m;
- }
- }
- throw new RuntimeException("Could not find synthetic method");
- }
-
- private static native String[] getMethodName(Method m);
- private static native Class<?> getMethodDeclaringClass(Method m);
- private static native int getMethodModifiers(Method m);
- private static native int getMaxLocals(Method m);
- private static native int getArgumentsSize(Method m);
- private static native long getMethodLocationStart(Method m);
- private static native long getMethodLocationEnd(Method m);
- private static native boolean isMethodNative(Method m);
- private static native boolean isMethodObsolete(Method m);
- private static native boolean isMethodSynthetic(Method m);
}
diff --git a/test/910-methods/src/art/Main.java b/test/910-methods/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/910-methods/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/910-methods/src/art/Test910.java b/test/910-methods/src/art/Test910.java
new file mode 100644
index 0000000..b3490e9
--- /dev/null
+++ b/test/910-methods/src/art/Test910.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+
+public class Test910 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test910.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ testMethod("java.lang.Object", "toString");
+ testMethod("java.lang.String", "charAt", int.class);
+ testMethod("java.lang.Math", "sqrt", double.class);
+ testMethod("java.util.List", "add", Object.class);
+
+ testMethod(getProxyClass(), "run");
+
+ // Find a synthetic method in the dummy inner class. Do not print the name. Javac and Jack
+ // disagree on the naming of synthetic accessors.
+ testMethod(findSyntheticMethod(), NestedSynthetic.class, false);
+ }
+
+ private static Class<?> proxyClass = null;
+
+ private static Class<?> getProxyClass() throws Exception {
+ if (proxyClass != null) {
+ return proxyClass;
+ }
+
+ proxyClass = Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Runnable.class });
+ return proxyClass;
+ }
+
+ private static void testMethod(String className, String methodName, Class<?>... types)
+ throws Exception {
+ Class<?> base = Class.forName(className);
+ testMethod(base, methodName, types);
+ }
+
+ private static void testMethod(Class<?> base, String methodName, Class<?>... types)
+ throws Exception {
+ Method m = base.getDeclaredMethod(methodName, types);
+ testMethod(m, base, true);
+ }
+
+ private static void testMethod(Method m, Class<?> base, boolean printName) {
+ String[] result = getMethodName(m);
+ if (!result[0].equals(m.getName())) {
+ throw new RuntimeException("Name not equal: " + m.getName() + " vs " + result[0]);
+ }
+ if (printName) {
+ System.out.println(Arrays.toString(result));
+ }
+
+ Class<?> declClass = getMethodDeclaringClass(m);
+ if (base != declClass) {
+ throw new RuntimeException("Declaring class not equal: " + base + " vs " + declClass);
+ }
+ System.out.println(declClass);
+
+ int modifiers = getMethodModifiers(m);
+ if (modifiers != m.getModifiers()) {
+ throw new RuntimeException("Modifiers not equal: " + m.getModifiers() + " vs " + modifiers);
+ }
+ System.out.println(modifiers);
+
+ System.out.print("Max locals: ");
+ try {
+ System.out.println(getMaxLocals(m));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ System.out.print("Argument size: ");
+ try {
+ System.out.println(getArgumentsSize(m));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ System.out.print("Location start: ");
+ try {
+ System.out.println(getMethodLocationStart(m));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ System.out.print("Location end: ");
+ try {
+ System.out.println(getMethodLocationEnd(m));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ System.out.println("Is native: " + isMethodNative(m));
+ System.out.println("Is obsolete: " + isMethodObsolete(m));
+ System.out.println("Is synthetic: " + isMethodSynthetic(m));
+ }
+
+ private static class NestedSynthetic {
+    // Accessing this private field will create a synthetic accessor method.
+ private static String dummy;
+ }
+
+ private static void dummyAccess() {
+ System.out.println(NestedSynthetic.dummy);
+ }
+
+ private static Method findSyntheticMethod() throws Exception {
+ Method methods[] = NestedSynthetic.class.getDeclaredMethods();
+ for (Method m : methods) {
+ if (m.isSynthetic()) {
+ return m;
+ }
+ }
+ throw new RuntimeException("Could not find synthetic method");
+ }
+
+ private static native String[] getMethodName(Method m);
+ private static native Class<?> getMethodDeclaringClass(Method m);
+ private static native int getMethodModifiers(Method m);
+ private static native int getMaxLocals(Method m);
+ private static native int getArgumentsSize(Method m);
+ private static native long getMethodLocationStart(Method m);
+ private static native long getMethodLocationEnd(Method m);
+ private static native boolean isMethodNative(Method m);
+ private static native boolean isMethodObsolete(Method m);
+ private static native boolean isMethodSynthetic(Method m);
+}
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index feabb20..2318414 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -4,72 +4,74 @@
From top
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 34
- printOrWait (IILControlData;)V 6 39
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- doTest ()V 38 23
- main ([Ljava/lang/String;)V 3 21
+ print (Ljava/lang/Thread;II)V 0 36
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 38 25
+ run ()V 20 26
+ main ([Ljava/lang/String;)V 0 19
---------
- print (Ljava/lang/Thread;II)V 0 34
- printOrWait (IILControlData;)V 6 39
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- doTest ()V 42 24
- main ([Ljava/lang/String;)V 3 21
+ print (Ljava/lang/Thread;II)V 0 36
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 42 26
+ run ()V 20 26
+ main ([Ljava/lang/String;)V 0 19
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 34
- printOrWait (IILControlData;)V 6 39
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
+ print (Ljava/lang/Thread;II)V 0 36
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
---------
- printOrWait (IILControlData;)V 6 39
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
From bottom
---------
- main ([Ljava/lang/String;)V 3 21
+ main ([Ljava/lang/String;)V 0 19
---------
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- doTest ()V 65 30
- main ([Ljava/lang/String;)V 3 21
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 65 32
+ run ()V 20 26
+ main ([Ljava/lang/String;)V 0 19
---------
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 69 33
################################
### Other thread (suspended) ###
@@ -77,135 +79,135 @@
From top
---------
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 26
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
---------
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 26
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
---------
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
From bottom
---------
- run ()V 4 26
+ run ()V 4 28
---------
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
---------
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
###########################
### Other thread (live) ###
###########################
From top
---------
- printOrWait (IILControlData;)V 44 52
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 59
+ printOrWait (IILart/ControlData;)V 44 54
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 61
---------
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 59
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 61
---------
- printOrWait (IILControlData;)V 44 52
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
+ printOrWait (IILart/ControlData;)V 44 54
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
---------
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
From bottom
---------
- run ()V 4 59
+ run ()V 4 61
---------
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 59
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 61
---------
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
################################
### Other threads (suspended) ###
@@ -261,82 +263,82 @@
---------
AllTraces Thread 0
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 1
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 2
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 3
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 4
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 5
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 6
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 7
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 8
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
AllTraces Thread 9
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
FinalizerDaemon
@@ -356,219 +358,220 @@
---------
main
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
- printAll (I)V 0 73
- doTest ()V 128 57
- main ([Ljava/lang/String;)V 27 33
+ printAll (I)V 0 75
+ doTest ()V 128 59
+ run ()V 44 38
+ main ([Ljava/lang/String;)V 0 19
---------
AllTraces Thread 0
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 1
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 2
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 3
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 4
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 5
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 6
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 7
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 8
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
AllTraces Thread 9
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 45
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 47
---------
FinalizerDaemon
@@ -588,9 +591,10 @@
---------
main
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
- printAll (I)V 0 73
- doTest ()V 133 59
- main ([Ljava/lang/String;)V 27 33
+ printAll (I)V 0 75
+ doTest ()V 133 61
+ run ()V 44 38
+ main ([Ljava/lang/String;)V 0 19
########################################
@@ -617,172 +621,175 @@
---------
ThreadListTraces Thread 0
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
ThreadListTraces Thread 2
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
ThreadListTraces Thread 4
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
ThreadListTraces Thread 6
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
ThreadListTraces Thread 8
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
---------
main
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
- printList ([Ljava/lang/Thread;I)V 0 66
- doTest ()V 116 52
- main ([Ljava/lang/String;)V 35 37
+ printList ([Ljava/lang/Thread;I)V 0 68
+ doTest ()V 116 54
+ run ()V 52 42
+ main ([Ljava/lang/String;)V 0 19
---------
ThreadListTraces Thread 0
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 35
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
---------
ThreadListTraces Thread 2
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 35
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
---------
ThreadListTraces Thread 4
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 35
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
---------
ThreadListTraces Thread 6
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 35
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
---------
ThreadListTraces Thread 8
wait ()V -1 -2
- printOrWait (IILControlData;)V 24 45
- baz (IIILControlData;)Ljava/lang/Object; 2 30
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- baz (IIILControlData;)Ljava/lang/Object; 9 32
- bar (IIILControlData;)J 0 24
- foo (IIILControlData;)I 0 19
- run ()V 4 35
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
---------
main
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
- printList ([Ljava/lang/Thread;I)V 0 66
- doTest ()V 121 54
- main ([Ljava/lang/String;)V 35 37
+ printList ([Ljava/lang/Thread;I)V 0 68
+ doTest ()V 121 56
+ run ()V 52 42
+ main ([Ljava/lang/String;)V 0 19
###################
### Same thread ###
###################
-4
+5
JVMTI_ERROR_ILLEGAL_ARGUMENT
-[public static native java.lang.Object[] Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
-[public static void Frames.doTestSameThread(), 38]
-[public static void Frames.doTest() throws java.lang.Exception, 0]
-[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 2b]
+[public static native java.lang.Object[] art.Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
+[public static void art.Frames.doTestSameThread(), 38]
+[public static void art.Frames.doTest() throws java.lang.Exception, 0]
+[public static void art.Test911.run() throws java.lang.Exception, 3c]
+[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 0]
JVMTI_ERROR_NO_MORE_FRAMES
################################
@@ -791,23 +798,23 @@
18
JVMTI_ERROR_ILLEGAL_ARGUMENT
[public final native void java.lang.Object.wait() throws java.lang.InterruptedException, ffffffff]
-[private static void Recurse.printOrWait(int,int,ControlData), 18]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[public void Frames$1.run(), 4]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 18]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[public void art.Frames$1.run(), 4]
JVMTI_ERROR_NO_MORE_FRAMES
###########################
@@ -815,22 +822,22 @@
###########################
17
JVMTI_ERROR_ILLEGAL_ARGUMENT
-[private static void Recurse.printOrWait(int,int,ControlData), 2c]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
-[private static long Recurse.bar(int,int,int,ControlData), 0]
-[public static int Recurse.foo(int,int,int,ControlData), 0]
-[public void Frames$2.run(), 4]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2c]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[public void art.Frames$2.run(), 4]
JVMTI_ERROR_NO_MORE_FRAMES
Done
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index 96a427d..96705bc 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -16,32 +16,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- bindTest911Classes();
-
- SameThread.doTest();
-
- System.out.println();
-
- OtherThread.doTestOtherThreadWait();
-
- System.out.println();
-
- OtherThread.doTestOtherThreadBusyLoop();
-
- System.out.println();
-
- AllTraces.doTest();
-
- System.out.println();
-
- ThreadListTraces.doTest();
-
- System.out.println();
-
- Frames.doTest();
-
- System.out.println("Done");
+ art.Test911.run();
}
-
- private static native void bindTest911Classes();
}
diff --git a/test/911-get-stack-trace/src/AllTraces.java b/test/911-get-stack-trace/src/art/AllTraces.java
similarity index 99%
rename from test/911-get-stack-trace/src/AllTraces.java
rename to test/911-get-stack-trace/src/art/AllTraces.java
index 1d9aa96..d73f78b 100644
--- a/test/911-get-stack-trace/src/AllTraces.java
+++ b/test/911-get-stack-trace/src/art/AllTraces.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
import java.util.ArrayList;
import java.util.List;
diff --git a/test/911-get-stack-trace/src/ControlData.java b/test/911-get-stack-trace/src/art/ControlData.java
similarity index 98%
rename from test/911-get-stack-trace/src/ControlData.java
rename to test/911-get-stack-trace/src/art/ControlData.java
index 76ac4b8..b5f3291 100644
--- a/test/911-get-stack-trace/src/ControlData.java
+++ b/test/911-get-stack-trace/src/art/ControlData.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
import java.util.concurrent.CountDownLatch;
public class ControlData {
diff --git a/test/911-get-stack-trace/src/Frames.java b/test/911-get-stack-trace/src/art/Frames.java
similarity index 99%
rename from test/911-get-stack-trace/src/Frames.java
rename to test/911-get-stack-trace/src/art/Frames.java
index 54d4165..b3d81bf 100644
--- a/test/911-get-stack-trace/src/Frames.java
+++ b/test/911-get-stack-trace/src/art/Frames.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
import java.util.Arrays;
public class Frames {
diff --git a/test/911-get-stack-trace/src/art/Main.java b/test/911-get-stack-trace/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/911-get-stack-trace/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/911-get-stack-trace/src/OtherThread.java b/test/911-get-stack-trace/src/art/OtherThread.java
similarity index 99%
rename from test/911-get-stack-trace/src/OtherThread.java
rename to test/911-get-stack-trace/src/art/OtherThread.java
index 0a78523..675bff5 100644
--- a/test/911-get-stack-trace/src/OtherThread.java
+++ b/test/911-get-stack-trace/src/art/OtherThread.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
public class OtherThread {
public static void doTestOtherThreadWait() throws Exception {
System.out.println("################################");
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/art/PrintThread.java
similarity index 99%
rename from test/911-get-stack-trace/src/PrintThread.java
rename to test/911-get-stack-trace/src/art/PrintThread.java
index 136fd80..de1da9c 100644
--- a/test/911-get-stack-trace/src/PrintThread.java
+++ b/test/911-get-stack-trace/src/art/PrintThread.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
diff --git a/test/911-get-stack-trace/src/Recurse.java b/test/911-get-stack-trace/src/art/Recurse.java
similarity index 98%
rename from test/911-get-stack-trace/src/Recurse.java
rename to test/911-get-stack-trace/src/art/Recurse.java
index 439fbaa..438fdfd 100644
--- a/test/911-get-stack-trace/src/Recurse.java
+++ b/test/911-get-stack-trace/src/art/Recurse.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
public class Recurse {
public static int foo(int x, int start, int max, ControlData data) {
bar(x, start, max, data);
diff --git a/test/911-get-stack-trace/src/SameThread.java b/test/911-get-stack-trace/src/art/SameThread.java
similarity index 98%
rename from test/911-get-stack-trace/src/SameThread.java
rename to test/911-get-stack-trace/src/art/SameThread.java
index f1e19e3..c9afad5 100644
--- a/test/911-get-stack-trace/src/SameThread.java
+++ b/test/911-get-stack-trace/src/art/SameThread.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
public class SameThread {
public static void doTest() throws Exception {
System.out.println("###################");
diff --git a/test/911-get-stack-trace/src/art/Test911.java b/test/911-get-stack-trace/src/art/Test911.java
new file mode 100644
index 0000000..71a5196
--- /dev/null
+++ b/test/911-get-stack-trace/src/art/Test911.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test911 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(AllTraces.class);
+ Main.bindAgentJNIForClass(Frames.class);
+ Main.bindAgentJNIForClass(PrintThread.class);
+ Main.bindAgentJNIForClass(ThreadListTraces.class);
+
+ SameThread.doTest();
+
+ System.out.println();
+
+ OtherThread.doTestOtherThreadWait();
+
+ System.out.println();
+
+ OtherThread.doTestOtherThreadBusyLoop();
+
+ System.out.println();
+
+ AllTraces.doTest();
+
+ System.out.println();
+
+ ThreadListTraces.doTest();
+
+ System.out.println();
+
+ Frames.doTest();
+
+ System.out.println("Done");
+ }
+}
diff --git a/test/911-get-stack-trace/src/ThreadListTraces.java b/test/911-get-stack-trace/src/art/ThreadListTraces.java
similarity index 99%
rename from test/911-get-stack-trace/src/ThreadListTraces.java
rename to test/911-get-stack-trace/src/art/ThreadListTraces.java
index 14868e9..0de93de 100644
--- a/test/911-get-stack-trace/src/ThreadListTraces.java
+++ b/test/911-get-stack-trace/src/art/ThreadListTraces.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+package art;
+
public class ThreadListTraces {
public static void doTest() throws Exception {
System.out.println("########################################");
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index 5a3a311..985120c 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -18,30 +18,25 @@
#include <memory>
#include <stdio.h>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "android-base/stringprintf.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
namespace art {
namespace Test911GetStackTrace {
using android::base::StringPrintf;
-extern "C" JNIEXPORT void JNICALL Java_Main_bindTest911Classes(
- JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
- BindFunctions(jvmti_env, env, "AllTraces");
- BindFunctions(jvmti_env, env, "Frames");
- BindFunctions(jvmti_env, env, "PrintThread");
- BindFunctions(jvmti_env, env, "ThreadListTraces");
-}
-
static jint FindLineNumber(jint line_number_count,
jvmtiLineNumberEntry* line_number_table,
jlocation location) {
@@ -68,7 +63,7 @@
char* gen;
{
jvmtiError result2 = jvmti_env->GetMethodName(frames[method_index].method, &name, &sig, &gen);
- if (JvmtiErrorToException(env, result2)) {
+ if (JvmtiErrorToException(env, jvmti_env, result2)) {
return nullptr;
}
}
@@ -83,10 +78,7 @@
// Accept absent info and native method errors.
if (line_result != JVMTI_ERROR_ABSENT_INFORMATION &&
line_result != JVMTI_ERROR_NATIVE_METHOD) {
- char* err;
- jvmti_env->GetErrorName(line_result, &err);
- printf("Failure running GetLineNumberTable: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ JvmtiErrorToException(env, jvmti_env, line_result);
return nullptr;
}
line_number_table = nullptr;
@@ -132,14 +124,14 @@
return CreateObjectArray(env, count, "[Ljava/lang/String;", callback);
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_PrintThread_getStackTrace(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_PrintThread_getStackTrace(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
jint count;
{
jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
}
@@ -147,13 +139,13 @@
return TranslateJvmtiFrameInfoArray(env, frames.get(), count);
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_AllTraces_getAllStackTraces(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_AllTraces_getAllStackTraces(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint max) {
jint thread_count;
jvmtiStackInfo* stack_infos;
{
jvmtiError result = jvmti_env->GetAllStackTraces(max, &stack_infos, &thread_count);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
}
@@ -175,7 +167,7 @@
return ret;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_ThreadListTraces_getThreadListStackTraces(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_ThreadListTraces_getThreadListStackTraces(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobjectArray jthreads, jint max) {
jint thread_count = env->GetArrayLength(jthreads);
std::unique_ptr<jthread[]> threads(new jthread[thread_count]);
@@ -189,7 +181,7 @@
threads.get(),
max,
&stack_infos);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
}
@@ -211,23 +203,23 @@
return ret;
}
-extern "C" JNIEXPORT jint JNICALL Java_Frames_getFrameCount(
+extern "C" JNIEXPORT jint JNICALL Java_art_Frames_getFrameCount(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread) {
jint count;
jvmtiError result = jvmti_env->GetFrameCount(thread, &count);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return count;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Frames_getFrameLocation(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Frames_getFrameLocation(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint depth) {
jmethodID method;
jlocation location;
jvmtiError result = jvmti_env->GetFrameLocation(thread, depth, &method, &location);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -237,12 +229,12 @@
{
jclass decl_class;
jvmtiError class_result = jvmti_env->GetMethodDeclaringClass(method, &decl_class);
- if (JvmtiErrorToException(env, class_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, class_result)) {
return nullptr;
}
jint modifiers;
jvmtiError mod_result = jvmti_env->GetMethodModifiers(method, &modifiers);
- if (JvmtiErrorToException(env, mod_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, mod_result)) {
return nullptr;
}
constexpr jint kStatic = 0x8;
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index 5bd34f6..2636367 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -16,19 +16,22 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
+
#include "class_linker.h"
#include "jni.h"
#include "mirror/class_loader.h"
#include "jvmti.h"
#include "runtime.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test912Classes {
@@ -233,7 +236,7 @@
jint count = 0;
jclass* classes = nullptr;
jvmtiError result = jvmti_env->GetClassLoaderClasses(jclassloader, &count, &classes);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -251,7 +254,7 @@
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) {
jint major, minor;
jvmtiError result = jvmti_env->GetClassVersionNumbers(klass, &minor, &major);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -270,7 +273,7 @@
jvmtiError result = jenv->GetClassSignature(klass, &name, nullptr);
if (result != JVMTI_ERROR_NONE) {
if (jni_env != nullptr) {
- JvmtiErrorToException(jni_env, result);
+ JvmtiErrorToException(jni_env, jenv, result);
} else {
printf("Failed to get class signature.\n");
}
@@ -291,13 +294,13 @@
jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_CLASS_LOAD,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_CLASS_PREPARE,
nullptr);
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
return;
}
@@ -306,20 +309,20 @@
callbacks.ClassLoad = class_load;
callbacks.ClassPrepare = class_prepare;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_CLASS_LOAD,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_CLASS_PREPARE,
nullptr);
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
}
class ClassLoadPreparePrinter {
@@ -364,7 +367,7 @@
jvmtiError result = jenv->GetThreadInfo(thread, &info);
if (result != JVMTI_ERROR_NONE) {
if (jni_env != nullptr) {
- JvmtiErrorToException(jni_env, result);
+ JvmtiErrorToException(jni_env, jenv, result);
} else {
printf("Failed to get thread name.\n");
}
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index 6b86ac9..0f2920a 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -92,3 +92,7 @@
Prepare: LA; on TestRunner (cur=TestRunner)
Load: LC; on TestRunner
Prepare: LC; on TestRunner (cur=TestRunner)
+Load: L$Proxy1; on main
+Prepare: L$Proxy1; on main (cur=main)
+Load: [LMain; on main
+Prepare: [LMain; on main (cur=main)
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index 5d25d76..6c8858a 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -23,6 +23,7 @@
public class Main {
public static void main(String[] args) throws Exception {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest();
}
@@ -282,6 +283,10 @@
t.start();
t.join();
+ // Check creation of arrays and proxies.
+ Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Comparable.class });
+ Class.forName("[LMain;");
+
enableClassLoadPreparePrintEvents(false);
// Note: the JIT part of this test is about the JIT pulling in a class not yet touched by
diff --git a/test/912-classes/src/art/Main.java b/test/912-classes/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/912-classes/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index fc2761e..2a183ee 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -79,6 +79,37 @@
5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+---
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+---
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+---
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+---
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+---
[1@0 (32, 'HelloWorld'), 2@0 (16, '')]
2
3
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 66fc7be..6a06b29 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -21,10 +21,10 @@
#include <iostream>
#include <vector>
+#include "android-base/macros.h"
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "jit/jit.h"
#include "jni.h"
#include "native_stack_dump.h"
@@ -34,16 +34,18 @@
#include "thread-inl.h"
#include "thread_list.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test913Heaps {
using android::base::StringPrintf;
-extern "C" JNIEXPORT void JNICALL Java_Main_forceGarbageCollection(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED) {
+extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
jvmtiError ret = jvmti_env->ForceGarbageCollection();
if (ret != JVMTI_ERROR_NONE) {
char* err;
@@ -113,14 +115,15 @@
return true;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jint heap_filter,
- jclass klass_filter,
- jobject initial_object,
- jint stop_after,
- jint follow_set,
- jobject jniRef) {
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
+ JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass_filter,
+ jobject initial_object,
+ jint stop_after,
+ jint follow_set,
+ jobject jniRef) {
class PrintIterationConfig FINAL : public IterationConfig {
public:
PrintIterationConfig(jint _stop_after, jint _follow_set)
@@ -501,7 +504,7 @@
return ret;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferencesString(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferencesString(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
struct FindStringCallbacks {
static jint JNICALL FollowReferencesCallback(
@@ -550,7 +553,7 @@
FindStringCallbacks fsc;
jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &fsc);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
@@ -564,7 +567,7 @@
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveArray(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test913_followReferencesPrimitiveArray(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
struct FindArrayCallbacks {
static jint JNICALL FollowReferencesCallback(
@@ -648,7 +651,7 @@
FindArrayCallbacks fac;
jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &fac);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
return env->NewStringUTF(fac.data.c_str());
@@ -677,7 +680,7 @@
UNREACHABLE();
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveFields(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test913_followReferencesPrimitiveFields(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
struct FindFieldCallbacks {
static jint JNICALL FollowReferencesCallback(
@@ -738,11 +741,68 @@
FindFieldCallbacks ffc;
jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &ffc);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return nullptr;
}
return env->NewStringUTF(ffc.data.c_str());
}
+// This is copied from test 908. Consider moving this to the main shim.
+
+static size_t starts = 0;
+static size_t finishes = 0;
+
+static void JNICALL GarbageCollectionFinish(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ finishes++;
+}
+
+static void JNICALL GarbageCollectionStart(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ starts++;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test913_setupGcCallback(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.GarbageCollectionFinish = GarbageCollectionFinish;
+ callbacks.GarbageCollectionStart = GarbageCollectionStart;
+
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ JvmtiErrorToException(env, jvmti_env, ret);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test913_enableGcTracking(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jboolean enable) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_START,
+ nullptr);
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
+ return;
+ }
+ ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
+ nullptr);
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
+ return;
+ }
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_art_Test913_getGcStarts(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(starts);
+ starts = 0;
+ return result;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_art_Test913_getGcFinishes(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(finishes);
+ finishes = 0;
+ return result;
+}
+
} // namespace Test913Heaps
} // namespace art
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
index 66f6883..286a917 100644
--- a/test/913-heaps/src/Main.java
+++ b/test/913-heaps/src/Main.java
@@ -14,581 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
- new TestConfig().doFollowReferencesTest();
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doStringTest();
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doPrimitiveArrayTest();
- doPrimitiveFieldTest();
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- // Test klass filter.
- System.out.println("--- klass ---");
- new TestConfig(A.class, 0).doFollowReferencesTest();
-
- // Test heap filter.
- System.out.println("--- heap_filter ---");
- System.out.println("---- tagged objects");
- new TestConfig(null, 0x4).doFollowReferencesTest();
- System.out.println("---- untagged objects");
- new TestConfig(null, 0x8).doFollowReferencesTest();
- System.out.println("---- tagged classes");
- new TestConfig(null, 0x10).doFollowReferencesTest();
- System.out.println("---- untagged classes");
- new TestConfig(null, 0x20).doFollowReferencesTest();
+ art.Test913.run();
}
-
- public static void doTest() throws Exception {
- setupGcCallback();
-
- enableGcTracking(true);
- run();
- enableGcTracking(false);
- }
-
- public static void doStringTest() throws Exception {
- final String str = new String("HelloWorld");
- final String str2 = new String("");
- Object o = new Object() {
- String s = str;
- String s2 = str2;
- };
-
- setTag(str, 1);
- setTag(str2, 2);
- System.out.println(Arrays.toString(followReferencesString(o)));
- System.out.println(getTag(str));
- System.out.println(getTag(str2));
- }
-
- public static void doPrimitiveArrayTest() throws Exception {
- final boolean[] zArray = new boolean[] { false, true };
- setTag(zArray, 1);
-
- final byte[] bArray = new byte[] { 1, 2, 3 };
- setTag(bArray, 2);
-
- final char[] cArray = new char[] { 'A', 'Z' };
- setTag(cArray, 3);
-
- final short[] sArray = new short[] { 1, 2, 3 };
- setTag(sArray, 4);
-
- final int[] iArray = new int[] { 1, 2, 3 };
- setTag(iArray, 5);
-
- final float[] fArray = new float[] { 0.0f, 1.0f };
- setTag(fArray, 6);
-
- final long[] lArray = new long[] { 1, 2, 3 };
- setTag(lArray, 7);
-
- final double[] dArray = new double[] { 0.0, 1.0 };
- setTag(dArray, 8);
-
- Object o = new Object() {
- Object z = zArray;
- Object b = bArray;
- Object c = cArray;
- Object s = sArray;
- Object i = iArray;
- Object f = fArray;
- Object l = lArray;
- Object d = dArray;
- };
-
- System.out.println(followReferencesPrimitiveArray(o));
- System.out.print(getTag(zArray));
- System.out.print(getTag(bArray));
- System.out.print(getTag(cArray));
- System.out.print(getTag(sArray));
- System.out.print(getTag(iArray));
- System.out.print(getTag(fArray));
- System.out.print(getTag(lArray));
- System.out.println(getTag(dArray));
- }
-
- public static void doPrimitiveFieldTest() throws Exception {
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doTestPrimitiveFieldsClasses();
-
- doTestPrimitiveFieldsIntegral();
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doTestPrimitiveFieldsFloat();
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
- }
-
- private static void doTestPrimitiveFieldsClasses() {
- setTag(IntObject.class, 10000);
- System.out.println(followReferencesPrimitiveFields(IntObject.class));
- System.out.println(getTag(IntObject.class));
- setTag(IntObject.class, 0);
-
- setTag(FloatObject.class, 10000);
- System.out.println(followReferencesPrimitiveFields(FloatObject.class));
- System.out.println(getTag(FloatObject.class));
- setTag(FloatObject.class, 0);
-
- setTag(Inf1.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf1.class));
- System.out.println(getTag(Inf1.class));
- setTag(Inf1.class, 0);
-
- setTag(Inf2.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf2.class));
- System.out.println(getTag(Inf2.class));
- setTag(Inf2.class, 0);
- }
-
- private static void doTestPrimitiveFieldsIntegral() {
- IntObject intObject = new IntObject();
- setTag(intObject, 10000);
- System.out.println(followReferencesPrimitiveFields(intObject));
- System.out.println(getTag(intObject));
- }
-
- private static void doTestPrimitiveFieldsFloat() {
- FloatObject floatObject = new FloatObject();
- setTag(floatObject, 10000);
- System.out.println(followReferencesPrimitiveFields(floatObject));
- System.out.println(getTag(floatObject));
- }
-
- private static void run() {
- clearStats();
- forceGarbageCollection();
- printStats();
- }
-
- private static void clearStats() {
- getGcStarts();
- getGcFinishes();
- }
-
- private static void printStats() {
- System.out.println("---");
- int s = getGcStarts();
- int f = getGcFinishes();
- System.out.println((s > 0) + " " + (f > 0));
- }
-
- private static class TestConfig {
- private Class<?> klass = null;
- private int heapFilter = 0;
-
- public TestConfig() {
- }
- public TestConfig(Class<?> klass, int heapFilter) {
- this.klass = klass;
- this.heapFilter = heapFilter;
- }
-
- public void doFollowReferencesTest() throws Exception {
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- setTag(Thread.currentThread(), 3000);
-
- {
- ArrayList<Object> tmpStorage = new ArrayList<>();
- doFollowReferencesTestNonRoot(tmpStorage);
- tmpStorage = null;
- }
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- doFollowReferencesTestRoot();
-
- // Force GCs to clean up dirt.
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
- }
-
- private void doFollowReferencesTestNonRoot(ArrayList<Object> tmpStorage) {
- Verifier v = new Verifier();
- tagClasses(v);
- A a = createTree(v);
- tmpStorage.add(a);
- v.add("0@0", "1@1000"); // tmpStorage[0] --(array-element)--> a.
-
- doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, null, v, null);
- doFollowReferencesTestImpl(a.foo2, Integer.MAX_VALUE, -1, null, v, "3@1001");
-
- tmpStorage.clear();
- }
-
- private void doFollowReferencesTestRoot() {
- Verifier v = new Verifier();
- tagClasses(v);
- A a = createTree(v);
-
- doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, a, v, null);
- doFollowReferencesTestImpl(a.foo2, Integer.MAX_VALUE, -1, a, v, "3@1001");
- }
-
- private void doFollowReferencesTestImpl(A root, int stopAfter, int followSet,
- Object asRoot, Verifier v, String additionalEnabled) {
- String[] lines =
- followReferences(heapFilter, klass, root, stopAfter, followSet, asRoot);
-
- v.process(lines, additionalEnabled, heapFilter != 0 || klass != null);
- }
-
- private static void tagClasses(Verifier v) {
- setTag(A.class, 1000);
-
- setTag(B.class, 1001);
- v.add("1001@0", "1000@0"); // B.class --(superclass)--> A.class.
-
- setTag(C.class, 1002);
- v.add("1002@0", "1001@0"); // C.class --(superclass)--> B.class.
- v.add("1002@0", "2001@0"); // C.class --(interface)--> I2.class.
-
- setTag(I1.class, 2000);
-
- setTag(I2.class, 2001);
- v.add("2001@0", "2000@0"); // I2.class --(interface)--> I1.class.
- }
-
- private static A createTree(Verifier v) {
- A aInst = new A();
- setTag(aInst, 1);
- String aInstStr = "1@1000";
- String aClassStr = "1000@0";
- v.add(aInstStr, aClassStr); // A -->(class) --> A.class.
-
- A a2Inst = new A();
- setTag(a2Inst, 2);
- aInst.foo = a2Inst;
- String a2InstStr = "2@1000";
- v.add(a2InstStr, aClassStr); // A2 -->(class) --> A.class.
- v.add(aInstStr, a2InstStr); // A -->(field) --> A2.
-
- B bInst = new B();
- setTag(bInst, 3);
- aInst.foo2 = bInst;
- String bInstStr = "3@1001";
- String bClassStr = "1001@0";
- v.add(bInstStr, bClassStr); // B -->(class) --> B.class.
- v.add(aInstStr, bInstStr); // A -->(field) --> B.
-
- A a3Inst = new A();
- setTag(a3Inst, 4);
- bInst.bar = a3Inst;
- String a3InstStr = "4@1000";
- v.add(a3InstStr, aClassStr); // A3 -->(class) --> A.class.
- v.add(bInstStr, a3InstStr); // B -->(field) --> A3.
-
- C cInst = new C();
- setTag(cInst, 5);
- bInst.bar2 = cInst;
- String cInstStr = "5@1000";
- String cClassStr = "1002@0";
- v.add(cInstStr, cClassStr); // C -->(class) --> C.class.
- v.add(bInstStr, cInstStr); // B -->(field) --> C.
-
- A a4Inst = new A();
- setTag(a4Inst, 6);
- cInst.baz = a4Inst;
- String a4InstStr = "6@1000";
- v.add(a4InstStr, aClassStr); // A4 -->(class) --> A.class.
- v.add(cInstStr, a4InstStr); // C -->(field) --> A4.
-
- cInst.baz2 = aInst;
- v.add(cInstStr, aInstStr); // C -->(field) --> A.
-
- return aInst;
- }
- }
-
- public static class A {
- public A foo;
- public A foo2;
-
- public A() {}
- public A(A a, A b) {
- foo = a;
- foo2 = b;
- }
- }
-
- public static class B extends A {
- public A bar;
- public A bar2;
-
- public B() {}
- public B(A a, A b) {
- bar = a;
- bar2 = b;
- }
- }
-
- public static interface I1 {
- public final static int i1Field = 1;
- }
-
- public static interface I2 extends I1 {
- public final static int i2Field = 2;
- }
-
- public static class C extends B implements I2 {
- public A baz;
- public A baz2;
-
- public C() {}
- public C(A a, A b) {
- baz = a;
- baz2 = b;
- }
- }
-
- private static interface Inf1 {
- public final static int A = 1;
- }
-
- private static interface Inf2 extends Inf1 {
- public final static int B = 1;
- }
-
- private static class IntObject implements Inf1 {
- byte b = (byte)1;
- char c= 'a';
- short s = (short)2;
- int i = 3;
- long l = 4;
- Object o = new Object();
- static int sI = 5;
- }
-
- private static class FloatObject extends IntObject implements Inf2 {
- float f = 1.23f;
- double d = 1.23;
- Object p = new Object();
- static int sI = 6;
- }
-
- public static class Verifier {
- // Should roots with vreg=-1 be printed?
- public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
-
- public static class Node {
- public String referrer;
-
- public HashSet<String> referrees = new HashSet<>();
-
- public Node(String r) {
- referrer = r;
- }
-
- public boolean isRoot() {
- return referrer.startsWith("root@");
- }
- }
-
- HashMap<String, Node> nodes = new HashMap<>();
-
- public Verifier() {
- }
-
- public void add(String referrer, String referree) {
- if (!nodes.containsKey(referrer)) {
- nodes.put(referrer, new Node(referrer));
- }
- if (referree != null) {
- nodes.get(referrer).referrees.add(referree);
- }
- }
-
- public void process(String[] lines, String additionalEnabledReferrer, boolean filtered) {
- // This method isn't optimal. The loops could be merged. However, it's more readable if
- // the different parts are separated.
-
- ArrayList<String> rootLines = new ArrayList<>();
- ArrayList<String> nonRootLines = new ArrayList<>();
-
- // Check for consecutive chunks of referrers. Also ensure roots come first.
- {
- String currentHead = null;
- boolean rootsDone = false;
- HashSet<String> completedReferrers = new HashSet<>();
- for (String l : lines) {
- String referrer = getReferrer(l);
-
- if (isRoot(referrer)) {
- if (rootsDone) {
- System.out.println("ERROR: Late root " + l);
- print(lines);
- return;
- }
- rootLines.add(l);
- continue;
- }
-
- rootsDone = true;
-
- if (currentHead == null) {
- currentHead = referrer;
- } else {
- if (!currentHead.equals(referrer)) {
- completedReferrers.add(currentHead);
- currentHead = referrer;
- if (completedReferrers.contains(referrer)) {
- System.out.println("Non-contiguous referrer " + l);
- print(lines);
- return;
- }
- }
- }
- nonRootLines.add(l);
- }
- }
-
- // Sort (root order is not specified) and print the roots.
- // TODO: What about extra roots? JNI and the interpreter seem to introduce those (though it
- // isn't clear why a debuggable-AoT test doesn't have the same, at least for locals).
- // For now, swallow duplicates, and resolve once we have the metadata for the roots.
- {
- Collections.sort(rootLines);
- String lastRoot = null;
- for (String l : rootLines) {
- if (lastRoot != null && lastRoot.equals(l)) {
- continue;
- }
- lastRoot = l;
- if (!PRINT_ROOTS_WITH_UNKNOWN_VREG && l.indexOf("vreg=-1") > 0) {
- continue;
- }
- System.out.println(l);
- }
- }
-
- if (filtered) {
- // If we aren't tracking dependencies, just sort the lines and print.
- // TODO: As the verifier is currently using the output lines to track dependencies, we
- // cannot verify that output is correct when parts of it are suppressed by filters.
- // To correctly track this we need to take node information into account, and
- // actually analyze the graph.
- Collections.sort(nonRootLines);
- for (String l : nonRootLines) {
- System.out.println(l);
- }
-
- System.out.println("---");
- return;
- }
-
- // Iterate through the lines, keeping track of which referrers are visited, to ensure the
- // order is acceptable.
- HashSet<String> enabled = new HashSet<>();
- if (additionalEnabledReferrer != null) {
- enabled.add(additionalEnabledReferrer);
- }
- // Always add "0@0".
- enabled.add("0@0");
-
- for (String l : lines) {
- String referrer = getReferrer(l);
- String referree = getReferree(l);
- if (isRoot(referrer)) {
- // For a root src, just enable the referree.
- enabled.add(referree);
- } else {
- // Check that the referrer is enabled (may be visited).
- if (!enabled.contains(referrer)) {
- System.out.println("Referrer " + referrer + " not enabled: " + l);
- print(lines);
- return;
- }
- enabled.add(referree);
- }
- }
-
- // Now just sort the non-root lines and output them
- Collections.sort(nonRootLines);
- for (String l : nonRootLines) {
- System.out.println(l);
- }
-
- System.out.println("---");
- }
-
- public static boolean isRoot(String ref) {
- return ref.startsWith("root@");
- }
-
- private static String getReferrer(String line) {
- int i = line.indexOf(" --");
- if (i <= 0) {
- throw new IllegalArgumentException(line);
- }
- int j = line.indexOf(' ');
- if (i != j) {
- throw new IllegalArgumentException(line);
- }
- return line.substring(0, i);
- }
-
- private static String getReferree(String line) {
- int i = line.indexOf("--> ");
- if (i <= 0) {
- throw new IllegalArgumentException(line);
- }
- int j = line.indexOf(' ', i + 4);
- if (j < 0) {
- throw new IllegalArgumentException(line);
- }
- return line.substring(i + 4, j);
- }
-
- private static void print(String[] lines) {
- for (String l : lines) {
- System.out.println(l);
- }
- }
- }
-
- private static native void setupGcCallback();
- private static native void enableGcTracking(boolean enable);
- private static native int getGcStarts();
- private static native int getGcFinishes();
- private static native void forceGarbageCollection();
-
- public static native void setTag(Object o, long tag);
- public static native long getTag(Object o);
-
- public static native String[] followReferences(int heapFilter, Class<?> klassFilter,
- Object initialObject, int stopAfter, int followSet, Object jniRef);
- public static native String[] followReferencesString(Object initialObject);
- public static native String followReferencesPrimitiveArray(Object initialObject);
- public static native String followReferencesPrimitiveFields(Object initialObject);
}
diff --git a/test/913-heaps/src/art/Main.java b/test/913-heaps/src/art/Main.java
new file mode 100644
index 0000000..aa5498b
--- /dev/null
+++ b/test/913-heaps/src/art/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+
+ // Common infrastructure.
+ public static native void setTag(Object o, long tag);
+ public static native long getTag(Object o);
+}
diff --git a/test/913-heaps/src/art/Test913.java b/test/913-heaps/src/art/Test913.java
new file mode 100644
index 0000000..c54ecb0
--- /dev/null
+++ b/test/913-heaps/src/art/Test913.java
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+
+public class Test913 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test913.class);
+
+ doTest();
+ new TestConfig().doFollowReferencesTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ new TestConfig(null, 0, 1, -1).doFollowReferencesTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ new TestConfig(null, 0, Integer.MAX_VALUE, 1).doFollowReferencesTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doStringTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doPrimitiveArrayTest();
+ doPrimitiveFieldTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ // Test klass filter.
+ System.out.println("--- klass ---");
+ new TestConfig(A.class, 0).doFollowReferencesTest();
+
+ // Test heap filter.
+ System.out.println("--- heap_filter ---");
+ System.out.println("---- tagged objects");
+ new TestConfig(null, 0x4).doFollowReferencesTest();
+ System.out.println("---- untagged objects");
+ new TestConfig(null, 0x8).doFollowReferencesTest();
+ System.out.println("---- tagged classes");
+ new TestConfig(null, 0x10).doFollowReferencesTest();
+ System.out.println("---- untagged classes");
+ new TestConfig(null, 0x20).doFollowReferencesTest();
+ }
+
+ public static void doTest() throws Exception {
+ setupGcCallback();
+
+ enableGcTracking(true);
+ runGc();
+ enableGcTracking(false);
+ }
+
+ public static void doStringTest() throws Exception {
+ final String str = new String("HelloWorld");
+ final String str2 = new String("");
+ Object o = new Object() {
+ String s = str;
+ String s2 = str2;
+ };
+
+ setTag(str, 1);
+ setTag(str2, 2);
+ System.out.println(Arrays.toString(followReferencesString(o)));
+ System.out.println(getTag(str));
+ System.out.println(getTag(str2));
+ }
+
+ public static void doPrimitiveArrayTest() throws Exception {
+ final boolean[] zArray = new boolean[] { false, true };
+ setTag(zArray, 1);
+
+ final byte[] bArray = new byte[] { 1, 2, 3 };
+ setTag(bArray, 2);
+
+ final char[] cArray = new char[] { 'A', 'Z' };
+ setTag(cArray, 3);
+
+ final short[] sArray = new short[] { 1, 2, 3 };
+ setTag(sArray, 4);
+
+ final int[] iArray = new int[] { 1, 2, 3 };
+ setTag(iArray, 5);
+
+ final float[] fArray = new float[] { 0.0f, 1.0f };
+ setTag(fArray, 6);
+
+ final long[] lArray = new long[] { 1, 2, 3 };
+ setTag(lArray, 7);
+
+ final double[] dArray = new double[] { 0.0, 1.0 };
+ setTag(dArray, 8);
+
+ Object o = new Object() {
+ Object z = zArray;
+ Object b = bArray;
+ Object c = cArray;
+ Object s = sArray;
+ Object i = iArray;
+ Object f = fArray;
+ Object l = lArray;
+ Object d = dArray;
+ };
+
+ System.out.println(followReferencesPrimitiveArray(o));
+ System.out.print(getTag(zArray));
+ System.out.print(getTag(bArray));
+ System.out.print(getTag(cArray));
+ System.out.print(getTag(sArray));
+ System.out.print(getTag(iArray));
+ System.out.print(getTag(fArray));
+ System.out.print(getTag(lArray));
+ System.out.println(getTag(dArray));
+ }
+
+ public static void doPrimitiveFieldTest() throws Exception {
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private static void doTestPrimitiveFieldsClasses() {
+ setTag(IntObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(IntObject.class));
+ System.out.println(getTag(IntObject.class));
+ setTag(IntObject.class, 0);
+
+ setTag(FloatObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(FloatObject.class));
+ System.out.println(getTag(FloatObject.class));
+ setTag(FloatObject.class, 0);
+
+ setTag(Inf1.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf1.class));
+ System.out.println(getTag(Inf1.class));
+ setTag(Inf1.class, 0);
+
+ setTag(Inf2.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf2.class));
+ System.out.println(getTag(Inf2.class));
+ setTag(Inf2.class, 0);
+ }
+
+ private static void doTestPrimitiveFieldsIntegral() {
+ IntObject intObject = new IntObject();
+ setTag(intObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(intObject));
+ System.out.println(getTag(intObject));
+ }
+
+ private static void doTestPrimitiveFieldsFloat() {
+ FloatObject floatObject = new FloatObject();
+ setTag(floatObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(floatObject));
+ System.out.println(getTag(floatObject));
+ }
+
+ private static void runGc() {
+ clearStats();
+ forceGarbageCollection();
+ printStats();
+ }
+
+ private static void clearStats() {
+ getGcStarts();
+ getGcFinishes();
+ }
+
+ private static void printStats() {
+ System.out.println("---");
+ int s = getGcStarts();
+ int f = getGcFinishes();
+ System.out.println((s > 0) + " " + (f > 0));
+ }
+
+ private static class TestConfig {
+ private Class<?> klass = null;
+ private int heapFilter = 0;
+ private int stopAfter = Integer.MAX_VALUE;
+ private int followSet = -1;
+
+ public TestConfig() {
+ }
+ public TestConfig(Class<?> klass, int heapFilter) {
+ this.klass = klass;
+ this.heapFilter = heapFilter;
+ }
+ public TestConfig(Class<?> klass, int heapFilter, int stopAfter, int followSet) {
+ this.klass = klass;
+ this.heapFilter = heapFilter;
+ this.stopAfter = stopAfter;
+ this.followSet = followSet;
+ }
+
+ public void doFollowReferencesTest() throws Exception {
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ setTag(Thread.currentThread(), 3000);
+
+ {
+ ArrayList<Object> tmpStorage = new ArrayList<>();
+ doFollowReferencesTestNonRoot(tmpStorage);
+ tmpStorage = null;
+ }
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doFollowReferencesTestRoot();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private void doFollowReferencesTestNonRoot(ArrayList<Object> tmpStorage) {
+ Verifier v = new Verifier();
+ tagClasses(v);
+ A a = createTree(v);
+ tmpStorage.add(a);
+ v.add("0@0", "1@1000"); // tmpStorage[0] --(array-element)--> a.
+
+ doFollowReferencesTestImpl(null, stopAfter, followSet, null, v, null);
+ doFollowReferencesTestImpl(a.foo2, stopAfter, followSet, null, v, "3@1001");
+
+ tmpStorage.clear();
+ }
+
+ private void doFollowReferencesTestRoot() {
+ Verifier v = new Verifier();
+ tagClasses(v);
+ A a = createTree(v);
+
+ doFollowReferencesTestImpl(null, stopAfter, followSet, a, v, null);
+ doFollowReferencesTestImpl(a.foo2, stopAfter, followSet, a, v, "3@1001");
+ }
+
+ private void doFollowReferencesTestImpl(A root, int stopAfter, int followSet,
+ Object asRoot, Verifier v, String additionalEnabled) {
+ String[] lines =
+ followReferences(heapFilter, klass, root, stopAfter, followSet, asRoot);
+
+ v.process(lines, additionalEnabled, heapFilter != 0 || klass != null);
+ }
+
+ private static void tagClasses(Verifier v) {
+ setTag(A.class, 1000);
+
+ setTag(B.class, 1001);
+ v.add("1001@0", "1000@0"); // B.class --(superclass)--> A.class.
+
+ setTag(C.class, 1002);
+ v.add("1002@0", "1001@0"); // C.class --(superclass)--> B.class.
+ v.add("1002@0", "2001@0"); // C.class --(interface)--> I2.class.
+
+ setTag(I1.class, 2000);
+
+ setTag(I2.class, 2001);
+ v.add("2001@0", "2000@0"); // I2.class --(interface)--> I1.class.
+ }
+
+ private static A createTree(Verifier v) {
+ A aInst = new A();
+ setTag(aInst, 1);
+ String aInstStr = "1@1000";
+ String aClassStr = "1000@0";
+ v.add(aInstStr, aClassStr); // A -->(class) --> A.class.
+
+ A a2Inst = new A();
+ setTag(a2Inst, 2);
+ aInst.foo = a2Inst;
+ String a2InstStr = "2@1000";
+ v.add(a2InstStr, aClassStr); // A2 -->(class) --> A.class.
+ v.add(aInstStr, a2InstStr); // A -->(field) --> A2.
+
+ B bInst = new B();
+ setTag(bInst, 3);
+ aInst.foo2 = bInst;
+ String bInstStr = "3@1001";
+ String bClassStr = "1001@0";
+ v.add(bInstStr, bClassStr); // B -->(class) --> B.class.
+ v.add(aInstStr, bInstStr); // A -->(field) --> B.
+
+ A a3Inst = new A();
+ setTag(a3Inst, 4);
+ bInst.bar = a3Inst;
+ String a3InstStr = "4@1000";
+ v.add(a3InstStr, aClassStr); // A3 -->(class) --> A.class.
+ v.add(bInstStr, a3InstStr); // B -->(field) --> A3.
+
+ C cInst = new C();
+ setTag(cInst, 5);
+ bInst.bar2 = cInst;
+ String cInstStr = "5@1000";
+ String cClassStr = "1002@0";
+ v.add(cInstStr, cClassStr); // C -->(class) --> C.class.
+ v.add(bInstStr, cInstStr); // B -->(field) --> C.
+
+ A a4Inst = new A();
+ setTag(a4Inst, 6);
+ cInst.baz = a4Inst;
+ String a4InstStr = "6@1000";
+ v.add(a4InstStr, aClassStr); // A4 -->(class) --> A.class.
+ v.add(cInstStr, a4InstStr); // C -->(field) --> A4.
+
+ cInst.baz2 = aInst;
+ v.add(cInstStr, aInstStr); // C -->(field) --> A.
+
+ return aInst;
+ }
+ }
+
+ public static class A {
+ public A foo;
+ public A foo2;
+
+ public A() {}
+ public A(A a, A b) {
+ foo = a;
+ foo2 = b;
+ }
+ }
+
+ public static class B extends A {
+ public A bar;
+ public A bar2;
+
+ public B() {}
+ public B(A a, A b) {
+ bar = a;
+ bar2 = b;
+ }
+ }
+
+ public static interface I1 {
+ public final static int i1Field = 1;
+ }
+
+ public static interface I2 extends I1 {
+ public final static int i2Field = 2;
+ }
+
+ public static class C extends B implements I2 {
+ public A baz;
+ public A baz2;
+
+ public C() {}
+ public C(A a, A b) {
+ baz = a;
+ baz2 = b;
+ }
+ }
+
+ private static interface Inf1 {
+ public final static int A = 1;
+ }
+
+ private static interface Inf2 extends Inf1 {
+ public final static int B = 1;
+ }
+
+ private static class IntObject implements Inf1 {
+ byte b = (byte)1;
+ char c= 'a';
+ short s = (short)2;
+ int i = 3;
+ long l = 4;
+ Object o = new Object();
+ static int sI = 5;
+ }
+
+ private static class FloatObject extends IntObject implements Inf2 {
+ float f = 1.23f;
+ double d = 1.23;
+ Object p = new Object();
+ static int sI = 6;
+ }
+
+ public static class Verifier {
+ // Should roots with vreg=-1 be printed?
+ public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
+
+ public static class Node {
+ public String referrer;
+
+ public HashSet<String> referrees = new HashSet<>();
+
+ public Node(String r) {
+ referrer = r;
+ }
+
+ public boolean isRoot() {
+ return referrer.startsWith("root@");
+ }
+ }
+
+ HashMap<String, Node> nodes = new HashMap<>();
+
+ public Verifier() {
+ }
+
+ public void add(String referrer, String referree) {
+ if (!nodes.containsKey(referrer)) {
+ nodes.put(referrer, new Node(referrer));
+ }
+ if (referree != null) {
+ nodes.get(referrer).referrees.add(referree);
+ }
+ }
+
+ public void process(String[] lines, String additionalEnabledReferrer, boolean filtered) {
+ // This method isn't optimal. The loops could be merged. However, it's more readable if
+ // the different parts are separated.
+
+ ArrayList<String> rootLines = new ArrayList<>();
+ ArrayList<String> nonRootLines = new ArrayList<>();
+
+ // Check for consecutive chunks of referrers. Also ensure roots come first.
+ {
+ String currentHead = null;
+ boolean rootsDone = false;
+ HashSet<String> completedReferrers = new HashSet<>();
+ for (String l : lines) {
+ String referrer = getReferrer(l);
+
+ if (isRoot(referrer)) {
+ if (rootsDone) {
+ System.out.println("ERROR: Late root " + l);
+ print(lines);
+ return;
+ }
+ rootLines.add(l);
+ continue;
+ }
+
+ rootsDone = true;
+
+ if (currentHead == null) {
+ currentHead = referrer;
+ } else {
+ if (!currentHead.equals(referrer)) {
+ completedReferrers.add(currentHead);
+ currentHead = referrer;
+ if (completedReferrers.contains(referrer)) {
+ System.out.println("Non-contiguous referrer " + l);
+ print(lines);
+ return;
+ }
+ }
+ }
+ nonRootLines.add(l);
+ }
+ }
+
+ // Sort (root order is not specified) and print the roots.
+ // TODO: What about extra roots? JNI and the interpreter seem to introduce those (though it
+ // isn't clear why a debuggable-AoT test doesn't have the same, at least for locals).
+ // For now, swallow duplicates, and resolve once we have the metadata for the roots.
+ {
+ Collections.sort(rootLines);
+ String lastRoot = null;
+ for (String l : rootLines) {
+ if (lastRoot != null && lastRoot.equals(l)) {
+ continue;
+ }
+ lastRoot = l;
+ if (!PRINT_ROOTS_WITH_UNKNOWN_VREG && l.indexOf("vreg=-1") > 0) {
+ continue;
+ }
+ System.out.println(l);
+ }
+ }
+
+ if (filtered) {
+ // If we aren't tracking dependencies, just sort the lines and print.
+ // TODO: As the verifier is currently using the output lines to track dependencies, we
+ // cannot verify that output is correct when parts of it are suppressed by filters.
+ // To correctly track this we need to take node information into account, and
+ // actually analyze the graph.
+ Collections.sort(nonRootLines);
+ for (String l : nonRootLines) {
+ System.out.println(l);
+ }
+
+ System.out.println("---");
+ return;
+ }
+
+ // Iterate through the lines, keeping track of which referrers are visited, to ensure the
+ // order is acceptable.
+ HashSet<String> enabled = new HashSet<>();
+ if (additionalEnabledReferrer != null) {
+ enabled.add(additionalEnabledReferrer);
+ }
+ // Always add "0@0".
+ enabled.add("0@0");
+
+ for (String l : lines) {
+ String referrer = getReferrer(l);
+ String referree = getReferree(l);
+ if (isRoot(referrer)) {
+ // For a root src, just enable the referree.
+ enabled.add(referree);
+ } else {
+ // Check that the referrer is enabled (may be visited).
+ if (!enabled.contains(referrer)) {
+ System.out.println("Referrer " + referrer + " not enabled: " + l);
+ print(lines);
+ return;
+ }
+ enabled.add(referree);
+ }
+ }
+
+ // Now just sort the non-root lines and output them
+ Collections.sort(nonRootLines);
+ for (String l : nonRootLines) {
+ System.out.println(l);
+ }
+
+ System.out.println("---");
+ }
+
+ public static boolean isRoot(String ref) {
+ return ref.startsWith("root@");
+ }
+
+ private static String getReferrer(String line) {
+ int i = line.indexOf(" --");
+ if (i <= 0) {
+ throw new IllegalArgumentException(line);
+ }
+ int j = line.indexOf(' ');
+ if (i != j) {
+ throw new IllegalArgumentException(line);
+ }
+ return line.substring(0, i);
+ }
+
+ private static String getReferree(String line) {
+ int i = line.indexOf("--> ");
+ if (i <= 0) {
+ throw new IllegalArgumentException(line);
+ }
+ int j = line.indexOf(' ', i + 4);
+ if (j < 0) {
+ throw new IllegalArgumentException(line);
+ }
+ return line.substring(i + 4, j);
+ }
+
+ private static void print(String[] lines) {
+ for (String l : lines) {
+ System.out.println(l);
+ }
+ }
+ }
+
+ private static void setTag(Object o, long tag) {
+ Main.setTag(o, tag);
+ }
+ private static long getTag(Object o) {
+ return Main.getTag(o);
+ }
+
+ private static native void setupGcCallback();
+ private static native void enableGcTracking(boolean enable);
+ private static native int getGcStarts();
+ private static native int getGcFinishes();
+ private static native void forceGarbageCollection();
+
+ public static native String[] followReferences(int heapFilter, Class<?> klassFilter,
+ Object initialObject, int stopAfter, int followSet, Object jniRef);
+ public static native String[] followReferencesString(Object initialObject);
+ public static native String followReferencesPrimitiveArray(Object initialObject);
+ public static native String followReferencesPrimitiveFields(Object initialObject);
+}
diff --git a/test/914-hello-obsolescence/src/Main.java b/test/914-hello-obsolescence/src/Main.java
index 8a14716..2ec7664 100644
--- a/test/914-hello-obsolescence/src/Main.java
+++ b/test/914-hello-obsolescence/src/Main.java
@@ -53,6 +53,7 @@
"AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/914-hello-obsolescence/src/art/Main.java b/test/914-hello-obsolescence/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/914-hello-obsolescence/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/915-obsolete-2/src/Main.java b/test/915-obsolete-2/src/Main.java
index 0e3145c..fc73ee8 100644
--- a/test/915-obsolete-2/src/Main.java
+++ b/test/915-obsolete-2/src/Main.java
@@ -79,6 +79,7 @@
"IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/915-obsolete-2/src/art/Main.java b/test/915-obsolete-2/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/915-obsolete-2/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
index 2b3296f..3453261 100644
--- a/test/916-obsolete-jit/src/Main.java
+++ b/test/916-obsolete-jit/src/Main.java
@@ -113,6 +113,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform(), new TestWatcher());
}
diff --git a/test/916-obsolete-jit/src/art/Main.java b/test/916-obsolete-jit/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/916-obsolete-jit/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/917-fields-transformation/src/Main.java b/test/917-fields-transformation/src/Main.java
index 632a5c8..588af49 100644
--- a/test/917-fields-transformation/src/Main.java
+++ b/test/917-fields-transformation/src/Main.java
@@ -55,6 +55,7 @@
"AAIgAAAMAAAAXAEAAAMgAAACAAAA4QEAAAAgAAABAAAA8AEAAAAQAAABAAAABAIAAA==");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform("Hello", "Goodbye"),
new Transform("start", "end"));
}
diff --git a/test/917-fields-transformation/src/art/Main.java b/test/917-fields-transformation/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/917-fields-transformation/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/918-fields/expected.txt b/test/918-fields/expected.txt
index 1a1209c..af78615 100644
--- a/test/918-fields/expected.txt
+++ b/test/918-fields/expected.txt
@@ -6,15 +6,15 @@
class java.lang.Integer
18
false
-[this$0, LMain;, null]
-class Main$Foo
+[this$0, Lart/Test918;, null]
+class art.Test918$Foo
4112
true
[VAL, I, null]
-interface Main$Bar
+interface art.Test918$Bar
25
false
[generics, Ljava/lang/Object;, TT;]
-class Main$Generics
+class art.Test918$Generics
0
false
diff --git a/test/918-fields/fields.cc b/test/918-fields/fields.cc
index c659126..e63acef 100644
--- a/test/918-fields/fields.cc
+++ b/test/918-fields/fields.cc
@@ -16,18 +16,19 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "test_env.h"
namespace art {
namespace Test918Fields {
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getFieldName(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test918_getFieldName(
JNIEnv* env, jclass klass, jobject field) {
jfieldID id = env->FromReflectedField(field);
@@ -79,7 +80,7 @@
return ret;
}
-extern "C" JNIEXPORT jclass JNICALL Java_Main_getFieldDeclaringClass(
+extern "C" JNIEXPORT jclass JNICALL Java_art_Test918_getFieldDeclaringClass(
JNIEnv* env, jclass klass, jobject field) {
jfieldID id = env->FromReflectedField(field);
@@ -96,7 +97,7 @@
return declaring_class;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getFieldModifiers(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test918_getFieldModifiers(
JNIEnv* env, jclass klass, jobject field) {
jfieldID id = env->FromReflectedField(field);
@@ -113,7 +114,7 @@
return modifiers;
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isFieldSynthetic(
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test918_isFieldSynthetic(
JNIEnv* env, jclass klass, jobject field) {
jfieldID id = env->FromReflectedField(field);
diff --git a/test/918-fields/src/Main.java b/test/918-fields/src/Main.java
index ad0d0c5..e0ed65c 100644
--- a/test/918-fields/src/Main.java
+++ b/test/918-fields/src/Main.java
@@ -14,60 +14,8 @@
* limitations under the License.
*/
-import java.lang.reflect.Field;
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
- }
-
- public static void doTest() throws Exception {
- testField(Math.class, "PI");
- testField(Integer.class, "value");
- testField(Foo.class, "this$0");
- testField(Bar.class, "VAL");
- testField(Generics.class, "generics");
- }
-
- private static void testField(Class<?> base, String fieldName)
- throws Exception {
- Field f = base.getDeclaredField(fieldName);
- String[] result = getFieldName(f);
- System.out.println(Arrays.toString(result));
-
- Class<?> declClass = getFieldDeclaringClass(f);
- if (base != declClass) {
- throw new RuntimeException("Declaring class not equal: " + base + " vs " + declClass);
- }
- System.out.println(declClass);
-
- int modifiers = getFieldModifiers(f);
- if (modifiers != f.getModifiers()) {
- throw new RuntimeException("Modifiers not equal: " + f.getModifiers() + " vs " + modifiers);
- }
- System.out.println(modifiers);
-
- boolean synth = isFieldSynthetic(f);
- if (synth != f.isSynthetic()) {
- throw new RuntimeException("Synthetic not equal: " + f.isSynthetic() + " vs " + synth);
- }
- System.out.println(synth);
- }
-
- private static native String[] getFieldName(Field f);
- private static native Class<?> getFieldDeclaringClass(Field f);
- private static native int getFieldModifiers(Field f);
- private static native boolean isFieldSynthetic(Field f);
-
- private class Foo {
- }
-
- private static interface Bar {
- public static int VAL = 1;
- }
-
- private static class Generics<T> {
- T generics;
+ art.Test918.run();
}
}
diff --git a/test/918-fields/src/art/Main.java b/test/918-fields/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/918-fields/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/918-fields/src/art/Test918.java b/test/918-fields/src/art/Test918.java
new file mode 100644
index 0000000..89d518c
--- /dev/null
+++ b/test/918-fields/src/art/Test918.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.Arrays;
+
+public class Test918 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test918.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ testField(Math.class, "PI");
+ testField(Integer.class, "value");
+ testField(Foo.class, "this$0");
+ testField(Bar.class, "VAL");
+ testField(Generics.class, "generics");
+ }
+
+ private static void testField(Class<?> base, String fieldName)
+ throws Exception {
+ Field f = base.getDeclaredField(fieldName);
+ String[] result = getFieldName(f);
+ System.out.println(Arrays.toString(result));
+
+ Class<?> declClass = getFieldDeclaringClass(f);
+ if (base != declClass) {
+ throw new RuntimeException("Declaring class not equal: " + base + " vs " + declClass);
+ }
+ System.out.println(declClass);
+
+ int modifiers = getFieldModifiers(f);
+ if (modifiers != f.getModifiers()) {
+ throw new RuntimeException("Modifiers not equal: " + f.getModifiers() + " vs " + modifiers);
+ }
+ System.out.println(modifiers);
+
+ boolean synth = isFieldSynthetic(f);
+ if (synth != f.isSynthetic()) {
+ throw new RuntimeException("Synthetic not equal: " + f.isSynthetic() + " vs " + synth);
+ }
+ System.out.println(synth);
+ }
+
+ private static native String[] getFieldName(Field f);
+ private static native Class<?> getFieldDeclaringClass(Field f);
+ private static native int getFieldModifiers(Field f);
+ private static native boolean isFieldSynthetic(Field f);
+
+ private class Foo {
+ }
+
+ private static interface Bar {
+ public static int VAL = 1;
+ }
+
+ private static class Generics<T> {
+ T generics;
+ }
+}
diff --git a/test/919-obsolete-fields/src/Main.java b/test/919-obsolete-fields/src/Main.java
index ffb9897..34ee2a9 100644
--- a/test/919-obsolete-fields/src/Main.java
+++ b/test/919-obsolete-fields/src/Main.java
@@ -116,6 +116,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
TestWatcher w = new TestWatcher();
doTest(new Transform(w), w);
}
diff --git a/test/919-obsolete-fields/src/art/Main.java b/test/919-obsolete-fields/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/919-obsolete-fields/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/920-objects/objects.cc b/test/920-objects/objects.cc
index ad1431e..101ebb9 100644
--- a/test/920-objects/objects.cc
+++ b/test/920-objects/objects.cc
@@ -16,18 +16,18 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "test_env.h"
namespace art {
namespace Test920Objects {
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getObjectSize(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test920_getObjectSize(
JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jobject object) {
jlong size;
@@ -43,7 +43,7 @@
return size;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getObjectHashCode(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test920_getObjectHashCode(
JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jobject object) {
jint hash;
diff --git a/test/920-objects/src/Main.java b/test/920-objects/src/Main.java
index 5dbe1a7..44426f6 100644
--- a/test/920-objects/src/Main.java
+++ b/test/920-objects/src/Main.java
@@ -14,85 +14,8 @@
* limitations under the License.
*/
-import java.lang.reflect.Field;
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
- doTest();
- }
-
- public static void doTest() throws Exception {
- testObjectSize(new Object());
- testObjectSize(new Object());
-
- testObjectSize(new int[0]);
- testObjectSize(new int[1]);
- testObjectSize(new int[2]);
-
- testObjectSize(new double[0]);
- testObjectSize(new double[1]);
- testObjectSize(new double[2]);
-
- testObjectSize(new String("abc"));
- testObjectSize(new String("wxyz"));
-
- testObjectHash();
- }
-
- private static void testObjectSize(Object o) {
- System.out.println(o.getClass() + " " + getObjectSize(o));
- }
-
- private static void testObjectHash() {
- Object[] objects = new Object[] {
- new Object(),
- new Object(),
-
- new MyHash(1),
- new MyHash(1),
- new MyHash(2)
- };
-
- int hashes[] = new int[objects.length];
-
- for (int i = 0; i < objects.length; i++) {
- hashes[i] = getObjectHashCode(objects[i]);
- }
-
- // Implementation detail: we use the identity hashcode, for simplicity.
- for (int i = 0; i < objects.length; i++) {
- int ihash = System.identityHashCode(objects[i]);
- if (hashes[i] != ihash) {
- throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + ihash);
- }
- }
-
- Runtime.getRuntime().gc();
- Runtime.getRuntime().gc();
-
- for (int i = 0; i < objects.length; i++) {
- int newhash = getObjectHashCode(objects[i]);
- if (hashes[i] != newhash) {
- throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + newhash);
- }
- }
- }
-
- private static native long getObjectSize(Object o);
- private static native int getObjectHashCode(Object o);
-
- private static class MyHash {
- private int hash;
-
- public MyHash(int h) {
- hash = h;
- }
-
- public int hashCode() {
- return hash;
- }
+ art.Test920.run();
}
}
diff --git a/test/920-objects/src/art/Main.java b/test/920-objects/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/920-objects/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/920-objects/src/art/Test920.java b/test/920-objects/src/art/Test920.java
new file mode 100644
index 0000000..708e417
--- /dev/null
+++ b/test/920-objects/src/art/Test920.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.Arrays;
+
+public class Test920 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test920.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ testObjectSize(new Object());
+ testObjectSize(new Object());
+
+ testObjectSize(new int[0]);
+ testObjectSize(new int[1]);
+ testObjectSize(new int[2]);
+
+ testObjectSize(new double[0]);
+ testObjectSize(new double[1]);
+ testObjectSize(new double[2]);
+
+ testObjectSize(new String("abc"));
+ testObjectSize(new String("wxyz"));
+
+ testObjectHash();
+ }
+
+ private static void testObjectSize(Object o) {
+ System.out.println(o.getClass() + " " + getObjectSize(o));
+ }
+
+ private static void testObjectHash() {
+ Object[] objects = new Object[] {
+ new Object(),
+ new Object(),
+
+ new MyHash(1),
+ new MyHash(1),
+ new MyHash(2)
+ };
+
+ int hashes[] = new int[objects.length];
+
+ for (int i = 0; i < objects.length; i++) {
+ hashes[i] = getObjectHashCode(objects[i]);
+ }
+
+ // Implementation detail: we use the identity hashcode, for simplicity.
+ for (int i = 0; i < objects.length; i++) {
+ int ihash = System.identityHashCode(objects[i]);
+ if (hashes[i] != ihash) {
+ throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + ihash);
+ }
+ }
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ for (int i = 0; i < objects.length; i++) {
+ int newhash = getObjectHashCode(objects[i]);
+ if (hashes[i] != newhash) {
+ throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + newhash);
+ }
+ }
+ }
+
+ private static native long getObjectSize(Object o);
+ private static native int getObjectHashCode(Object o);
+
+ private static class MyHash {
+ private int hash;
+
+ public MyHash(int h) {
+ hash = h;
+ }
+
+ public int hashCode() {
+ return hash;
+ }
+ }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 6779ed8..d9a4948 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -18,6 +18,8 @@
public class Main {
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
+
Verification.doTest(new Transform());
NewName.doTest(new Transform());
DifferentAccess.doTest(new Transform());
diff --git a/test/921-hello-failure/src/art/Main.java b/test/921-hello-failure/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/921-hello-failure/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/922-properties/properties.cc b/test/922-properties/properties.cc
index 3fd274e..6af45f5 100644
--- a/test/922-properties/properties.cc
+++ b/test/922-properties/properties.cc
@@ -16,23 +16,25 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedUtfChars.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test922Properties {
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getSystemProperties(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test922_getSystemProperties(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jint count;
char** properties;
jvmtiError result = jvmti_env->GetSystemProperties(&count, &properties);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -52,7 +54,7 @@
return ret;
}
-extern "C" JNIEXPORT jstring JNICALL Java_Main_getSystemProperty(
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test922_getSystemProperty(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring key) {
ScopedUtfChars string(env, key);
if (string.c_str() == nullptr) {
@@ -61,7 +63,7 @@
char* value = nullptr;
jvmtiError result = jvmti_env->GetSystemProperty(string.c_str(), &value);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -72,7 +74,7 @@
return ret;
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setSystemProperty(
+extern "C" JNIEXPORT void JNICALL Java_art_Test922_setSystemProperty(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring key, jstring value) {
ScopedUtfChars key_string(env, key);
if (key_string.c_str() == nullptr) {
@@ -84,7 +86,7 @@
}
jvmtiError result = jvmti_env->SetSystemProperty(key_string.c_str(), value_string.c_str());
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return;
}
}
diff --git a/test/922-properties/src/Main.java b/test/922-properties/src/Main.java
index 8ad742f..2d37998 100644
--- a/test/922-properties/src/Main.java
+++ b/test/922-properties/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,140 +14,8 @@
* limitations under the License.
*/
-import java.util.Set;
-import java.util.TreeSet;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test922.run();
}
-
- public static void doTest() throws Exception {
- Set<String> recommendedProperties = getRecommendedProperties();
-
- System.out.println("Recommended properties:");
- for (String key : recommendedProperties) {
- checkProperty(key);
- }
-
- Set<String> allProperties = getAllProperties();
-
- Set<String> retained = new TreeSet<String>(recommendedProperties);
- retained.retainAll(allProperties);
- if (!retained.equals(recommendedProperties)) {
- Set<String> missing = new TreeSet<String>(recommendedProperties);
- missing.removeAll(retained);
- System.out.println("Missing recommended properties: " + missing);
- }
-
- Set<String> nonRecommended = new TreeSet<String>(allProperties);
- nonRecommended.removeAll(recommendedProperties);
-
- System.out.println("Other properties:");
- for (String key : nonRecommended) {
- checkProperty(key);
- }
-
- System.out.println("Non-specified property:");
- String key = generate(allProperties);
- checkProperty(key);
-
- System.out.println("Non-specified property (2):");
- String key2 = generateUnique(allProperties);
- checkProperty(key2);
- }
-
- private static Set<String> getRecommendedProperties() {
- Set<String> keys = new TreeSet<String>();
- keys.add("java.vm.vendor");
- keys.add("java.vm.version");
- keys.add("java.vm.name");
- keys.add("java.vm.info");
- keys.add("java.library.path");
- keys.add("java.class.path");
- return keys;
- }
-
- private static Set<String> getAllProperties() {
- Set<String> keys = new TreeSet<String>();
- String[] props = getSystemProperties();
- for (String p : props) {
- keys.add(p);
- }
- return keys;
- }
-
- private static boolean equals(String s1, String s2) {
- if (s1 == null && s2 == null) {
- return true;
- } else if (s1 != null) {
- return s1.equals(s2);
- } else {
- return false;
- }
- }
-
- private static void checkProperty(String key) {
- System.out.print(" \"" + key + "\": ");
- String err = null;
- String value = null;
- try {
- value = getSystemProperty(key);
- } catch (RuntimeException e) {
- err = e.getMessage();
- }
- String sysValue = System.getProperty(key);
- if (equals(value, sysValue)) {
- System.out.print("OK");
- if (err != null) {
- System.out.println(" !!!" + err);
- } else {
- System.out.println();
- }
- } else {
- System.out.println("ERROR !!!" + err);
- }
-
- System.out.print(" Setting value to \"abc\": ");
- try {
- setSystemProperty(key, "abc");
- System.out.println("SUCCEEDED");
- } catch (RuntimeException e) {
- System.out.println("!!!" + e.getMessage());
- }
- }
-
- private static String generateUnique(Set<String> others) {
- // Construct something. To be deterministic, just use "a+".
- StringBuilder sb = new StringBuilder("a");
- for (;;) {
- String key = sb.toString();
- if (!others.contains(key)) {
- return key;
- }
- sb.append('a');
- }
- }
-
- private static String generate(Set<String> others) {
- // First check for something in the overall System properties.
- TreeSet<String> sysProps = new TreeSet<String>(System.getProperties().stringPropertyNames());
- sysProps.removeAll(others);
- if (!sysProps.isEmpty()) {
- // Find something that starts with "java" or "os," trying to be platform-independent.
- for (String s: sysProps) {
- if (s.startsWith("java.") || s.startsWith("os.")) {
- return s;
- }
- }
- // Just return the first thing.
- return sysProps.iterator().next();
- }
-
- return generateUnique(others);
- }
-
- private static native String[] getSystemProperties();
- private static native String getSystemProperty(String key);
- private static native void setSystemProperty(String key, String value);
}
diff --git a/test/922-properties/src/art/Main.java b/test/922-properties/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/922-properties/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/922-properties/src/art/Test922.java b/test/922-properties/src/art/Test922.java
new file mode 100644
index 0000000..4b2204a
--- /dev/null
+++ b/test/922-properties/src/art/Test922.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Set;
+import java.util.TreeSet;
+
+public class Test922 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test922.class);
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ Set<String> recommendedProperties = getRecommendedProperties();
+
+ System.out.println("Recommended properties:");
+ for (String key : recommendedProperties) {
+ checkProperty(key);
+ }
+
+ Set<String> allProperties = getAllProperties();
+
+ Set<String> retained = new TreeSet<String>(recommendedProperties);
+ retained.retainAll(allProperties);
+ if (!retained.equals(recommendedProperties)) {
+ Set<String> missing = new TreeSet<String>(recommendedProperties);
+ missing.removeAll(retained);
+ System.out.println("Missing recommended properties: " + missing);
+ }
+
+ Set<String> nonRecommended = new TreeSet<String>(allProperties);
+ nonRecommended.removeAll(recommendedProperties);
+
+ System.out.println("Other properties:");
+ for (String key : nonRecommended) {
+ checkProperty(key);
+ }
+
+ System.out.println("Non-specified property:");
+ String key = generate(allProperties);
+ checkProperty(key);
+
+ System.out.println("Non-specified property (2):");
+ String key2 = generateUnique(allProperties);
+ checkProperty(key2);
+ }
+
+ private static Set<String> getRecommendedProperties() {
+ Set<String> keys = new TreeSet<String>();
+ keys.add("java.vm.vendor");
+ keys.add("java.vm.version");
+ keys.add("java.vm.name");
+ keys.add("java.vm.info");
+ keys.add("java.library.path");
+ keys.add("java.class.path");
+ return keys;
+ }
+
+ private static Set<String> getAllProperties() {
+ Set<String> keys = new TreeSet<String>();
+ String[] props = getSystemProperties();
+ for (String p : props) {
+ keys.add(p);
+ }
+ return keys;
+ }
+
+ private static boolean equals(String s1, String s2) {
+ if (s1 == null && s2 == null) {
+ return true;
+ } else if (s1 != null) {
+ return s1.equals(s2);
+ } else {
+ return false;
+ }
+ }
+
+ private static void checkProperty(String key) {
+ System.out.print(" \"" + key + "\": ");
+ String err = null;
+ String value = null;
+ try {
+ value = getSystemProperty(key);
+ } catch (RuntimeException e) {
+ err = e.getMessage();
+ }
+ String sysValue = System.getProperty(key);
+ if (equals(value, sysValue)) {
+ System.out.print("OK");
+ if (err != null) {
+ System.out.println(" !!!" + err);
+ } else {
+ System.out.println();
+ }
+ } else {
+ System.out.println("ERROR !!!" + err);
+ }
+
+ System.out.print(" Setting value to \"abc\": ");
+ try {
+ setSystemProperty(key, "abc");
+ System.out.println("SUCCEEDED");
+ } catch (RuntimeException e) {
+ System.out.println("!!!" + e.getMessage());
+ }
+ }
+
+ private static String generateUnique(Set<String> others) {
+ // Construct something. To be deterministic, just use "a+".
+ StringBuilder sb = new StringBuilder("a");
+ for (;;) {
+ String key = sb.toString();
+ if (!others.contains(key)) {
+ return key;
+ }
+ sb.append('a');
+ }
+ }
+
+ private static String generate(Set<String> others) {
+ // First check for something in the overall System properties.
+ TreeSet<String> sysProps = new TreeSet<String>(System.getProperties().stringPropertyNames());
+ sysProps.removeAll(others);
+ if (!sysProps.isEmpty()) {
+ // Find something that starts with "java" or "os," trying to be platform-independent.
+ for (String s: sysProps) {
+ if (s.startsWith("java.") || s.startsWith("os.")) {
+ return s;
+ }
+ }
+ // Just return the first thing.
+ return sysProps.iterator().next();
+ }
+
+ return generateUnique(others);
+ }
+
+ private static native String[] getSystemProperties();
+ private static native String getSystemProperty(String key);
+ private static native void setSystemProperty(String key, String value);
+}
diff --git a/test/923-monitors/monitors.cc b/test/923-monitors/monitors.cc
index 131fc6a..9afe22d 100644
--- a/test/923-monitors/monitors.cc
+++ b/test/923-monitors/monitors.cc
@@ -16,13 +16,14 @@
#include <stdio.h>
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedUtfChars.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test923Monitors {
@@ -36,50 +37,50 @@
return reinterpret_cast<jrawMonitorID>(static_cast<uintptr_t>(l));
}
-extern "C" JNIEXPORT jlong JNICALL Java_Main_createRawMonitor(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test923_createRawMonitor(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jrawMonitorID id;
jvmtiError result = jvmti_env->CreateRawMonitor("dummy", &id);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return 0;
}
return MonitorToLong(id);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_destroyRawMonitor(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_destroyRawMonitor(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l) {
jvmtiError result = jvmti_env->DestroyRawMonitor(LongToMonitor(l));
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorEnter(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_rawMonitorEnter(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l) {
jvmtiError result = jvmti_env->RawMonitorEnter(LongToMonitor(l));
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorExit(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_rawMonitorExit(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l) {
jvmtiError result = jvmti_env->RawMonitorExit(LongToMonitor(l));
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorWait(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_rawMonitorWait(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l, jlong millis) {
jvmtiError result = jvmti_env->RawMonitorWait(LongToMonitor(l), millis);
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorNotify(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_rawMonitorNotify(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l) {
jvmtiError result = jvmti_env->RawMonitorNotify(LongToMonitor(l));
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorNotifyAll(
+extern "C" JNIEXPORT void JNICALL Java_art_Test923_rawMonitorNotifyAll(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jlong l) {
jvmtiError result = jvmti_env->RawMonitorNotifyAll(LongToMonitor(l));
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
} // namespace Test923Monitors
diff --git a/test/923-monitors/src/Main.java b/test/923-monitors/src/Main.java
index ef00728..61fd3d4 100644
--- a/test/923-monitors/src/Main.java
+++ b/test/923-monitors/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,283 +14,8 @@
* limitations under the License.
*/
-import java.util.concurrent.CountDownLatch;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test923.run();
}
-
- private static void doTest() throws Exception {
- // Start a watchdog, to make sure on deadlocks etc the test dies.
- startWatchdog();
-
- sharedId = createRawMonitor();
-
- output = new ArrayList<String>(100);
-
- simpleTests(sharedId);
-
- for (String s : output) {
- System.out.println(s);
- }
- output.clear();
-
- threadTests(sharedId);
-
- destroyRawMonitor(sharedId);
- }
-
- private static void simpleTests(long id) {
- unlock(id); // Should fail.
-
- lock(id);
- unlock(id);
- unlock(id); // Should fail.
-
- lock(id);
- lock(id);
- unlock(id);
- unlock(id);
- unlock(id); // Should fail.
-
- rawWait(id, 0); // Should fail.
- rawWait(id, -1); // Should fail.
- rawWait(id, 1); // Should fail.
-
- lock(id);
- rawWait(id, 50);
- unlock(id);
- unlock(id); // Should fail.
-
- rawNotify(id); // Should fail.
- lock(id);
- rawNotify(id);
- unlock(id);
- unlock(id); // Should fail.
-
- rawNotifyAll(id); // Should fail.
- lock(id);
- rawNotifyAll(id);
- unlock(id);
- unlock(id); // Should fail.
- }
-
- private static void threadTests(final long id) throws Exception {
- final int N = 10;
-
- final CountDownLatch waitLatch = new CountDownLatch(N);
- final CountDownLatch wait2Latch = new CountDownLatch(1);
-
- Runnable r = new Runnable() {
- @Override
- public void run() {
- lock(id);
- waitLatch.countDown();
- rawWait(id, 0);
- firstAwakened = Thread.currentThread();
- appendToLog("Awakened");
- unlock(id);
- wait2Latch.countDown();
- }
- };
-
- List<Thread> threads = new ArrayList<Thread>();
- for (int i = 0; i < N; i++) {
- Thread t = new Thread(r);
- threads.add(t);
- t.start();
- }
-
- // Wait till all threads have been started.
- waitLatch.await();
-
- // Hopefully enough time for all the threads to progress into wait.
- Thread.yield();
- Thread.sleep(500);
-
- // Wake up one.
- lock(id);
- rawNotify(id);
- unlock(id);
-
- wait2Latch.await();
-
- // Wait a little bit more to see stragglers. This is flaky - spurious wakeups could
- // make the test fail.
- Thread.yield();
- Thread.sleep(500);
- if (firstAwakened != null) {
- firstAwakened.join();
- }
-
- // Wake up everyone else.
- lock(id);
- rawNotifyAll(id);
- unlock(id);
-
- // Wait for everyone to die.
- for (Thread t : threads) {
- t.join();
- }
-
- // Check threaded output.
- Iterator<String> it = output.iterator();
- // 1) Start with N locks and Waits.
- {
- int locks = 0;
- int waits = 0;
- for (int i = 0; i < 2*N; i++) {
- String s = it.next();
- if (s.equals("Lock")) {
- locks++;
- } else if (s.equals("Wait")) {
- if (locks <= waits) {
- System.out.println(output);
- throw new RuntimeException("Wait before Lock");
- }
- waits++;
- } else {
- System.out.println(output);
- throw new RuntimeException("Unexpected operation: " + s);
- }
- }
- }
-
- // 2) Expect Lock + Notify + Unlock.
- expect("Lock", it, output);
- expect("Notify", it, output);
- expect("Unlock", it, output);
-
- // 3) A single thread wakes up, runs, and dies.
- expect("Awakened", it, output);
- expect("Unlock", it, output);
-
- // 4) Expect Lock + NotifyAll + Unlock.
- expect("Lock", it, output);
- expect("NotifyAll", it, output);
- expect("Unlock", it, output);
-
- // 5) N-1 threads wake up, run, and die.
- {
- int expectedUnlocks = 0;
- int ops = 2 * (N-1);
- for (int i = 0; i < ops; i++) {
- String s = it.next();
- if (s.equals("Awakened")) {
- expectedUnlocks++;
- } else if (s.equals("Unlock")) {
- expectedUnlocks--;
- if (expectedUnlocks < 0) {
- System.out.println(output);
- throw new RuntimeException("Unexpected unlock");
- }
- }
- }
- }
-
- // 6) That should be it.
- if (it.hasNext()) {
- System.out.println(output);
- throw new RuntimeException("Unexpected trailing output, starting with " + it.next());
- }
-
- output.clear();
- System.out.println("Done");
- }
-
- private static void expect(String s, Iterator<String> it, List<String> output) {
- String t = it.next();
- if (!s.equals(t)) {
- System.out.println(output);
- throw new RuntimeException("Expected " + s + " but got " + t);
- }
- }
-
- private static void lock(long id) {
- appendToLog("Lock");
- rawMonitorEnter(id);
- }
-
- private static void unlock(long id) {
- appendToLog("Unlock");
- try {
- rawMonitorExit(id);
- } catch (RuntimeException e) {
- appendToLog(e.getMessage());
- }
- }
-
- private static void rawWait(long id, long millis) {
- appendToLog("Wait");
- try {
- rawMonitorWait(id, millis);
- } catch (RuntimeException e) {
- appendToLog(e.getMessage());
- }
- }
-
- private static void rawNotify(long id) {
- appendToLog("Notify");
- try {
- rawMonitorNotify(id);
- } catch (RuntimeException e) {
- appendToLog(e.getMessage());
- }
- }
-
- private static void rawNotifyAll(long id) {
- appendToLog("NotifyAll");
- try {
- rawMonitorNotifyAll(id);
- } catch (RuntimeException e) {
- appendToLog(e.getMessage());
- }
- }
-
- private static synchronized void appendToLog(String s) {
- output.add(s);
- }
-
- private static void startWatchdog() {
- Runnable r = new Runnable() {
- @Override
- public void run() {
- long start = System.currentTimeMillis();
- // Give it a minute.
- long end = 60 * 1000 + start;
- for (;;) {
- long delta = end - System.currentTimeMillis();
- if (delta <= 0) {
- break;
- }
-
- try {
- Thread.currentThread().sleep(delta);
- } catch (Exception e) {
- }
- }
- System.out.println("TIMEOUT!");
- System.exit(1);
- }
- };
- Thread t = new Thread(r);
- t.setDaemon(true);
- t.start();
- }
-
- static volatile long sharedId;
- static List<String> output;
- static Thread firstAwakened;
-
- private static native long createRawMonitor();
- private static native void destroyRawMonitor(long id);
- private static native void rawMonitorEnter(long id);
- private static native void rawMonitorExit(long id);
- private static native void rawMonitorWait(long id, long millis);
- private static native void rawMonitorNotify(long id);
- private static native void rawMonitorNotifyAll(long id);
}
diff --git a/test/923-monitors/src/art/Main.java b/test/923-monitors/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/923-monitors/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/923-monitors/src/art/Test923.java b/test/923-monitors/src/art/Test923.java
new file mode 100644
index 0000000..02e86a6
--- /dev/null
+++ b/test/923-monitors/src/art/Test923.java
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+public class Test923 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test923.class);
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ // Start a watchdog, to make sure on deadlocks etc the test dies.
+ startWatchdog();
+
+ sharedId = createRawMonitor();
+
+ output = new ArrayList<String>(100);
+
+ simpleTests(sharedId);
+
+ for (String s : output) {
+ System.out.println(s);
+ }
+ output.clear();
+
+ threadTests(sharedId);
+
+ destroyRawMonitor(sharedId);
+ }
+
+ private static void simpleTests(long id) {
+ unlock(id); // Should fail.
+
+ lock(id);
+ unlock(id);
+ unlock(id); // Should fail.
+
+ lock(id);
+ lock(id);
+ unlock(id);
+ unlock(id);
+ unlock(id); // Should fail.
+
+ rawWait(id, 0); // Should fail.
+ rawWait(id, -1); // Should fail.
+ rawWait(id, 1); // Should fail.
+
+ lock(id);
+ rawWait(id, 50);
+ unlock(id);
+ unlock(id); // Should fail.
+
+ rawNotify(id); // Should fail.
+ lock(id);
+ rawNotify(id);
+ unlock(id);
+ unlock(id); // Should fail.
+
+ rawNotifyAll(id); // Should fail.
+ lock(id);
+ rawNotifyAll(id);
+ unlock(id);
+ unlock(id); // Should fail.
+ }
+
+ private static void threadTests(final long id) throws Exception {
+ final int N = 10;
+
+ final CountDownLatch waitLatch = new CountDownLatch(N);
+ final CountDownLatch wait2Latch = new CountDownLatch(1);
+
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ lock(id);
+ waitLatch.countDown();
+ rawWait(id, 0);
+ firstAwakened = Thread.currentThread();
+ appendToLog("Awakened");
+ unlock(id);
+ wait2Latch.countDown();
+ }
+ };
+
+ List<Thread> threads = new ArrayList<Thread>();
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread(r);
+ threads.add(t);
+ t.start();
+ }
+
+ // Wait till all threads have been started.
+ waitLatch.await();
+
+ // Hopefully enough time for all the threads to progress into wait.
+ Thread.yield();
+ Thread.sleep(500);
+
+ // Wake up one.
+ lock(id);
+ rawNotify(id);
+ unlock(id);
+
+ wait2Latch.await();
+
+ // Wait a little bit more to see stragglers. This is flaky - spurious wakeups could
+ // make the test fail.
+ Thread.yield();
+ Thread.sleep(500);
+ if (firstAwakened != null) {
+ firstAwakened.join();
+ }
+
+ // Wake up everyone else.
+ lock(id);
+ rawNotifyAll(id);
+ unlock(id);
+
+ // Wait for everyone to die.
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ // Check threaded output.
+ Iterator<String> it = output.iterator();
+ // 1) Start with N locks and Waits.
+ {
+ int locks = 0;
+ int waits = 0;
+ for (int i = 0; i < 2*N; i++) {
+ String s = it.next();
+ if (s.equals("Lock")) {
+ locks++;
+ } else if (s.equals("Wait")) {
+ if (locks <= waits) {
+ System.out.println(output);
+ throw new RuntimeException("Wait before Lock");
+ }
+ waits++;
+ } else {
+ System.out.println(output);
+ throw new RuntimeException("Unexpected operation: " + s);
+ }
+ }
+ }
+
+ // 2) Expect Lock + Notify + Unlock.
+ expect("Lock", it, output);
+ expect("Notify", it, output);
+ expect("Unlock", it, output);
+
+ // 3) A single thread wakes up, runs, and dies.
+ expect("Awakened", it, output);
+ expect("Unlock", it, output);
+
+ // 4) Expect Lock + NotifyAll + Unlock.
+ expect("Lock", it, output);
+ expect("NotifyAll", it, output);
+ expect("Unlock", it, output);
+
+ // 5) N-1 threads wake up, run, and die.
+ {
+ int expectedUnlocks = 0;
+ int ops = 2 * (N-1);
+ for (int i = 0; i < ops; i++) {
+ String s = it.next();
+ if (s.equals("Awakened")) {
+ expectedUnlocks++;
+ } else if (s.equals("Unlock")) {
+ expectedUnlocks--;
+ if (expectedUnlocks < 0) {
+ System.out.println(output);
+ throw new RuntimeException("Unexpected unlock");
+ }
+ }
+ }
+ }
+
+ // 6) That should be it.
+ if (it.hasNext()) {
+ System.out.println(output);
+ throw new RuntimeException("Unexpected trailing output, starting with " + it.next());
+ }
+
+ output.clear();
+ System.out.println("Done");
+ }
+
+ private static void expect(String s, Iterator<String> it, List<String> output) {
+ String t = it.next();
+ if (!s.equals(t)) {
+ System.out.println(output);
+ throw new RuntimeException("Expected " + s + " but got " + t);
+ }
+ }
+
+ private static void lock(long id) {
+ appendToLog("Lock");
+ rawMonitorEnter(id);
+ }
+
+ private static void unlock(long id) {
+ appendToLog("Unlock");
+ try {
+ rawMonitorExit(id);
+ } catch (RuntimeException e) {
+ appendToLog(e.getMessage());
+ }
+ }
+
+ private static void rawWait(long id, long millis) {
+ appendToLog("Wait");
+ try {
+ rawMonitorWait(id, millis);
+ } catch (RuntimeException e) {
+ appendToLog(e.getMessage());
+ }
+ }
+
+ private static void rawNotify(long id) {
+ appendToLog("Notify");
+ try {
+ rawMonitorNotify(id);
+ } catch (RuntimeException e) {
+ appendToLog(e.getMessage());
+ }
+ }
+
+ private static void rawNotifyAll(long id) {
+ appendToLog("NotifyAll");
+ try {
+ rawMonitorNotifyAll(id);
+ } catch (RuntimeException e) {
+ appendToLog(e.getMessage());
+ }
+ }
+
+ private static synchronized void appendToLog(String s) {
+ output.add(s);
+ }
+
+ private static void startWatchdog() {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ long start = System.currentTimeMillis();
+ // Give it a minute.
+ long end = 60 * 1000 + start;
+ for (;;) {
+ long delta = end - System.currentTimeMillis();
+ if (delta <= 0) {
+ break;
+ }
+
+ try {
+ Thread.currentThread().sleep(delta);
+ } catch (Exception e) {
+ }
+ }
+ System.out.println("TIMEOUT!");
+ System.exit(1);
+ }
+ };
+ Thread t = new Thread(r);
+ t.setDaemon(true);
+ t.start();
+ }
+
+ static volatile long sharedId;
+ static List<String> output;
+ static Thread firstAwakened;
+
+ private static native long createRawMonitor();
+ private static native void destroyRawMonitor(long id);
+ private static native void rawMonitorEnter(long id);
+ private static native void rawMonitorExit(long id);
+ private static native void rawMonitorWait(long id, long millis);
+ private static native void rawMonitorNotify(long id);
+ private static native void rawMonitorNotifyAll(long id);
+}
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
index 67d20eb..4c0f4ea 100644
--- a/test/924-threads/expected.txt
+++ b/test/924-threads/expected.txt
@@ -19,6 +19,11 @@
true
java.lang.ThreadGroup[name=main,maxpri=10]
class dalvik.system.PathClassLoader
+Subclass
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
5
5
0 = NEW
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index 716f59e..7b468f1 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,319 +14,8 @@
* limitations under the License.
*/
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.concurrent.CountDownLatch;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test924.run();
}
-
- private static void doTest() throws Exception {
- Thread t1 = Thread.currentThread();
- Thread t2 = getCurrentThread();
-
- if (t1 != t2) {
- throw new RuntimeException("Expected " + t1 + " but got " + t2);
- }
- System.out.println("currentThread OK");
-
- printThreadInfo(t1);
- printThreadInfo(null);
-
- Thread t3 = new Thread("Daemon Thread");
- t3.setDaemon(true);
- // Do not start this thread, yet.
- printThreadInfo(t3);
- // Start, and wait for it to die.
- t3.start();
- t3.join();
- Thread.sleep(500); // Wait a little bit.
- // Thread has died, check that we can still get info.
- printThreadInfo(t3);
-
- doStateTests();
-
- doAllThreadsTests();
-
- doTLSTests();
-
- doTestEvents();
- }
-
- private static class Holder {
- volatile boolean flag = false;
- }
-
- private static void doStateTests() throws Exception {
- System.out.println(Integer.toHexString(getThreadState(null)));
- System.out.println(Integer.toHexString(getThreadState(Thread.currentThread())));
-
- final CountDownLatch cdl1 = new CountDownLatch(1);
- final CountDownLatch cdl2 = new CountDownLatch(1);
- final CountDownLatch cdl3_1 = new CountDownLatch(1);
- final CountDownLatch cdl3_2 = new CountDownLatch(1);
- final CountDownLatch cdl4 = new CountDownLatch(1);
- final CountDownLatch cdl5 = new CountDownLatch(1);
- final Holder h = new Holder();
- Runnable r = new Runnable() {
- @Override
- public void run() {
- try {
- cdl1.countDown();
- synchronized(cdl1) {
- cdl1.wait();
- }
-
- cdl2.countDown();
- synchronized(cdl2) {
- cdl2.wait(1000); // Wait a second.
- }
-
- cdl3_1.await();
- cdl3_2.countDown();
- synchronized(cdl3_2) {
- // Nothing, just wanted to block on cdl3.
- }
-
- cdl4.countDown();
- Thread.sleep(1000);
-
- cdl5.countDown();
- while (!h.flag) {
- // Busy-loop.
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
- };
-
- Thread t = new Thread(r);
- printThreadState(t);
- t.start();
-
- // Waiting.
- cdl1.await();
- Thread.yield();
- Thread.sleep(100);
- printThreadState(t);
- synchronized(cdl1) {
- cdl1.notifyAll();
- }
-
- // Timed waiting.
- cdl2.await();
- Thread.yield();
- Thread.sleep(100);
- printThreadState(t);
- synchronized(cdl2) {
- cdl2.notifyAll();
- }
-
- // Blocked on monitor.
- synchronized(cdl3_2) {
- cdl3_1.countDown();
- cdl3_2.await();
- // While the latch improves the chances to make good progress, scheduling might still be
- // messy. Wait till we get the right Java-side Thread state.
- do {
- Thread.yield();
- } while (t.getState() != Thread.State.BLOCKED);
- Thread.sleep(10);
- printThreadState(t);
- }
-
- // Sleeping.
- cdl4.await();
- Thread.yield();
- Thread.sleep(100);
- printThreadState(t);
-
- // Running.
- cdl5.await();
- Thread.yield();
- Thread.sleep(100);
- printThreadState(t);
- h.flag = true;
-
- // Dying.
- t.join();
- Thread.yield();
- Thread.sleep(100);
-
- printThreadState(t);
- }
-
- private static void doAllThreadsTests() {
- Thread[] threads = getAllThreads();
- List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
-
- // Filter out JIT thread. It may or may not be there depending on configuration.
- Iterator<Thread> it = threadList.iterator();
- while (it.hasNext()) {
- Thread t = it.next();
- if (t.getName().startsWith("Jit thread pool worker")) {
- it.remove();
- break;
- }
- }
-
- Collections.sort(threadList, THREAD_COMP);
- System.out.println(threadList);
- }
-
- private static void doTLSTests() throws Exception {
- doTLSNonLiveTests();
- doTLSLiveTests();
- }
-
- private static void doTLSNonLiveTests() throws Exception {
- Thread t = new Thread();
- try {
- setTLS(t, 1);
- System.out.println("Expected failure setting TLS for non-live thread");
- } catch (Exception e) {
- System.out.println(e.getMessage());
- }
- t.start();
- t.join();
- try {
- setTLS(t, 1);
- System.out.println("Expected failure setting TLS for non-live thread");
- } catch (Exception e) {
- System.out.println(e.getMessage());
- }
- }
-
- private static void doTLSLiveTests() throws Exception {
- setTLS(Thread.currentThread(), 1);
-
- long l = getTLS(Thread.currentThread());
- if (l != 1) {
- throw new RuntimeException("Unexpected TLS value: " + l);
- };
-
- final CountDownLatch cdl1 = new CountDownLatch(1);
- final CountDownLatch cdl2 = new CountDownLatch(1);
-
- Runnable r = new Runnable() {
- @Override
- public void run() {
- try {
- cdl1.countDown();
- cdl2.await();
- setTLS(Thread.currentThread(), 2);
- if (getTLS(Thread.currentThread()) != 2) {
- throw new RuntimeException("Different thread issue");
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
- };
-
- Thread t = new Thread(r);
- t.start();
- cdl1.await();
- setTLS(Thread.currentThread(), 1);
- cdl2.countDown();
-
- t.join();
- if (getTLS(Thread.currentThread()) != 1) {
- throw new RuntimeException("Got clobbered");
- }
- }
-
- private static void doTestEvents() throws Exception {
- enableThreadEvents(true);
-
- Thread t = new Thread("EventTestThread");
-
- System.out.println("Constructed thread");
- Thread.yield();
-
- t.start();
- t.join();
-
- System.out.println("Thread joined");
-
- enableThreadEvents(false);
- }
-
- private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
- public int compare(Thread o1, Thread o2) {
- return o1.getName().compareTo(o2.getName());
- }
- };
-
- private final static Map<Integer, String> STATE_NAMES = new HashMap<Integer, String>();
- private final static List<Integer> STATE_KEYS = new ArrayList<Integer>();
- static {
- STATE_NAMES.put(0x1, "ALIVE");
- STATE_NAMES.put(0x2, "TERMINATED");
- STATE_NAMES.put(0x4, "RUNNABLE");
- STATE_NAMES.put(0x400, "BLOCKED_ON_MONITOR_ENTER");
- STATE_NAMES.put(0x80, "WAITING");
- STATE_NAMES.put(0x10, "WAITING_INDEFINITELY");
- STATE_NAMES.put(0x20, "WAITING_WITH_TIMEOUT");
- STATE_NAMES.put(0x40, "SLEEPING");
- STATE_NAMES.put(0x100, "IN_OBJECT_WAIT");
- STATE_NAMES.put(0x200, "PARKED");
- STATE_NAMES.put(0x100000, "SUSPENDED");
- STATE_NAMES.put(0x200000, "INTERRUPTED");
- STATE_NAMES.put(0x400000, "IN_NATIVE");
- STATE_KEYS.addAll(STATE_NAMES.keySet());
- Collections.sort(STATE_KEYS);
- }
-
- private static void printThreadState(Thread t) {
- int state = getThreadState(t);
-
- StringBuilder sb = new StringBuilder();
-
- for (Integer i : STATE_KEYS) {
- if ((state & i) != 0) {
- if (sb.length()>0) {
- sb.append('|');
- }
- sb.append(STATE_NAMES.get(i));
- }
- }
-
- if (sb.length() == 0) {
- sb.append("NEW");
- }
-
- System.out.println(Integer.toHexString(state) + " = " + sb.toString());
- }
-
- private static void printThreadInfo(Thread t) {
- Object[] threadInfo = getThreadInfo(t);
- if (threadInfo == null || threadInfo.length != 5) {
- System.out.println(Arrays.toString(threadInfo));
- throw new RuntimeException("threadInfo length wrong");
- }
-
- System.out.println(threadInfo[0]); // Name
- System.out.println(threadInfo[1]); // Priority
- System.out.println(threadInfo[2]); // Daemon
- System.out.println(threadInfo[3]); // Threadgroup
- System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
- }
-
- private static native Thread getCurrentThread();
- private static native Object[] getThreadInfo(Thread t);
- private static native int getThreadState(Thread t);
- private static native Thread[] getAllThreads();
- private static native void setTLS(Thread t, long l);
- private static native long getTLS(Thread t);
- private static native void enableThreadEvents(boolean b);
}
diff --git a/test/924-threads/src/art/Main.java b/test/924-threads/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/924-threads/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
new file mode 100644
index 0000000..160bf8e
--- /dev/null
+++ b/test/924-threads/src/art/Test924.java
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.concurrent.CountDownLatch;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class Test924 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test924.class);
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ Thread t1 = Thread.currentThread();
+ Thread t2 = getCurrentThread();
+
+ if (t1 != t2) {
+ throw new RuntimeException("Expected " + t1 + " but got " + t2);
+ }
+ System.out.println("currentThread OK");
+
+ printThreadInfo(t1);
+ printThreadInfo(null);
+
+ Thread t3 = new Thread("Daemon Thread");
+ t3.setDaemon(true);
+ // Do not start this thread, yet.
+ printThreadInfo(t3);
+ // Start, and wait for it to die.
+ t3.start();
+ t3.join();
+ Thread.sleep(500); // Wait a little bit.
+ // Thread has died, check that we can still get info.
+ printThreadInfo(t3);
+
+ // Try a subclass of thread.
+ Thread t4 = new Thread("Subclass") {
+ };
+ printThreadInfo(t4);
+
+ doStateTests();
+
+ doAllThreadsTests();
+
+ doTLSTests();
+
+ doTestEvents();
+ }
+
+ private static class Holder {
+ volatile boolean flag = false;
+ }
+
+ private static void doStateTests() throws Exception {
+ System.out.println(Integer.toHexString(getThreadState(null)));
+ System.out.println(Integer.toHexString(getThreadState(Thread.currentThread())));
+
+ final CountDownLatch cdl1 = new CountDownLatch(1);
+ final CountDownLatch cdl2 = new CountDownLatch(1);
+ final CountDownLatch cdl3_1 = new CountDownLatch(1);
+ final CountDownLatch cdl3_2 = new CountDownLatch(1);
+ final CountDownLatch cdl4 = new CountDownLatch(1);
+ final CountDownLatch cdl5 = new CountDownLatch(1);
+ final Holder h = new Holder();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cdl1.countDown();
+ synchronized(cdl1) {
+ cdl1.wait();
+ }
+
+ cdl2.countDown();
+ synchronized(cdl2) {
+ cdl2.wait(1000); // Wait a second.
+ }
+
+ cdl3_1.await();
+ cdl3_2.countDown();
+ synchronized(cdl3_2) {
+ // Nothing, just wanted to block on cdl3.
+ }
+
+ cdl4.countDown();
+ Thread.sleep(1000);
+
+ cdl5.countDown();
+ while (!h.flag) {
+ // Busy-loop.
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ Thread t = new Thread(r);
+ printThreadState(t);
+ t.start();
+
+ // Waiting.
+ cdl1.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl1) {
+ cdl1.notifyAll();
+ }
+
+ // Timed waiting.
+ cdl2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl2) {
+ cdl2.notifyAll();
+ }
+
+ // Blocked on monitor.
+ synchronized(cdl3_2) {
+ cdl3_1.countDown();
+ cdl3_2.await();
+ // While the latch improves the chances to make good progress, scheduling might still be
+ // messy. Wait till we get the right Java-side Thread state.
+ do {
+ Thread.yield();
+ } while (t.getState() != Thread.State.BLOCKED);
+ Thread.sleep(10);
+ printThreadState(t);
+ }
+
+ // Sleeping.
+ cdl4.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+
+ // Running.
+ cdl5.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ h.flag = true;
+
+ // Dying.
+ t.join();
+ Thread.yield();
+ Thread.sleep(100);
+
+ printThreadState(t);
+ }
+
+ private static void doAllThreadsTests() {
+ Thread[] threads = getAllThreads();
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+ System.out.println(threadList);
+ }
+
+ private static void doTLSTests() throws Exception {
+ doTLSNonLiveTests();
+ doTLSLiveTests();
+ }
+
+ private static void doTLSNonLiveTests() throws Exception {
+ Thread t = new Thread();
+ try {
+ setTLS(t, 1);
+ System.out.println("Expected failure setting TLS for non-live thread");
+ } catch (Exception e) {
+ System.out.println(e.getMessage());
+ }
+ t.start();
+ t.join();
+ try {
+ setTLS(t, 1);
+ System.out.println("Expected failure setting TLS for non-live thread");
+ } catch (Exception e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ private static void doTLSLiveTests() throws Exception {
+ setTLS(Thread.currentThread(), 1);
+
+ long l = getTLS(Thread.currentThread());
+ if (l != 1) {
+ throw new RuntimeException("Unexpected TLS value: " + l);
+ };
+
+ final CountDownLatch cdl1 = new CountDownLatch(1);
+ final CountDownLatch cdl2 = new CountDownLatch(1);
+
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cdl1.countDown();
+ cdl2.await();
+ setTLS(Thread.currentThread(), 2);
+ if (getTLS(Thread.currentThread()) != 2) {
+ throw new RuntimeException("Different thread issue");
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ Thread t = new Thread(r);
+ t.start();
+ cdl1.await();
+ setTLS(Thread.currentThread(), 1);
+ cdl2.countDown();
+
+ t.join();
+ if (getTLS(Thread.currentThread()) != 1) {
+ throw new RuntimeException("Got clobbered");
+ }
+ }
+
+ private static void doTestEvents() throws Exception {
+ enableThreadEvents(true);
+
+ Thread t = new Thread("EventTestThread");
+
+ System.out.println("Constructed thread");
+ Thread.yield();
+
+ t.start();
+ t.join();
+
+ System.out.println("Thread joined");
+
+ enableThreadEvents(false);
+ }
+
+ private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
+ public int compare(Thread o1, Thread o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ };
+
+ private final static Map<Integer, String> STATE_NAMES = new HashMap<Integer, String>();
+ private final static List<Integer> STATE_KEYS = new ArrayList<Integer>();
+ static {
+ STATE_NAMES.put(0x1, "ALIVE");
+ STATE_NAMES.put(0x2, "TERMINATED");
+ STATE_NAMES.put(0x4, "RUNNABLE");
+ STATE_NAMES.put(0x400, "BLOCKED_ON_MONITOR_ENTER");
+ STATE_NAMES.put(0x80, "WAITING");
+ STATE_NAMES.put(0x10, "WAITING_INDEFINITELY");
+ STATE_NAMES.put(0x20, "WAITING_WITH_TIMEOUT");
+ STATE_NAMES.put(0x40, "SLEEPING");
+ STATE_NAMES.put(0x100, "IN_OBJECT_WAIT");
+ STATE_NAMES.put(0x200, "PARKED");
+ STATE_NAMES.put(0x100000, "SUSPENDED");
+ STATE_NAMES.put(0x200000, "INTERRUPTED");
+ STATE_NAMES.put(0x400000, "IN_NATIVE");
+ STATE_KEYS.addAll(STATE_NAMES.keySet());
+ Collections.sort(STATE_KEYS);
+ }
+
+ private static void printThreadState(Thread t) {
+ int state = getThreadState(t);
+
+ StringBuilder sb = new StringBuilder();
+
+ for (Integer i : STATE_KEYS) {
+ if ((state & i) != 0) {
+ if (sb.length()>0) {
+ sb.append('|');
+ }
+ sb.append(STATE_NAMES.get(i));
+ }
+ }
+
+ if (sb.length() == 0) {
+ sb.append("NEW");
+ }
+
+ System.out.println(Integer.toHexString(state) + " = " + sb.toString());
+ }
+
+ private static void printThreadInfo(Thread t) {
+ Object[] threadInfo = getThreadInfo(t);
+ if (threadInfo == null || threadInfo.length != 5) {
+ System.out.println(Arrays.toString(threadInfo));
+ throw new RuntimeException("threadInfo length wrong");
+ }
+
+ System.out.println(threadInfo[0]); // Name
+ System.out.println(threadInfo[1]); // Priority
+ System.out.println(threadInfo[2]); // Daemon
+ System.out.println(threadInfo[3]); // Threadgroup
+ System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
+ }
+
+ private static native Thread getCurrentThread();
+ private static native Object[] getThreadInfo(Thread t);
+ private static native int getThreadState(Thread t);
+ private static native Thread[] getAllThreads();
+ private static native void setTLS(Thread t, long l);
+ private static native long getTLS(Thread t);
+ private static native void enableThreadEvents(boolean b);
+}
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
index 14ea5af..701ab1d 100644
--- a/test/924-threads/threads.cc
+++ b/test/924-threads/threads.cc
@@ -16,15 +16,17 @@
#include <stdio.h>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "base/macros.h"
-#include "base/logging.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
namespace art {
namespace Test924Threads {
@@ -32,23 +34,23 @@
// private static native Thread getCurrentThread();
// private static native Object[] getThreadInfo(Thread t);
-extern "C" JNIEXPORT jthread JNICALL Java_Main_getCurrentThread(
+extern "C" JNIEXPORT jthread JNICALL Java_art_Test924_getCurrentThread(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jthread thread = nullptr;
jvmtiError result = jvmti_env->GetCurrentThread(&thread);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
return thread;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadInfo(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test924_getThreadInfo(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
jvmtiThreadInfo info;
memset(&info, 0, sizeof(jvmtiThreadInfo));
jvmtiError result = jvmti_env->GetThreadInfo(thread, &info);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -90,23 +92,23 @@
return ret;
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_getThreadState(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test924_getThreadState(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
jint state;
jvmtiError result = jvmti_env->GetThreadState(thread, &state);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return 0;
}
return state;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getAllThreads(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test924_getAllThreads(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jint thread_count;
jthread* threads;
jvmtiError result = jvmti_env->GetAllThreads(&thread_count, &threads);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -120,21 +122,21 @@
return ret;
}
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getTLS(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test924_getTLS(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
void* tls;
jvmtiError result = jvmti_env->GetThreadLocalStorage(thread, &tls);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return 0;
}
return static_cast<jlong>(reinterpret_cast<uintptr_t>(tls));
}
-extern "C" JNIEXPORT void JNICALL Java_Main_setTLS(
+extern "C" JNIEXPORT void JNICALL Java_art_Test924_setTLS(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread, jlong val) {
const void* tls = reinterpret_cast<void*>(static_cast<uintptr_t>(val));
jvmtiError result = jvmti_env->SetThreadLocalStorage(thread, tls);
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
static void JNICALL ThreadEvent(jvmtiEnv* jvmti_env,
@@ -166,19 +168,19 @@
ThreadEvent(jvmti_env, jni_env, thread, false);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableThreadEvents(
+extern "C" JNIEXPORT void JNICALL Java_art_Test924_enableThreadEvents(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
if (b == JNI_FALSE) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_THREAD_START,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_THREAD_END,
nullptr);
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
return;
}
@@ -187,20 +189,20 @@
callbacks.ThreadStart = ThreadStart;
callbacks.ThreadEnd = ThreadEnd;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_THREAD_START,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_THREAD_END,
nullptr);
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
}
} // namespace Test924Threads
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index bf7441f..73635a8 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,115 +14,8 @@
* limitations under the License.
*/
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test925.run();
}
-
- private static void doTest() throws Exception {
- Thread t1 = Thread.currentThread();
- ThreadGroup curGroup = t1.getThreadGroup();
-
- ThreadGroup rootGroup = curGroup;
- while (rootGroup.getParent() != null) {
- rootGroup = rootGroup.getParent();
- }
-
- ThreadGroup topGroups[] = getTopThreadGroups();
- if (topGroups == null || topGroups.length != 1 || topGroups[0] != rootGroup) {
- System.out.println(Arrays.toString(topGroups));
- throw new RuntimeException("Unexpected topGroups");
- }
-
- printThreadGroupInfo(curGroup);
- printThreadGroupInfo(rootGroup);
-
- waitGroupChildren(rootGroup, 5 /* # daemons */, 30 /* timeout in seconds */);
-
- checkChildren(curGroup);
- }
-
- private static void printThreadGroupInfo(ThreadGroup tg) {
- Object[] threadGroupInfo = getThreadGroupInfo(tg);
- if (threadGroupInfo == null || threadGroupInfo.length != 4) {
- System.out.println(Arrays.toString(threadGroupInfo));
- throw new RuntimeException("threadGroupInfo length wrong");
- }
-
- System.out.println(tg);
- System.out.println(" " + threadGroupInfo[0]); // Parent
- System.out.println(" " + threadGroupInfo[1]); // Name
- System.out.println(" " + threadGroupInfo[2]); // Priority
- System.out.println(" " + threadGroupInfo[3]); // Daemon
- }
-
- private static void checkChildren(ThreadGroup tg) {
- Object[] data = getThreadGroupChildren(tg);
- Thread[] threads = (Thread[])data[0];
- ThreadGroup[] groups = (ThreadGroup[])data[1];
-
- List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
-
- // Filter out JIT thread. It may or may not be there depending on configuration.
- Iterator<Thread> it = threadList.iterator();
- while (it.hasNext()) {
- Thread t = it.next();
- if (t.getName().startsWith("Jit thread pool worker")) {
- it.remove();
- break;
- }
- }
-
- Collections.sort(threadList, THREAD_COMP);
-
- Arrays.sort(groups, THREADGROUP_COMP);
- System.out.println(tg.getName() + ":");
- System.out.println(" " + threadList);
- System.out.println(" " + Arrays.toString(groups));
-
- if (tg.getParent() != null) {
- checkChildren(tg.getParent());
- }
- }
-
- private static void waitGroupChildren(ThreadGroup tg, int expectedChildCount, int timeoutS)
- throws Exception {
- for (int i = 0; i < timeoutS; i++) {
- Object[] data = getThreadGroupChildren(tg);
- Thread[] threads = (Thread[])data[0];
- if (threads.length == expectedChildCount) {
- return;
- }
- Thread.sleep(1000);
- }
-
- Object[] data = getThreadGroupChildren(tg);
- Thread[] threads = (Thread[])data[0];
- System.out.println(Arrays.toString(threads));
- throw new RuntimeException("Waited unsuccessfully for " + expectedChildCount + " children.");
- }
-
- private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
- public int compare(Thread o1, Thread o2) {
- return o1.getName().compareTo(o2.getName());
- }
- };
-
- private final static Comparator<ThreadGroup> THREADGROUP_COMP = new Comparator<ThreadGroup>() {
- public int compare(ThreadGroup o1, ThreadGroup o2) {
- return o1.getName().compareTo(o2.getName());
- }
- };
-
- private static native ThreadGroup[] getTopThreadGroups();
- private static native Object[] getThreadGroupInfo(ThreadGroup tg);
- // Returns an array where element 0 is an array of threads and element 1 is an array of groups.
- private static native Object[] getThreadGroupChildren(ThreadGroup tg);
}
diff --git a/test/925-threadgroups/src/art/Main.java b/test/925-threadgroups/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/925-threadgroups/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/925-threadgroups/src/art/Test925.java b/test/925-threadgroups/src/art/Test925.java
new file mode 100644
index 0000000..14ca7a7
--- /dev/null
+++ b/test/925-threadgroups/src/art/Test925.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+public class Test925 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test925.class);
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ Thread t1 = Thread.currentThread();
+ ThreadGroup curGroup = t1.getThreadGroup();
+
+ ThreadGroup rootGroup = curGroup;
+ while (rootGroup.getParent() != null) {
+ rootGroup = rootGroup.getParent();
+ }
+
+ ThreadGroup topGroups[] = getTopThreadGroups();
+ if (topGroups == null || topGroups.length != 1 || topGroups[0] != rootGroup) {
+ System.out.println(Arrays.toString(topGroups));
+ throw new RuntimeException("Unexpected topGroups");
+ }
+
+ printThreadGroupInfo(curGroup);
+ printThreadGroupInfo(rootGroup);
+
+ waitGroupChildren(rootGroup, 5 /* # daemons */, 30 /* timeout in seconds */);
+
+ checkChildren(curGroup);
+ }
+
+ private static void printThreadGroupInfo(ThreadGroup tg) {
+ Object[] threadGroupInfo = getThreadGroupInfo(tg);
+ if (threadGroupInfo == null || threadGroupInfo.length != 4) {
+ System.out.println(Arrays.toString(threadGroupInfo));
+ throw new RuntimeException("threadGroupInfo length wrong");
+ }
+
+ System.out.println(tg);
+ System.out.println(" " + threadGroupInfo[0]); // Parent
+ System.out.println(" " + threadGroupInfo[1]); // Name
+ System.out.println(" " + threadGroupInfo[2]); // Priority
+ System.out.println(" " + threadGroupInfo[3]); // Daemon
+ }
+
+ private static void checkChildren(ThreadGroup tg) {
+ Object[] data = getThreadGroupChildren(tg);
+ Thread[] threads = (Thread[])data[0];
+ ThreadGroup[] groups = (ThreadGroup[])data[1];
+
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+
+ Arrays.sort(groups, THREADGROUP_COMP);
+ System.out.println(tg.getName() + ":");
+ System.out.println(" " + threadList);
+ System.out.println(" " + Arrays.toString(groups));
+
+ if (tg.getParent() != null) {
+ checkChildren(tg.getParent());
+ }
+ }
+
+ private static void waitGroupChildren(ThreadGroup tg, int expectedChildCount, int timeoutS)
+ throws Exception {
+ for (int i = 0; i < timeoutS; i++) {
+ Object[] data = getThreadGroupChildren(tg);
+ Thread[] threads = (Thread[])data[0];
+ if (threads.length == expectedChildCount) {
+ return;
+ }
+ Thread.sleep(1000);
+ }
+
+ Object[] data = getThreadGroupChildren(tg);
+ Thread[] threads = (Thread[])data[0];
+ System.out.println(Arrays.toString(threads));
+ throw new RuntimeException("Waited unsuccessfully for " + expectedChildCount + " children.");
+ }
+
+ private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
+ public int compare(Thread o1, Thread o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ };
+
+ private final static Comparator<ThreadGroup> THREADGROUP_COMP = new Comparator<ThreadGroup>() {
+ public int compare(ThreadGroup o1, ThreadGroup o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ };
+
+ private static native ThreadGroup[] getTopThreadGroups();
+ private static native Object[] getThreadGroupInfo(ThreadGroup tg);
+ // Returns an array where element 0 is an array of threads and element 1 is an array of groups.
+ private static native Object[] getThreadGroupChildren(ThreadGroup tg);
+}
diff --git a/test/925-threadgroups/threadgroups.cc b/test/925-threadgroups/threadgroups.cc
index 2feaab0..cc053bc 100644
--- a/test/925-threadgroups/threadgroups.cc
+++ b/test/925-threadgroups/threadgroups.cc
@@ -16,15 +16,17 @@
#include <stdio.h>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "base/macros.h"
-#include "base/logging.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
namespace art {
namespace Test925ThreadGroups {
@@ -33,12 +35,12 @@
// // Returns an array where element 0 is an array of threads and element 1 is an array of groups.
// private static native Object[] getThreadGroupChildren();
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTopThreadGroups(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test925_getTopThreadGroups(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jthreadGroup* groups;
jint group_count;
jvmtiError result = jvmti_env->GetTopThreadGroups(&group_count, &groups);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -52,11 +54,11 @@
return ret;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadGroupInfo(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test925_getThreadGroupInfo(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthreadGroup group) {
jvmtiThreadGroupInfo info;
jvmtiError result = jvmti_env->GetThreadGroupInfo(group, &info);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
@@ -84,7 +86,7 @@
return CreateObjectArray(env, 4, "java/lang/Object", callback);
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadGroupChildren(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test925_getThreadGroupChildren(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthreadGroup group) {
jint thread_count;
jthread* threads;
@@ -96,7 +98,7 @@
&threads,
&threadgroup_count,
&groups);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
diff --git a/test/926-multi-obsolescence/src/Main.java b/test/926-multi-obsolescence/src/Main.java
index 6d9f96c..2440908 100644
--- a/test/926-multi-obsolescence/src/Main.java
+++ b/test/926-multi-obsolescence/src/Main.java
@@ -92,6 +92,7 @@
"AACUAQAAAiAAABEAAACiAQAAAyAAAAIAAACXAgAAACAAAAEAAAClAgAAABAAAAEAAAC0AgAA"));
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform(), new Transform2());
}
diff --git a/test/926-multi-obsolescence/src/art/Main.java b/test/926-multi-obsolescence/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/926-multi-obsolescence/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/927-timers/src/Main.java b/test/927-timers/src/Main.java
index b67f66d..a908532 100644
--- a/test/927-timers/src/Main.java
+++ b/test/927-timers/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,47 +14,8 @@
* limitations under the License.
*/
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- doTest();
+ art.Test927.run();
}
-
- private static void doTest() {
- int all1 = Runtime.getRuntime().availableProcessors();
- int all2 = getAvailableProcessors();
- if (all1 != all2) {
- throw new RuntimeException("Available processors doesn't match: " + all1 + " vs " + all2);
- }
- System.out.println("availableProcessors OK");
-
- Object info[] = getTimerInfo();
- System.out.println(Arrays.toString(info));
-
- // getTime checks.
- // Note: there isn't really much to check independent from the implementation. So we check
- // a few details of the ART implementation. This may fail on other runtimes.
- long time1 = getTime();
- long time2 = getTime();
-
- // Under normal circumstances, time1 <= time2.
- if (time2 < time1) {
- throw new RuntimeException("Time unexpectedly decreased: " + time1 + " vs " + time2);
- }
-
- long time3 = System.nanoTime();
- long time4 = getTime();
-
- final long MINUTE = 60l * 1000 * 1000 * 1000;
- if (time4 < time3 || (time4 - time3 > MINUTE)) {
- throw new RuntimeException("Time unexpectedly divergent: " + time3 + " vs " + time4);
- }
-
- System.out.println("Time OK");
- }
-
- private static native int getAvailableProcessors();
- private static native Object[] getTimerInfo();
- private static native long getTime();
}
diff --git a/test/927-timers/src/art/Main.java b/test/927-timers/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/927-timers/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/927-timers/src/art/Test927.java b/test/927-timers/src/art/Test927.java
new file mode 100644
index 0000000..1ed0160
--- /dev/null
+++ b/test/927-timers/src/art/Test927.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Arrays;
+
+public class Test927 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test927.class);
+ doTest();
+ }
+
+ private static void doTest() {
+ int all1 = Runtime.getRuntime().availableProcessors();
+ int all2 = getAvailableProcessors();
+ if (all1 != all2) {
+ throw new RuntimeException("Available processors doesn't match: " + all1 + " vs " + all2);
+ }
+ System.out.println("availableProcessors OK");
+
+ Object info[] = getTimerInfo();
+ System.out.println(Arrays.toString(info));
+
+ // getTime checks.
+ // Note: there isn't really much to check independent from the implementation. So we check
+ // a few details of the ART implementation. This may fail on other runtimes.
+ long time1 = getTime();
+ long time2 = getTime();
+
+ // Under normal circumstances, time1 <= time2.
+ if (time2 < time1) {
+ throw new RuntimeException("Time unexpectedly decreased: " + time1 + " vs " + time2);
+ }
+
+ long time3 = System.nanoTime();
+ long time4 = getTime();
+
+ final long MINUTE = 60l * 1000 * 1000 * 1000;
+ if (time4 < time3 || (time4 - time3 > MINUTE)) {
+ throw new RuntimeException("Time unexpectedly divergent: " + time3 + " vs " + time4);
+ }
+
+ System.out.println("Time OK");
+ }
+
+ private static native int getAvailableProcessors();
+ private static native Object[] getTimerInfo();
+ private static native long getTime();
+}
diff --git a/test/927-timers/timers.cc b/test/927-timers/timers.cc
index 7b1d5c3..9eaac71 100644
--- a/test/927-timers/timers.cc
+++ b/test/927-timers/timers.cc
@@ -16,43 +16,46 @@
#include <inttypes.h>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "base/logging.h"
-#include "base/macros.h"
+
#include "jni.h"
#include "jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
namespace art {
namespace Test926Timers {
-extern "C" JNIEXPORT jint JNICALL Java_Main_getAvailableProcessors(
+extern "C" JNIEXPORT jint JNICALL Java_art_Test927_getAvailableProcessors(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jint count;
jvmtiError result = jvmti_env->GetAvailableProcessors(&count);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return count;
}
-extern "C" JNIEXPORT jlong JNICALL Java_Main_getTime(
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test927_getTime(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jlong time;
jvmtiError result = jvmti_env->GetTime(&time);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return -1;
}
return time;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTimerInfo(
+extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test927_getTimerInfo(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jvmtiTimerInfo info;
jvmtiError result = jvmti_env->GetTimerInfo(&info);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return nullptr;
}
diff --git a/test/928-jni-table/jni_table.cc b/test/928-jni-table/jni_table.cc
index b5c0efd..9a8b7fe 100644
--- a/test/928-jni-table/jni_table.cc
+++ b/test/928-jni-table/jni_table.cc
@@ -19,11 +19,12 @@
#include "jni.h"
#include "jvmti.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "android-base/logging.h"
+#include "android-base/macros.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test927JNITable {
@@ -38,18 +39,18 @@
return gOriginalEnv->NewGlobalRef(env, o);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_doJNITableTest(
+extern "C" JNIEXPORT void JNICALL Java_art_Test928_doJNITableTest(
JNIEnv* env, jclass klass) {
// Get the current table, as the delegate.
jvmtiError getorig_result = jvmti_env->GetJNIFunctionTable(&gOriginalEnv);
- if (JvmtiErrorToException(env, getorig_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, getorig_result)) {
return;
}
// Get the current table, as the override we'll install.
JNINativeInterface* env_override;
jvmtiError getoverride_result = jvmti_env->GetJNIFunctionTable(&env_override);
- if (JvmtiErrorToException(env, getoverride_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, getoverride_result)) {
return;
}
@@ -58,7 +59,7 @@
// Install the override.
jvmtiError setoverride_result = jvmti_env->SetJNIFunctionTable(env_override);
- if (JvmtiErrorToException(env, setoverride_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, setoverride_result)) {
return;
}
@@ -68,7 +69,7 @@
// Install the "original." There is no real reset.
jvmtiError setoverride2_result = jvmti_env->SetJNIFunctionTable(gOriginalEnv);
- if (JvmtiErrorToException(env, setoverride2_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, setoverride2_result)) {
return;
}
diff --git a/test/928-jni-table/src/Main.java b/test/928-jni-table/src/Main.java
index fd61b7d..464a608 100644
--- a/test/928-jni-table/src/Main.java
+++ b/test/928-jni-table/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,10 +16,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- doJNITableTest();
-
- System.out.println("Done");
+ art.Test928.run();
}
-
- public static native void doJNITableTest();
}
diff --git a/test/928-jni-table/src/art/Main.java b/test/928-jni-table/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/928-jni-table/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/ti-agent/common_load.h b/test/928-jni-table/src/art/Test928.java
similarity index 65%
copy from test/ti-agent/common_load.h
copy to test/928-jni-table/src/art/Test928.java
index e79a006..3f3935d 100644
--- a/test/ti-agent/common_load.h
+++ b/test/928-jni-table/src/art/Test928.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+package art;
-#include "jvmti.h"
+public class Test928 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test928.class);
+ doJNITableTest();
-namespace art {
+ System.out.println("Done");
+ }
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+ public static native void doJNITableTest();
+}
diff --git a/test/929-search/search.cc b/test/929-search/search.cc
index ad7a053..5516105 100644
--- a/test/929-search/search.cc
+++ b/test/929-search/search.cc
@@ -16,15 +16,16 @@
#include <inttypes.h>
+#include "android-base/logging.h"
+#include "android-base/macros.h"
#include "android-base/stringprintf.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedUtfChars.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test929Search {
@@ -36,7 +37,7 @@
return;
}
jvmtiError result = jvmti_env->AddToBootstrapClassLoaderSearch(utf.c_str());
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
extern "C" JNIEXPORT void JNICALL Java_Main_addToSystemClassLoader(
@@ -46,7 +47,7 @@
return;
}
jvmtiError result = jvmti_env->AddToSystemClassLoaderSearch(utf.c_str());
- JvmtiErrorToException(env, result);
+ JvmtiErrorToException(env, jvmti_env, result);
}
} // namespace Test929Search
diff --git a/test/929-search/src/Main.java b/test/929-search/src/Main.java
index bbeb081..4073c3f 100644
--- a/test/929-search/src/Main.java
+++ b/test/929-search/src/Main.java
@@ -18,6 +18,7 @@
public class Main {
public static void main(String[] args) throws Exception {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest();
}
diff --git a/test/929-search/src/art/Main.java b/test/929-search/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/929-search/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/930-hello-retransform/src/Main.java b/test/930-hello-retransform/src/Main.java
index 0063c82..da59c74 100644
--- a/test/930-hello-retransform/src/Main.java
+++ b/test/930-hello-retransform/src/Main.java
@@ -49,6 +49,7 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/930-hello-retransform/src/art/Main.java b/test/930-hello-retransform/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/930-hello-retransform/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/931-agent-thread/agent_thread.cc b/test/931-agent-thread/agent_thread.cc
index f8f9e48..577a97e 100644
--- a/test/931-agent-thread/agent_thread.cc
+++ b/test/931-agent-thread/agent_thread.cc
@@ -15,20 +15,18 @@
*/
#include <inttypes.h>
+#include <pthread.h>
#include <sched.h>
-#include "barrier.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "android-base/logging.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "runtime.h"
-#include "ScopedLocalRef.h"
-#include "thread-inl.h"
-#include "well_known_classes.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test930AgentThread {
@@ -36,12 +34,12 @@
struct AgentData {
AgentData() : main_thread(nullptr),
jvmti_env(nullptr),
- b(2) {
+ priority(0) {
}
jthread main_thread;
jvmtiEnv* jvmti_env;
- Barrier b;
+ pthread_barrier_t b;
jint priority;
};
@@ -52,14 +50,21 @@
// This thread is not the main thread.
jthread this_thread;
jvmtiError this_thread_result = jenv->GetCurrentThread(&this_thread);
- CHECK(!JvmtiErrorToException(env, this_thread_result));
+ CheckJvmtiError(jenv, this_thread_result);
CHECK(!env->IsSameObject(this_thread, data->main_thread));
// The thread is a daemon.
jvmtiThreadInfo info;
jvmtiError info_result = jenv->GetThreadInfo(this_thread, &info);
- CHECK(!JvmtiErrorToException(env, info_result));
+ CheckJvmtiError(jenv, info_result);
CHECK(info.is_daemon);
+ CheckJvmtiError(jenv, jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name)));
+ if (info.thread_group != nullptr) {
+ env->DeleteLocalRef(info.thread_group);
+ }
+ if (info.context_class_loader != nullptr) {
+ env->DeleteLocalRef(info.context_class_loader);
+ }
// The thread has the requested priority.
// TODO: Our thread priorities do not work on the host.
@@ -69,7 +74,7 @@
jint thread_count;
jthread* threads;
jvmtiError threads_result = jenv->GetAllThreads(&thread_count, &threads);
- CHECK(!JvmtiErrorToException(env, threads_result));
+ CheckJvmtiError(jenv, threads_result);
bool found = false;
for (jint i = 0; i != thread_count; ++i) {
if (env->IsSameObject(threads[i], this_thread)) {
@@ -80,29 +85,53 @@
CHECK(found);
// Done, let the main thread progress.
- data->b.Pass(Thread::Current());
+ int wait_result = pthread_barrier_wait(&data->b);
+ CHECK(wait_result == PTHREAD_BARRIER_SERIAL_THREAD || wait_result == 0);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_testAgentThread(
+extern "C" JNIEXPORT void JNICALL Java_art_Test931_testAgentThread(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
// Create a Thread object.
- ScopedLocalRef<jobject> thread_name(env,
- env->NewStringUTF("Agent Thread"));
+ ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF("Agent Thread"));
if (thread_name.get() == nullptr) {
return;
}
- ScopedLocalRef<jobject> thread(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
+ ScopedLocalRef<jclass> thread_klass(env, env->FindClass("java/lang/Thread"));
+ if (thread_klass.get() == nullptr) {
+ return;
+ }
+ ScopedLocalRef<jobject> thread(env, env->AllocObject(thread_klass.get()));
if (thread.get() == nullptr) {
return;
}
+ // Get a ThreadGroup from the current thread. We need a non-null one as we're gonna call a
+ // runtime-only constructor (so we can set priority and daemon state).
+ jvmtiThreadInfo cur_thread_info;
+ jvmtiError info_result = jvmti_env->GetThreadInfo(nullptr, &cur_thread_info);
+ if (JvmtiErrorToException(env, jvmti_env, info_result)) {
+ return;
+ }
+ CheckJvmtiError(jvmti_env,
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(cur_thread_info.name)));
+ ScopedLocalRef<jobject> thread_group(env, cur_thread_info.thread_group);
+ if (cur_thread_info.context_class_loader != nullptr) {
+ env->DeleteLocalRef(cur_thread_info.context_class_loader);
+ }
+
+ jmethodID initID = env->GetMethodID(thread_klass.get(),
+ "<init>",
+ "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
+ if (initID == nullptr) {
+ return;
+ }
env->CallNonvirtualVoidMethod(thread.get(),
- WellKnownClasses::java_lang_Thread,
- WellKnownClasses::java_lang_Thread_init,
- Runtime::Current()->GetMainThreadGroup(),
+ thread_klass.get(),
+ initID,
+ thread_group.get(),
thread_name.get(),
- kMinThreadPriority,
+ 0,
JNI_FALSE);
if (env->ExceptionCheck()) {
return;
@@ -110,7 +139,7 @@
jthread main_thread;
jvmtiError main_thread_result = jvmti_env->GetCurrentThread(&main_thread);
- if (JvmtiErrorToException(env, main_thread_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, main_thread_result)) {
return;
}
@@ -118,21 +147,23 @@
data.main_thread = env->NewGlobalRef(main_thread);
data.jvmti_env = jvmti_env;
data.priority = JVMTI_THREAD_MIN_PRIORITY;
+ CHECK_EQ(0, pthread_barrier_init(&data.b, nullptr, 2));
jvmtiError result = jvmti_env->RunAgentThread(thread.get(), AgentMain, &data, data.priority);
- if (JvmtiErrorToException(env, result)) {
+ if (JvmtiErrorToException(env, jvmti_env, result)) {
return;
}
- data.b.Wait(Thread::Current());
+ int wait_result = pthread_barrier_wait(&data.b);
+ CHECK(wait_result == PTHREAD_BARRIER_SERIAL_THREAD || wait_result == 0);
// Scheduling may mean that the agent thread is put to sleep. Wait until it's dead in an effort
// to not unload the plugin and crash.
for (;;) {
- NanoSleep(1000 * 1000);
+ sleep(1);
jint thread_state;
jvmtiError state_result = jvmti_env->GetThreadState(thread.get(), &thread_state);
- if (JvmtiErrorToException(env, state_result)) {
+ if (JvmtiErrorToException(env, jvmti_env, state_result)) {
return;
}
if (thread_state == 0 || // Was never alive.
@@ -142,9 +173,11 @@
}
// Yield and sleep a bit more, to give the plugin time to tear down the native thread structure.
sched_yield();
- NanoSleep(100 * 1000 * 1000);
+ sleep(1);
env->DeleteGlobalRef(data.main_thread);
+
+ pthread_barrier_destroy(&data.b);
}
} // namespace Test930AgentThread
diff --git a/test/931-agent-thread/src/Main.java b/test/931-agent-thread/src/Main.java
index a7639fb..59a9d9e 100644
--- a/test/931-agent-thread/src/Main.java
+++ b/test/931-agent-thread/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,14 +14,8 @@
* limitations under the License.
*/
-import java.util.Arrays;
-
public class Main {
public static void main(String[] args) throws Exception {
- testAgentThread();
-
- System.out.println("Done");
+ art.Test931.run();
}
-
- private static native void testAgentThread();
}
diff --git a/test/931-agent-thread/src/art/Main.java b/test/931-agent-thread/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/931-agent-thread/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/ti-agent/common_load.h b/test/931-agent-thread/src/art/Test931.java
similarity index 63%
copy from test/ti-agent/common_load.h
copy to test/931-agent-thread/src/art/Test931.java
index e79a006..bc096a7 100644
--- a/test/ti-agent/common_load.h
+++ b/test/931-agent-thread/src/art/Test931.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,17 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+package art;
-#include "jvmti.h"
+import java.util.Arrays;
-namespace art {
+public class Test931 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test931.class);
+ testAgentThread();
-extern jvmtiEnv* jvmti_env;
+ System.out.println("Done");
+ }
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+ private static native void testAgentThread();
+}
diff --git a/test/932-transform-saves/src/Main.java b/test/932-transform-saves/src/Main.java
index d960322..14e5da0 100644
--- a/test/932-transform-saves/src/Main.java
+++ b/test/932-transform-saves/src/Main.java
@@ -79,6 +79,7 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/932-transform-saves/src/Transform.java b/test/932-transform-saves/src/Transform.java
index 8e8af35..83f7aa4 100644
--- a/test/932-transform-saves/src/Transform.java
+++ b/test/932-transform-saves/src/Transform.java
@@ -23,6 +23,6 @@
// of the two different strings were the same).
// We know the string ids will be different because lexicographically:
// "Goodbye" < "LTransform;" < "hello".
- System.out.println("hello");
+ System.out.println("foobar");
}
}
diff --git a/test/932-transform-saves/src/art/Main.java b/test/932-transform-saves/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/932-transform-saves/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/933-misc-events/misc_events.cc b/test/933-misc-events/misc_events.cc
index 7043350..27dab8b 100644
--- a/test/933-misc-events/misc_events.cc
+++ b/test/933-misc-events/misc_events.cc
@@ -18,13 +18,14 @@
#include <signal.h>
#include <sys/types.h>
-#include "base/logging.h"
-#include "base/macros.h"
+#include "android-base/logging.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test933MiscEvents {
@@ -36,20 +37,20 @@
saw_dump_request.store(true, std::memory_order::memory_order_relaxed);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_testSigQuit(
+extern "C" JNIEXPORT void JNICALL Java_art_Test933_testSigQuit(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
callbacks.DataDumpRequest = DumpRequestCallback;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_DATA_DUMP_REQUEST,
nullptr);
- if (JvmtiErrorToException(env, ret)) {
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
return;
}
@@ -65,7 +66,7 @@
}
ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr);
- JvmtiErrorToException(env, ret);
+ JvmtiErrorToException(env, jvmti_env, ret);
}
} // namespace Test933MiscEvents
diff --git a/test/933-misc-events/src/Main.java b/test/933-misc-events/src/Main.java
index 89801a3..c107ba0 100644
--- a/test/933-misc-events/src/Main.java
+++ b/test/933-misc-events/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,10 +16,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- testSigQuit();
-
- System.out.println("Done");
+ art.Test933.run();
}
-
- private static native void testSigQuit();
}
diff --git a/test/933-misc-events/src/art/Main.java b/test/933-misc-events/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/933-misc-events/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/ti-agent/common_load.h b/test/933-misc-events/src/art/Test933.java
similarity index 66%
copy from test/ti-agent/common_load.h
copy to test/933-misc-events/src/art/Test933.java
index e79a006..afebbf8 100644
--- a/test/ti-agent/common_load.h
+++ b/test/933-misc-events/src/art/Test933.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+package art;
-#include "jvmti.h"
+public class Test933 {
+ public static void run() throws Exception {
+ Main.bindAgentJNIForClass(Test933.class);
+ testSigQuit();
-namespace art {
+ System.out.println("Done");
+ }
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+ private static native void testSigQuit();
+}
diff --git a/test/934-load-transform/src/Main.java b/test/934-load-transform/src/Main.java
index de312b0..606ce78 100644
--- a/test/934-load-transform/src/Main.java
+++ b/test/934-load-transform/src/Main.java
@@ -66,6 +66,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
// Don't pop transformations. Make sure that even if 2 threads race to define the class both
// will get the same result.
setPopRetransformations(false);
diff --git a/test/934-load-transform/src/art/Main.java b/test/934-load-transform/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/934-load-transform/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/935-non-retransformable/src/Main.java b/test/935-non-retransformable/src/Main.java
index 82ba197..df92561 100644
--- a/test/935-non-retransformable/src/Main.java
+++ b/test/935-non-retransformable/src/Main.java
@@ -74,6 +74,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
setPopRetransformations(false);
addCommonTransformationResult("Transform", CLASS_BYTES, DEX_BYTES);
enableCommonRetransformation(true);
diff --git a/test/935-non-retransformable/src/art/Main.java b/test/935-non-retransformable/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/935-non-retransformable/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/936-search-onload/search_onload.cc b/test/936-search-onload/search_onload.cc
index 3b19ca5..b2ef056 100644
--- a/test/936-search-onload/search_onload.cc
+++ b/test/936-search-onload/search_onload.cc
@@ -23,10 +23,11 @@
#include "base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedUtfChars.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test936SearchOnload {
diff --git a/test/936-search-onload/src/Main.java b/test/936-search-onload/src/Main.java
index 2e7a871..8d40753 100644
--- a/test/936-search-onload/src/Main.java
+++ b/test/936-search-onload/src/Main.java
@@ -18,6 +18,7 @@
public class Main {
public static void main(String[] args) throws Exception {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest();
}
diff --git a/test/936-search-onload/src/art/Main.java b/test/936-search-onload/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/936-search-onload/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/937-hello-retransform-package/src/Main.java b/test/937-hello-retransform-package/src/Main.java
index 4b9271b..866f75d 100644
--- a/test/937-hello-retransform-package/src/Main.java
+++ b/test/937-hello-retransform-package/src/Main.java
@@ -53,6 +53,7 @@
"YgEAAAMgAAACAAAAGwIAAAAgAAABAAAAJgIAAAAQAAABAAAANAIAAA==");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/937-hello-retransform-package/src/art/Main.java b/test/937-hello-retransform-package/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/937-hello-retransform-package/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/938-load-transform-bcp/src/Main.java b/test/938-load-transform-bcp/src/Main.java
index 5484899..21b841f 100644
--- a/test/938-load-transform-bcp/src/Main.java
+++ b/test/938-load-transform-bcp/src/Main.java
@@ -96,6 +96,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
setPopRetransformations(false);
addCommonTransformationResult("java/util/OptionalLong", CLASS_BYTES, DEX_BYTES);
enableCommonRetransformation(true);
diff --git a/test/938-load-transform-bcp/src/art/Main.java b/test/938-load-transform-bcp/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/938-load-transform-bcp/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/939-hello-transformation-bcp/src/Main.java b/test/939-hello-transformation-bcp/src/Main.java
index bdf7f59..0e1f845 100644
--- a/test/939-hello-transformation-bcp/src/Main.java
+++ b/test/939-hello-transformation-bcp/src/Main.java
@@ -110,6 +110,7 @@
"AABHBgAABCAAAAIAAACVBgAAACAAAAEAAACtBgAAABAAAAEAAAD4BgAA");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
// OptionalLong is a class that is unlikely to be used by the time this test starts and is not
// likely to be changed in any meaningful way in the future.
OptionalLong ol = OptionalLong.of(0xDEADBEEF);
diff --git a/test/939-hello-transformation-bcp/src/art/Main.java b/test/939-hello-transformation-bcp/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/939-hello-transformation-bcp/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/940-recursive-obsolete/src/Main.java b/test/940-recursive-obsolete/src/Main.java
index 3766906..724f82d 100644
--- a/test/940-recursive-obsolete/src/Main.java
+++ b/test/940-recursive-obsolete/src/Main.java
@@ -70,6 +70,7 @@
"1gMAAAAQAAABAAAA5AMAAA==");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/940-recursive-obsolete/src/art/Main.java b/test/940-recursive-obsolete/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/940-recursive-obsolete/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/941-recurive-obsolete-jit/src/Main.java b/test/941-recurive-obsolete-jit/src/Main.java
index f6d6416..d88bb9b 100644
--- a/test/941-recurive-obsolete-jit/src/Main.java
+++ b/test/941-recurive-obsolete-jit/src/Main.java
@@ -99,6 +99,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/941-recurive-obsolete-jit/src/art/Main.java b/test/941-recurive-obsolete-jit/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/942-private-recursive/src/Main.java b/test/942-private-recursive/src/Main.java
index 8cbab7b..cac75c0 100644
--- a/test/942-private-recursive/src/Main.java
+++ b/test/942-private-recursive/src/Main.java
@@ -75,6 +75,7 @@
"IAAAAQAAABAEAAAAEAAAAQAAACQEAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/942-private-recursive/src/art/Main.java b/test/942-private-recursive/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/942-private-recursive/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/943-private-recursive-jit/src/Main.java b/test/943-private-recursive-jit/src/Main.java
index 8fa534d..f380c06 100644
--- a/test/943-private-recursive-jit/src/Main.java
+++ b/test/943-private-recursive-jit/src/Main.java
@@ -109,6 +109,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/943-private-recursive-jit/src/art/Main.java b/test/943-private-recursive-jit/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/943-private-recursive-jit/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/944-transform-classloaders/classloader.cc b/test/944-transform-classloaders/classloader.cc
index 7cb3c08..698e023 100644
--- a/test/944-transform-classloaders/classloader.cc
+++ b/test/944-transform-classloaders/classloader.cc
@@ -14,19 +14,18 @@
* limitations under the License.
*/
-#include "base/macros.h"
+#include "android-base/macros.h"
#include "jni.h"
#include "jvmti.h"
#include "mirror/class-inl.h"
-#include "ScopedLocalRef.h"
+#include "scoped_local_ref.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "test_env.h"
namespace art {
namespace Test944TransformClassloaders {
-
extern "C" JNIEXPORT jlong JNICALL Java_Main_getDexFilePointer(JNIEnv* env, jclass, jclass klass) {
if (Runtime::Current() == nullptr) {
env->ThrowNew(env->FindClass("java/lang/Exception"),
diff --git a/test/944-transform-classloaders/src/Main.java b/test/944-transform-classloaders/src/Main.java
index 4911e00..b558660 100644
--- a/test/944-transform-classloaders/src/Main.java
+++ b/test/944-transform-classloaders/src/Main.java
@@ -203,6 +203,8 @@
}
private static void doTest() throws Exception {
+ art.Main.bindAgentJNIForClass(Main.class);
+
Transform t = new Transform();
Transform2 t2 = new Transform2();
diff --git a/test/944-transform-classloaders/src/art/Main.java b/test/944-transform-classloaders/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/944-transform-classloaders/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/945-obsolete-native/obsolete_native.cc b/test/945-obsolete-native/obsolete_native.cc
index 442836b..ee653a4 100644
--- a/test/945-obsolete-native/obsolete_native.cc
+++ b/test/945-obsolete-native/obsolete_native.cc
@@ -25,9 +25,11 @@
#include "base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "test_env.h"
namespace art {
namespace Test945ObsoleteNative {
diff --git a/test/945-obsolete-native/src/Main.java b/test/945-obsolete-native/src/Main.java
index 5e2154e..a7901cd 100644
--- a/test/945-obsolete-native/src/Main.java
+++ b/test/945-obsolete-native/src/Main.java
@@ -55,6 +55,7 @@
"AA==");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
bindTest945ObsoleteNative();
doTest(new Transform());
}
diff --git a/test/945-obsolete-native/src/art/Main.java b/test/945-obsolete-native/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/945-obsolete-native/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/946-obsolete-throw/expected.txt b/test/946-obsolete-throw/expected.txt
index 91dd7df..71d5182 100644
--- a/test/946-obsolete-throw/expected.txt
+++ b/test/946-obsolete-throw/expected.txt
@@ -5,10 +5,10 @@
transforming calling function
Received error : java.lang.Error: Throwing exception into an obsolete method!
java.lang.Error: Throwing exception into an obsolete method!
- at Main$DoRedefinitionClass.run(Main.java:64)
+ at Main$DoRedefinitionClass.run(Main.java:65)
at Transform.sayHi(Transform.java:27)
- at Main.doTest(Main.java:71)
- at Main.main(Main.java:56)
+ at Main.doTest(Main.java:72)
+ at Main.main(Main.java:57)
Hello - Transformed
Not doing anything here
Goodbye - Transformed
diff --git a/test/946-obsolete-throw/src/Main.java b/test/946-obsolete-throw/src/Main.java
index 21fe972..077ad72 100644
--- a/test/946-obsolete-throw/src/Main.java
+++ b/test/946-obsolete-throw/src/Main.java
@@ -53,6 +53,7 @@
"AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/946-obsolete-throw/src/art/Main.java b/test/946-obsolete-throw/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/946-obsolete-throw/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/947-reflect-method/src/Main.java b/test/947-reflect-method/src/Main.java
index a229dd4..da746ac 100644
--- a/test/947-reflect-method/src/Main.java
+++ b/test/947-reflect-method/src/Main.java
@@ -51,6 +51,7 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new Transform());
}
diff --git a/test/947-reflect-method/src/art/Main.java b/test/947-reflect-method/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/947-reflect-method/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/948-change-annotations/src/Main.java b/test/948-change-annotations/src/Main.java
index 30bfbf9..a290396 100644
--- a/test/948-change-annotations/src/Main.java
+++ b/test/948-change-annotations/src/Main.java
@@ -55,6 +55,7 @@
"AAQgAAACAAAAmwIAAAAgAAABAAAApwIAAAAQAAABAAAAuAIAAA==");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(new RemoveAnnotationsTest());
doTest(new AddAnnotationsTest());
doTest(new ChangeAnnotationValues());
diff --git a/test/948-change-annotations/src/art/Main.java b/test/948-change-annotations/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/948-change-annotations/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/949-in-memory-transform/src/Main.java b/test/949-in-memory-transform/src/Main.java
index 2ffabf5..1a6b224 100644
--- a/test/949-in-memory-transform/src/Main.java
+++ b/test/949-in-memory-transform/src/Main.java
@@ -83,6 +83,7 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) throws Exception {
+ art.Main.bindAgentJNIForClass(Main.class);
ClassLoader loader;
try {
// Art uses this classloader to do in-memory dex files. There is no support for defineClass
diff --git a/test/949-in-memory-transform/src/art/Main.java b/test/949-in-memory-transform/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/949-in-memory-transform/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/950-redefine-intrinsic/src/Main.java b/test/950-redefine-intrinsic/src/Main.java
index 30cd3ab..2578d6e 100644
--- a/test/950-redefine-intrinsic/src/Main.java
+++ b/test/950-redefine-intrinsic/src/Main.java
@@ -425,6 +425,7 @@
}
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest(10000);
}
diff --git a/test/950-redefine-intrinsic/src/art/Main.java b/test/950-redefine-intrinsic/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/950-redefine-intrinsic/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/951-threaded-obsolete/src/Main.java b/test/951-threaded-obsolete/src/Main.java
index 98e7236..a82090e 100644
--- a/test/951-threaded-obsolete/src/Main.java
+++ b/test/951-threaded-obsolete/src/Main.java
@@ -54,6 +54,7 @@
"AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
// Semaphores to let each thread know where the other is. We could use barriers but semaphores
// mean we don't need to have the worker thread be waiting around.
final Semaphore sem_redefine_start = new Semaphore(0);
diff --git a/test/951-threaded-obsolete/src/art/Main.java b/test/951-threaded-obsolete/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/951-threaded-obsolete/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/980-redefine-object/redefine_object.cc b/test/980-redefine-object/redefine_object.cc
index daae087..1faf1a1 100644
--- a/test/980-redefine-object/redefine_object.cc
+++ b/test/980-redefine-object/redefine_object.cc
@@ -22,10 +22,12 @@
#include "base/macros.h"
#include "jni.h"
#include "jvmti.h"
-#include "ScopedUtfChars.h"
+#include "scoped_utf_chars.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
+// Test infrastructure
+#include "jni_binder.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
namespace art {
namespace Test980RedefineObjects {
@@ -39,9 +41,11 @@
JNIEnv* env, jclass TestWatcherClass ATTRIBUTE_UNUSED, jobject constructed) {
char* sig = nullptr;
char* generic_sig = nullptr;
- if (JvmtiErrorToException(env, jvmti_env->GetClassSignature(env->GetObjectClass(constructed),
- &sig,
- &generic_sig))) {
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetClassSignature(env->GetObjectClass(constructed),
+ &sig,
+ &generic_sig))) {
// Exception.
return;
}
diff --git a/test/980-redefine-object/src/Main.java b/test/980-redefine-object/src/Main.java
index 348951c..a50215e 100644
--- a/test/980-redefine-object/src/Main.java
+++ b/test/980-redefine-object/src/Main.java
@@ -288,6 +288,7 @@
System.getenv("DEX_LOCATION") + "/980-redefine-object-ex.jar";
public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
doTest();
}
diff --git a/test/980-redefine-object/src/art/Main.java b/test/980-redefine-object/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/980-redefine-object/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/981-dedup-original-dex/expected.txt b/test/981-dedup-original-dex/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/981-dedup-original-dex/expected.txt
diff --git a/test/981-dedup-original-dex/info.txt b/test/981-dedup-original-dex/info.txt
new file mode 100644
index 0000000..62696e0
--- /dev/null
+++ b/test/981-dedup-original-dex/info.txt
@@ -0,0 +1,4 @@
+Tests basic functions in the jvmti plugin.
+
+This checks that we do not needlessly duplicate the contents of retransformed
+classes' original dex files.
diff --git a/test/981-dedup-original-dex/run b/test/981-dedup-original-dex/run
new file mode 100755
index 0000000..e92b873
--- /dev/null
+++ b/test/981-dedup-original-dex/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/981-dedup-original-dex/src/Main.java b/test/981-dedup-original-dex/src/Main.java
new file mode 100644
index 0000000..288f7ce
--- /dev/null
+++ b/test/981-dedup-original-dex/src/Main.java
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.util.Base64;
+import java.nio.ByteBuffer;
+
+import dalvik.system.ClassExt;
+import dalvik.system.InMemoryDexClassLoader;
+
+public class Main {
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES_1 = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" +
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" +
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" +
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" +
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" +
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" +
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" +
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" +
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform2 {
+ * public void sayHi() {
+ * System.out.println("Goodbye2");
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES_2 = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAjXDED2iflQ3NXbPtBRVjQVMqoDU9nDz/QAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" +
+ "AABqAQAAdAEAAIIBAACZAQAArQEAAMEBAADVAQAA5gEAAOkBAADtAQAAAQIAAAYCAAAPAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAACECAAAA" +
+ "AAAAAQABAAEAAAAWAgAABAAAAHAQAwAAAA4AAwABAAIAAAAbAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAIR29vZGJ5ZTIADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJp" +
+ "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" +
+ "bGFuZy9TeXN0ZW07AA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMzAA" +
+ "A291dAAHcHJpbnRsbgAFc2F5SGkAAQAHDgADAAcOhwAAAAEBAICABKACAQG4AgANAAAAAAAAAAEA" +
+ "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" +
+ "AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" +
+ "AyAAAAIAAAAWAgAAACAAAAEAAAAhAgAAABAAAAEAAAAwAgAA");
+
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform3 {
+ * public void sayHi() {
+ * System.out.println("hello3");
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES_3_INITIAL = Base64.getDecoder().decode(
+ "ZGV4CjAzNQC2W2fBsAeLNAwWYlG8FVigzfsV7nBWITzQAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" +
+ "AABqAQAAeAEAAI8BAACjAQAAtwEAAMsBAADcAQAA3wEAAOMBAAD3AQAA/wEAAAQCAAANAgAAAQAA" +
+ "AAIAAAADAAAABAAAAAUAAAAHAAAABwAAAAUAAAAAAAAACAAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAB8CAAAA" +
+ "AAAAAQABAAEAAAAUAgAABAAAAHAQAwAAAA4AAwABAAIAAAAZAgAACQAAAGIAAAAbAQoAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAMTFRyYW5zZm9ybTM7ABVMamF2YS9pby9QcmludFN0cmVhbTsA" +
+ "EkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3Rl" +
+ "bTsAD1RyYW5zZm9ybTMuamF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2stNC4zMAAGaGVsbG8zAANv" +
+ "dXQAB3ByaW50bG4ABXNheUhpAAIABw4ABAAHDocAAAABAQCAgASgAgEBuAIAAAANAAAAAAAAAAEA" +
+ "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" +
+ "AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" +
+ "AyAAAAIAAAAUAgAAACAAAAEAAAAfAgAAABAAAAEAAAAwAgAA");
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform3 {
+ * public void sayHi() {
+ * System.out.println("Goodbye3");
+ * }
+ * }
+ */
+ private static final byte[] DEX_BYTES_3_FINAL = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBAXE5GthgMydaFBuinf+ZBfXcBYIw2UlXQAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" +
+ "AABqAQAAdAEAAIIBAACZAQAArQEAAMEBAADVAQAA5gEAAOkBAADtAQAAAQIAAAYCAAAPAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAACECAAAA" +
+ "AAAAAQABAAEAAAAWAgAABAAAAHAQAwAAAA4AAwABAAIAAAAbAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAIR29vZGJ5ZTMADExUcmFuc2Zvcm0zOwAVTGphdmEvaW8vUHJp" +
+ "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" +
+ "bGFuZy9TeXN0ZW07AA9UcmFuc2Zvcm0zLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMzAA" +
+ "A291dAAHcHJpbnRsbgAFc2F5SGkAAgAHDgAEAAcOhwAAAAEBAICABKACAQG4AgANAAAAAAAAAAEA" +
+ "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" +
+ "AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" +
+ "AyAAAAIAAAAWAgAAACAAAAEAAAAhAgAAABAAAAEAAAAwAgAA");
+
+ public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
+ try {
+ doTest();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ private static void assertSame(Object a, Object b) throws Exception {
+ if (a != b) {
+ throw new AssertionError("'" + (a != null ? a.toString() : "null") + "' is not the same as " +
+ "'" + (b != null ? b.toString() : "null") + "'");
+ }
+ }
+
+ private static Object getObjectField(Object o, String name) throws Exception {
+ return getObjectField(o, o.getClass(), name);
+ }
+
+ private static Object getObjectField(Object o, Class<?> type, String name) throws Exception {
+ Field f = type.getDeclaredField(name);
+ f.setAccessible(true);
+ return f.get(o);
+ }
+
+ private static Object getOriginalDexFile(Class<?> k) throws Exception {
+ ClassExt ext_data_object = (ClassExt) getObjectField(k, "extData");
+ if (ext_data_object == null) {
+ return null;
+ }
+
+ return getObjectField(ext_data_object, "originalDexFile");
+ }
+
+ public static void doTest() throws Exception {
+ // Make sure both of these are loaded prior to transformations being added so they have the same
+ // original dex files.
+ Transform t1 = new Transform();
+ Transform2 t2 = new Transform2();
+
+ assertSame(null, getOriginalDexFile(t1.getClass()));
+ assertSame(null, getOriginalDexFile(t2.getClass()));
+ assertSame(null, getOriginalDexFile(Main.class));
+
+ addCommonTransformationResult("Transform", new byte[0], DEX_BYTES_1);
+ addCommonTransformationResult("Transform2", new byte[0], DEX_BYTES_2);
+ enableCommonRetransformation(true);
+ doCommonClassRetransformation(Transform.class, Transform2.class);
+
+ assertSame(getOriginalDexFile(t1.getClass()), getOriginalDexFile(t2.getClass()));
+ assertSame(null, getOriginalDexFile(Main.class));
+ // Make sure that the original dex file is a DexCache object.
+ assertSame(getOriginalDexFile(t1.getClass()).getClass(), Class.forName("java.lang.DexCache"));
+
+ // Check that we end up with a byte[] if we do a direct RedefineClasses
+ enableCommonRetransformation(false);
+ doCommonClassRedefinition(Transform.class, new byte[0], DEX_BYTES_1);
+ assertSame((new byte[0]).getClass(), getOriginalDexFile(t1.getClass()).getClass());
+
+ // Check we don't have anything if we don't have any originalDexFile if the onload
+ // transformation doesn't do anything.
+ enableCommonRetransformation(true);
+ Class<?> transform3Class = new InMemoryDexClassLoader(
+ ByteBuffer.wrap(DEX_BYTES_3_INITIAL), Main.class.getClassLoader()).loadClass("Transform3");
+ assertSame(null, getOriginalDexFile(transform3Class));
+
+ // Check that we end up with a java.lang.Long pointer if we do an 'on-load' redefinition.
+ addCommonTransformationResult("Transform3", new byte[0], DEX_BYTES_3_FINAL);
+ enableCommonRetransformation(true);
+ Class<?> transform3ClassTransformed = new InMemoryDexClassLoader(
+ ByteBuffer.wrap(DEX_BYTES_3_INITIAL), Main.class.getClassLoader()).loadClass("Transform3");
+ assertSame(Long.class, getOriginalDexFile(transform3ClassTransformed).getClass());
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRetransformation(Class<?>... target);
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] class_file,
+ byte[] dex_file);
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/ti-agent/common_load.h b/test/981-dedup-original-dex/src/Transform.java
similarity index 73%
copy from test/ti-agent/common_load.h
copy to test/981-dedup-original-dex/src/Transform.java
index e79a006..3c97907 100644
--- a/test/ti-agent/common_load.h
+++ b/test/981-dedup-original-dex/src/Transform.java
@@ -14,15 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
-
-#include "jvmti.h"
-
-namespace art {
-
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+class Transform {
+ public void sayHi() {
+ System.out.println("hello");
+ }
+}
diff --git a/test/ti-agent/common_load.h b/test/981-dedup-original-dex/src/Transform2.java
similarity index 73%
copy from test/ti-agent/common_load.h
copy to test/981-dedup-original-dex/src/Transform2.java
index e79a006..eb22842 100644
--- a/test/ti-agent/common_load.h
+++ b/test/981-dedup-original-dex/src/Transform2.java
@@ -14,15 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
-
-#include "jvmti.h"
-
-namespace art {
-
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+class Transform2 {
+ public void sayHi() {
+ System.out.println("hello2");
+ }
+}
diff --git a/test/981-dedup-original-dex/src/art/Main.java b/test/981-dedup-original-dex/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/981-dedup-original-dex/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/982-ok-no-retransform/expected.txt b/test/982-ok-no-retransform/expected.txt
new file mode 100644
index 0000000..317e967
--- /dev/null
+++ b/test/982-ok-no-retransform/expected.txt
@@ -0,0 +1,2 @@
+hello
+hello
diff --git a/test/982-ok-no-retransform/info.txt b/test/982-ok-no-retransform/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/982-ok-no-retransform/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/982-ok-no-retransform/run b/test/982-ok-no-retransform/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/982-ok-no-retransform/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/982-ok-no-retransform/src/Main.java b/test/982-ok-no-retransform/src/Main.java
new file mode 100644
index 0000000..33e50d7
--- /dev/null
+++ b/test/982-ok-no-retransform/src/Main.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+public class Main {
+
+ public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ enableCommonRetransformation(true);
+ doCommonClassRetransformation(Transform.class);
+ t.sayHi();
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRetransformation(Class<?>... target);
+ private static native void enableCommonRetransformation(boolean enable);
+}
diff --git a/test/982-ok-no-retransform/src/Transform.java b/test/982-ok-no-retransform/src/Transform.java
new file mode 100644
index 0000000..8e8af35
--- /dev/null
+++ b/test/982-ok-no-retransform/src/Transform.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+    // the actual printed String, which was making the test inaccurately passing
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+}
diff --git a/test/982-ok-no-retransform/src/art/Main.java b/test/982-ok-no-retransform/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/982-ok-no-retransform/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/983-source-transform-verify/expected.txt b/test/983-source-transform-verify/expected.txt
new file mode 100644
index 0000000..0a94212
--- /dev/null
+++ b/test/983-source-transform-verify/expected.txt
@@ -0,0 +1,2 @@
+Dex file hook for Transform
+Dex file hook for java/lang/Object
diff --git a/test/983-source-transform-verify/info.txt b/test/983-source-transform-verify/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/983-source-transform-verify/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/983-source-transform-verify/run b/test/983-source-transform-verify/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/983-source-transform-verify/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
new file mode 100644
index 0000000..3ef3c7c
--- /dev/null
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <iostream>
+#include <vector>
+
+#include "android-base/stringprintf.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "bytecode_utils.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "jit/jit.h"
+#include "jni.h"
+#include "native_stack_dump.h"
+#include "jvmti.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test983SourceTransformVerify {
+
+constexpr bool kSkipInitialLoad = true;
+
+// The hook we are using.
+void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
+ JNIEnv* jni_env ATTRIBUTE_UNUSED,
+ jclass class_being_redefined,
+ jobject loader ATTRIBUTE_UNUSED,
+ const char* name,
+ jobject protection_domain ATTRIBUTE_UNUSED,
+ jint class_data_len,
+ const unsigned char* class_data,
+ jint* new_class_data_len ATTRIBUTE_UNUSED,
+ unsigned char** new_class_data ATTRIBUTE_UNUSED) {
+ if (kSkipInitialLoad && class_being_redefined == nullptr) {
+ // Something got loaded concurrently. Just ignore it for now.
+ return;
+ }
+ std::cout << "Dex file hook for " << name << std::endl;
+ if (IsJVM()) {
+ return;
+ }
+ std::string error;
+ std::unique_ptr<const DexFile> dex(DexFile::Open(class_data,
+ class_data_len,
+ "fake_location.dex",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error));
+ if (dex.get() == nullptr) {
+ std::cout << "Failed to verify dex file for " << name << " because " << error << std::endl;
+ return;
+ }
+ for (uint32_t i = 0; i < dex->NumClassDefs(); i++) {
+ const DexFile::ClassDef& def = dex->GetClassDef(i);
+ const uint8_t* data_item = dex->GetClassData(def);
+ if (data_item == nullptr) {
+ continue;
+ }
+ for (ClassDataItemIterator it(*dex, data_item); it.HasNext(); it.Next()) {
+ if (!it.IsAtMethod() || it.GetMethodCodeItem() == nullptr) {
+ continue;
+ }
+ for (CodeItemIterator code_it(*it.GetMethodCodeItem()); !code_it.Done(); code_it.Advance()) {
+ const Instruction& inst = code_it.CurrentInstruction();
+ int forbiden_flags = (Instruction::kVerifyError | Instruction::kVerifyRuntimeOnly);
+ if (inst.Opcode() == Instruction::RETURN_VOID_NO_BARRIER ||
+ (inst.GetVerifyExtraFlags() & forbiden_flags) != 0) {
+ std::cout << "Unexpected instruction found in " << dex->PrettyMethod(it.GetMemberIndex())
+ << " [Dex PC: 0x" << std::hex << code_it.CurrentDexPc() << std::dec << "] : "
+ << inst.DumpString(dex.get()) << std::endl;
+ continue;
+ }
+ }
+ }
+ }
+}
+
+// Get all capabilities except those related to retransformation.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+ jvmtiEventCallbacks cb;
+ memset(&cb, 0, sizeof(cb));
+ cb.ClassFileLoadHook = CheckDexFileHook;
+ if (jvmti_env->SetEventCallbacks(&cb, sizeof(cb)) != JVMTI_ERROR_NONE) {
+ printf("Unable to set class file load hook cb!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test983SourceTransformVerify
+} // namespace art
diff --git a/test/983-source-transform-verify/source_transform.h b/test/983-source-transform-verify/source_transform.h
new file mode 100644
index 0000000..db9415a
--- /dev/null
+++ b/test/983-source-transform-verify/source_transform.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_983_SOURCE_TRANSFORM_VERIFY_SOURCE_TRANSFORM_H_
+#define ART_TEST_983_SOURCE_TRANSFORM_VERIFY_SOURCE_TRANSFORM_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test983SourceTransformVerify {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test983SourceTransformVerify
+} // namespace art
+
+#endif // ART_TEST_983_SOURCE_TRANSFORM_VERIFY_SOURCE_TRANSFORM_H_
diff --git a/test/983-source-transform-verify/src/Main.java b/test/983-source-transform-verify/src/Main.java
new file mode 100644
index 0000000..ad081a2
--- /dev/null
+++ b/test/983-source-transform-verify/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+public class Main {
+
+ public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
+ doTest();
+ }
+
+ public static void doTest() {
+ Transform abc = new Transform();
+ enableCommonRetransformation(true);
+ doCommonClassRetransformation(Transform.class);
+ doCommonClassRetransformation(Object.class);
+ enableCommonRetransformation(false);
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRetransformation(Class<?>... target);
+ private static native void enableCommonRetransformation(boolean enable);
+}
diff --git a/test/983-source-transform-verify/src/Transform.java b/test/983-source-transform-verify/src/Transform.java
new file mode 100644
index 0000000..8e8af35
--- /dev/null
+++ b/test/983-source-transform-verify/src/Transform.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+ // the actual printed String, which was making the test inaccurately passing
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+}
diff --git a/test/983-source-transform-verify/src/art/Main.java b/test/983-source-transform-verify/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/983-source-transform-verify/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/984-obsolete-invoke/expected.txt b/test/984-obsolete-invoke/expected.txt
new file mode 100644
index 0000000..8052c46
--- /dev/null
+++ b/test/984-obsolete-invoke/expected.txt
@@ -0,0 +1,10 @@
+hello
+transforming calling function
+Retrieving obsolete method from current stack
+goodbye
+Invoking redefined version of method.
+Hello - Transformed
+Not doing anything here
+Goodbye - Transformed
+invoking obsolete method
+Caught expected error from attempting to invoke an obsolete method.
diff --git a/test/984-obsolete-invoke/info.txt b/test/984-obsolete-invoke/info.txt
new file mode 100644
index 0000000..48e0de0
--- /dev/null
+++ b/test/984-obsolete-invoke/info.txt
@@ -0,0 +1,4 @@
+Tests basic obsolete method support
+
+Tests that one specific way of potentially executing obsolete method code does
+not work.
diff --git a/test/984-obsolete-invoke/obsolete_invoke.cc b/test/984-obsolete-invoke/obsolete_invoke.cc
new file mode 100644
index 0000000..27e36ba
--- /dev/null
+++ b/test/984-obsolete-invoke/obsolete_invoke.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "android-base/macros.h"
+#include "jni.h"
+#include "jvmti.h"
+#include "mirror/class-inl.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "test_env.h"
+
+#include "jvmti_helper.h"
+
+namespace art {
+namespace Test984ObsoleteInvoke {
+
+static constexpr size_t kNumFrames = 30;
+
+extern "C" JNIEXPORT jobject JNICALL Java_Main_getFirstObsoleteMethod984(JNIEnv* env, jclass) {
+ jthread cur;
+ jint frame_count;
+ jvmtiFrameInfo frames[kNumFrames];
+ // jint cur_start = 0;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetCurrentThread(&cur))) {
+ // ERROR
+ return nullptr;
+ }
+ if (JvmtiErrorToException(env, jvmti_env,
+ jvmti_env->GetStackTrace(cur,
+ 0,
+ kNumFrames,
+ frames,
+ &frame_count))) {
+ // ERROR
+ return nullptr;
+ }
+ for (jint i = 0; i < frame_count; i++) {
+ jmethodID method = frames[i].method;
+ jboolean is_obsolete = false;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->IsMethodObsolete(method, &is_obsolete))) {
+ // ERROR
+ return nullptr;
+ }
+ if (is_obsolete) {
+ return env->ToReflectedMethod(env->FindClass("java/lang/reflect/Method"),
+ method,
+ JNI_TRUE);
+ }
+ }
+ ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+ env->ThrowNew(rt_exception.get(), "Unable to find obsolete method!");
+ return nullptr;
+}
+
+} // namespace Test984ObsoleteInvoke
+} // namespace art
diff --git a/test/984-obsolete-invoke/run b/test/984-obsolete-invoke/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/984-obsolete-invoke/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/984-obsolete-invoke/src/Main.java b/test/984-obsolete-invoke/src/Main.java
new file mode 100644
index 0000000..418d64d
--- /dev/null
+++ b/test/984-obsolete-invoke/src/Main.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public static void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // r.run();
+ // System.out.println("Goodbye - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
+ "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
+ "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
+ "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
+ "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQAJAA0ADgABAAsAAAA7AAIA" +
+ "AQAAABeyAAISA7YABCq5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
+ "AQAPAAAAAgAQ");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCMekj2NPwzrEp/v+2yzzSg8xZvBtU1bC1QAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
+ "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
+ "AwAAAA4AAwABAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAgBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
+ "LjMxAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAIAAICABMQCAQnc" +
+ "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
+ "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
+ "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
+
+ public static void main(String[] args) {
+ art.Main.bindAgentJNIForClass(Main.class);
+ doTest();
+ }
+
+ // The Method that holds an obsolete method pointer. We will fill it in by getting a jmethodID
+ // from a stack with an obsolete method in it. There should be no other ways to obtain an obsolete
+ // jmethodID in ART without unsafe casts.
+ public static Method obsolete_method = null;
+
+ public static void doTest() {
+ // Capture the obsolete method.
+ //
+ // NB The obsolete method must be direct so that we will not look in the receiver type to get
+ // the actual method.
+ Transform.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ System.out.println("Retrieving obsolete method from current stack");
+ // This should get the obsolete sayHi method (as the only obsolete method on the current
+ // thread's stack).
+ Main.obsolete_method = getFirstObsoleteMethod984();
+ });
+
+ // Prove we did actually redefine something.
+ System.out.println("Invoking redefined version of method.");
+ Transform.sayHi(() -> { System.out.println("Not doing anything here"); });
+
+ System.out.println("invoking obsolete method");
+ try {
+ obsolete_method.invoke(null, (Runnable)() -> {
+ throw new Error("Unexpected code running from invoke of obsolete method!");
+ });
+ throw new Error("Running obsolete method did not throw exception");
+ } catch (Throwable e) {
+ if (e instanceof InternalError || e.getCause() instanceof InternalError) {
+ System.out.println("Caught expected error from attempting to invoke an obsolete method.");
+ } else {
+ System.out.println("Unexpected error type for calling obsolete method! Expected either "
+ + "an InternalError or something that is caused by an InternalError.");
+ throw new Error("Unexpected error caught: ", e);
+ }
+ }
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ // Gets the first obsolete method on the current thread's stack (NB only looks through the first 30
+ // stack frames).
+ private static native Method getFirstObsoleteMethod984();
+}
diff --git a/test/911-get-stack-trace/src/ControlData.java b/test/984-obsolete-invoke/src/Transform.java
similarity index 65%
copy from test/911-get-stack-trace/src/ControlData.java
copy to test/984-obsolete-invoke/src/Transform.java
index 76ac4b8..536de84 100644
--- a/test/911-get-stack-trace/src/ControlData.java
+++ b/test/984-obsolete-invoke/src/Transform.java
@@ -14,18 +14,12 @@
* limitations under the License.
*/
-import java.util.concurrent.CountDownLatch;
-
-public class ControlData {
- CountDownLatch reached;
- Object waitFor = null;
- volatile boolean stop = false;
-
- public ControlData() {
- this(1);
- }
-
- public ControlData(int latchCount) {
- reached = new CountDownLatch(latchCount);
+class Transform {
+ // This method must be 'static' so that when we try to invoke it through a j.l.r.Method we will
+ // simply use the jmethodID directly and not do any lookup in any receiver object.
+ public static void sayHi(Runnable r) {
+ System.out.println("hello");
+ r.run();
+ System.out.println("goodbye");
}
}
diff --git a/test/984-obsolete-invoke/src/art/Main.java b/test/984-obsolete-invoke/src/art/Main.java
new file mode 100644
index 0000000..8b01920
--- /dev/null
+++ b/test/984-obsolete-invoke/src/art/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+}
diff --git a/test/Android.bp b/test/Android.bp
index 4ebfd74..9a8e174 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -241,26 +241,25 @@
}
art_cc_defaults {
- name: "libtiagent-defaults",
+ name: "libtiagent-base-defaults",
defaults: ["libartagent-defaults"],
srcs: [
- // This is to get the IsInterpreted native method.
- "common/stack_inspect.cc",
- "common/runtime_state.cc",
- "ti-agent/common_load.cc",
- "ti-agent/common_helper.cc",
- "901-hello-ti-agent/basics.cc",
+ // These are the ART-independent parts.
+ "ti-agent/agent_common.cc",
+ "ti-agent/agent_startup.cc",
+ "ti-agent/jni_binder.cc",
+ "ti-agent/jvmti_helper.cc",
+ "ti-agent/test_env.cc",
+ // This is the list of non-special OnLoad things and excludes BCI and anything that depends
+ // on ART internals.
"903-hello-tagging/tagging.cc",
"904-object-allocation/tracking.cc",
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
"907-get-loaded-classes/get_loaded_classes.cc",
"908-gc-start-finish/gc_callbacks.cc",
- "909-attach-agent/attach.cc",
"910-methods/methods.cc",
"911-get-stack-trace/stack_trace.cc",
- "912-classes/classes.cc",
- "913-heaps/heaps.cc",
"918-fields/fields.cc",
"920-objects/objects.cc",
"922-properties/properties.cc",
@@ -272,15 +271,36 @@
"929-search/search.cc",
"931-agent-thread/agent_thread.cc",
"933-misc-events/misc_events.cc",
- "936-search-onload/search_onload.cc",
- "944-transform-classloaders/classloader.cc",
- "945-obsolete-native/obsolete_native.cc",
- "980-redefine-object/redefine_object.cc",
],
shared_libs: [
"libbase",
],
header_libs: ["libopenjdkjvmti_headers"],
+ include_dirs: ["art/test/ti-agent"],
+}
+
+art_cc_defaults {
+ name: "libtiagent-defaults",
+ defaults: ["libtiagent-base-defaults"],
+ srcs: [
+ // This is to get the IsInterpreted native method.
+ "common/stack_inspect.cc",
+ "common/runtime_state.cc",
+ // This includes the remaining test functions. We should try to refactor things to
+ // make this list smaller.
+ "ti-agent/common_helper.cc",
+ "ti-agent/common_load.cc",
+ "901-hello-ti-agent/basics.cc",
+ "909-attach-agent/attach.cc",
+ "912-classes/classes.cc",
+ "913-heaps/heaps.cc",
+ "936-search-onload/search_onload.cc",
+ "944-transform-classloaders/classloader.cc",
+ "945-obsolete-native/obsolete_native.cc",
+ "980-redefine-object/redefine_object.cc",
+ "983-source-transform-verify/source_transform.cc",
+ "984-obsolete-invoke/obsolete_invoke.cc",
+ ],
}
art_cc_test_library {
@@ -298,6 +318,12 @@
shared_libs: ["libartd"],
}
+art_cc_test_library {
+ name: "libctstiagent",
+ defaults: ["libtiagent-base-defaults"],
+ export_include_dirs: ["ti-agent"],
+}
+
cc_defaults {
name: "libarttest-defaults",
defaults: [
diff --git a/test/Android.run-test-jvmti-java-library.mk b/test/Android.run-test-jvmti-java-library.mk
new file mode 100644
index 0000000..b6da92b
--- /dev/null
+++ b/test/Android.run-test-jvmti-java-library.mk
@@ -0,0 +1,110 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# Main shim classes. We use one that exposes the tagging common functionality.
+LOCAL_MAIN_SHIM := 903-hello-tagging/src/art/Main.java
+LOCAL_SRC_FILES := $(LOCAL_MAIN_SHIM)
+
+# Actual test classes.
+LOCAL_SRC_FILES += \
+ 901-hello-ti-agent/src/art/Test901.java \
+ 903-hello-tagging/src/art/Test903.java \
+ 904-object-allocation/src/art/Test904.java \
+ 905-object-free/src/art/Test905.java \
+ 906-iterate-heap/src/art/Test906.java \
+ 907-get-loaded-classes/src/art/Test907.java \
+ 908-gc-start-finish/src/art/Test908.java \
+ 910-methods/src/art/Test910.java \
+ 911-get-stack-trace/src/art/Test911.java \
+ 911-get-stack-trace/src/art/AllTraces.java \
+ 911-get-stack-trace/src/art/ControlData.java \
+ 911-get-stack-trace/src/art/Frames.java \
+ 911-get-stack-trace/src/art/OtherThread.java \
+ 911-get-stack-trace/src/art/PrintThread.java \
+ 911-get-stack-trace/src/art/Recurse.java \
+ 911-get-stack-trace/src/art/SameThread.java \
+ 911-get-stack-trace/src/art/ThreadListTraces.java \
+ 913-heaps/src/art/Test913.java \
+ 918-fields/src/art/Test918.java \
+ 920-objects/src/art/Test920.java \
+ 922-properties/src/art/Test922.java \
+ 923-monitors/src/art/Test923.java \
+ 924-threads/src/art/Test924.java \
+ 925-threadgroups/src/art/Test925.java \
+ 927-timers/src/art/Test927.java \
+ 928-jni-table/src/art/Test928.java \
+ 931-agent-thread/src/art/Test931.java \
+ 933-misc-events/src/art/Test933.java \
+
+JVMTI_RUN_TEST_GENERATED_NUMBERS := \
+ 901 \
+ 903 \
+ 904 \
+ 905 \
+ 906 \
+ 907 \
+ 908 \
+ 910 \
+ 911 \
+ 913 \
+ 918 \
+ 920 \
+ 922 \
+ 923 \
+ 924 \
+ 925 \
+ 927 \
+ 928 \
+ 931 \
+ 933 \
+
+# Try to enforce that the directories correspond to the Java files we pull in.
+JVMTI_RUN_TEST_DIR_CHECK := $(sort $(foreach DIR,$(JVMTI_RUN_TEST_GENERATED_NUMBERS), \
+ $(filter $(DIR)%,$(LOCAL_SRC_FILES))))
+ifneq ($(sort $(LOCAL_SRC_FILES)),$(JVMTI_RUN_TEST_DIR_CHECK))
+ $(error Missing file, compare $(sort $(LOCAL_SRC_FILES)) with $(JVMTI_RUN_TEST_DIR_CHECK))
+endif
+
+LOCAL_MODULE_CLASS := JAVA_LIBRARIES
+LOCAL_MODULE_TAGS := optional
+LOCAL_JAVA_LANGUAGE_VERSION := 1.8
+LOCAL_MODULE := run-test-jvmti-java
+
+GENERATED_SRC_DIR := $(call local-generated-sources-dir)
+JVMTI_RUN_TEST_GENERATED_FILES := \
+ $(foreach NR,$(JVMTI_RUN_TEST_GENERATED_NUMBERS),$(GENERATED_SRC_DIR)/results.$(NR).expected.txt)
+
+define GEN_JVMTI_RUN_TEST_GENERATED_FILE
+
+GEN_INPUT := $(wildcard $(LOCAL_PATH)/$(1)*/expected.txt)
+GEN_OUTPUT := $(GENERATED_SRC_DIR)/results.$(1).expected.txt
+$$(GEN_OUTPUT): $$(GEN_INPUT)
+ cp $$< $$@
+
+GEN_INPUT :=
+GEN_OUTPUT :=
+
+endef
+
+$(foreach NR,$(JVMTI_RUN_TEST_GENERATED_NUMBERS),\
+ $(eval $(call GEN_JVMTI_RUN_TEST_GENERATED_FILE,$(NR))))
+LOCAL_JAVA_RESOURCE_FILES := $(JVMTI_RUN_TEST_GENERATED_FILES)
+
+include $(BUILD_JAVA_LIBRARY)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 100b031..ece5762 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -43,18 +43,6 @@
TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES := setup-jack-server
-ifeq ($(ART_TEST_DEBUG_GC),true)
- ART_TEST_WITH_STRACE := true
-endif
-
-ifeq ($(ART_TEST_BISECTION),true)
- # Need to keep rebuilding the test to bisection search it.
- ART_TEST_RUN_TEST_NO_PREBUILD := true
- ART_TEST_RUN_TEST_PREBUILD := false
- # Bisection search writes to standard output.
- ART_TEST_QUIET := false
-endif
-
# Helper to create individual build targets for tests. Must be called with $(eval).
# $(1): the test number
define define-build-art-run-test
@@ -65,7 +53,11 @@
run_test_options += --quiet
endif
$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
+$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
+$$(dmart_target): $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
+endif
+$$(dmart_target):
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
@@ -97,699 +89,11 @@
include $(BUILD_PHONY_PACKAGE)
-# Clear temp vars.
-art_run_tests_build_dir :=
-art_run_tests_install_dir :=
-define-build-art-run-test :=
-TEST_ART_RUN_TEST_BUILD_RULES :=
-
-########################################################################
-# General rules to build and run a run-test.
-
-TARGET_TYPES := host target
-PREBUILD_TYPES :=
-ifeq ($(ART_TEST_RUN_TEST_PREBUILD),true)
- PREBUILD_TYPES += prebuild
-endif
-ifeq ($(ART_TEST_RUN_TEST_NO_PREBUILD),true)
- PREBUILD_TYPES += no-prebuild
-endif
-ifeq ($(ART_TEST_RUN_TEST_NO_DEX2OAT),true)
- PREBUILD_TYPES += no-dex2oat
-endif
-COMPILER_TYPES :=
-ifeq ($(ART_TEST_INTERPRETER_ACCESS_CHECKS),true)
- COMPILER_TYPES += interp-ac
-endif
-ifeq ($(ART_TEST_INTERPRETER),true)
- COMPILER_TYPES += interpreter
-endif
-ifeq ($(ART_TEST_JIT),true)
- COMPILER_TYPES += jit
-endif
-OPTIMIZING_COMPILER_TYPES :=
-ifeq ($(ART_TEST_OPTIMIZING),true)
- COMPILER_TYPES += optimizing
- OPTIMIZING_COMPILER_TYPES += optimizing
-endif
-ifeq ($(ART_TEST_OPTIMIZING_GRAPH_COLOR),true)
- COMPILER_TYPES += regalloc_gc
- OPTIMIZING_COMPILER_TYPES += regalloc_gc
-endif
-RELOCATE_TYPES := no-relocate
-ifeq ($(ART_TEST_RUN_TEST_RELOCATE),true)
- RELOCATE_TYPES += relocate
-endif
-ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
- RELOCATE_TYPES += relocate-npatchoat
-endif
-TRACE_TYPES := ntrace
-ifeq ($(ART_TEST_TRACE),true)
- TRACE_TYPES += trace
-endif
-ifeq ($(ART_TEST_TRACE_STREAM),true)
- TRACE_TYPES += stream
-endif
-GC_TYPES := cms
-ifeq ($(ART_TEST_GC_STRESS),true)
- GC_TYPES += gcstress
-endif
-ifeq ($(ART_TEST_GC_VERIFY),true)
- GC_TYPES += gcverify
-endif
-JNI_TYPES := checkjni
-ifeq ($(ART_TEST_JNI_FORCECOPY),true)
- JNI_TYPES += forcecopy
-endif
-ifeq ($(ART_TEST_RUN_TEST_IMAGE),true)
-IMAGE_TYPES := picimage
-endif
-ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
- IMAGE_TYPES += no-image
-endif
-ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true)
- IMAGE_TYPES := multipicimage
-endif
-PICTEST_TYPES := npictest
-ifeq ($(ART_TEST_PIC_TEST),true)
- PICTEST_TYPES += pictest
-endif
-RUN_TYPES :=
-ifeq ($(ART_TEST_RUN_TEST_DEBUG),true)
- RUN_TYPES += debug
-endif
-ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
- RUN_TYPES += ndebug
-endif
-DEBUGGABLE_TYPES := ndebuggable
-ifeq ($(ART_TEST_RUN_TEST_DEBUGGABLE),true)
-DEBUGGABLE_TYPES += debuggable
-endif
-ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX)
-ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX)
-ifeq ($(ART_TEST_RUN_TEST_2ND_ARCH),true)
- ADDRESS_SIZES_TARGET += $(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
- ADDRESS_SIZES_HOST += $(2ND_ART_PHONY_TEST_HOST_SUFFIX)
-endif
-ALL_ADDRESS_SIZES := 64 32
-
-# List all run test names with number arguments agreeing with the comment above.
-define all-run-test-names
- $(foreach target, $(1), \
- $(foreach run-type, $(2), \
- $(foreach prebuild, $(3), \
- $(foreach compiler, $(4), \
- $(foreach relocate, $(5), \
- $(foreach trace, $(6), \
- $(foreach gc, $(7), \
- $(foreach jni, $(8), \
- $(foreach image, $(9), \
- $(foreach pictest, $(10), \
- $(foreach debuggable, $(11), \
- $(foreach test, $(12), \
- $(foreach address_size, $(13), \
- test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(debuggable)-$(test)$(address_size) \
- )))))))))))))
-endef # all-run-test-names
-
-# To generate a full list or tests:
-# $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
-# $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-# $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-
# Convert's a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
define name-to-var
$(shell echo $(1) | tr '[:lower:]' '[:upper:]' | tr '-' '_')
endef # name-to-var
-# Disable 115-native-bridge, it fails when run through make b/35984597.
-# Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
-# Disable 080-oom-fragmentation due to flakes. b/33795328
-# Disable 497-inlining-and-class-loader and 542-unresolved-access-check until
-# they are rewritten. These tests use a broken class loader that tries to
-# register a dex file that's already registered with a different loader.
-# b/34193123
-ART_TEST_RUN_TEST_SKIP += \
- 115-native-bridge \
- 153-reference-stress \
- 080-oom-fragmentation \
- 497-inlining-and-class-loader \
- 542-unresolved-access-check
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(ART_TEST_RUN_TEST_SKIP), $(ALL_ADDRESS_SIZES))
-
-
-# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
-TEST_ART_BROKEN_ALL_TARGET_TESTS := \
- 149-suspend-all-stress \
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_ALL_TARGET_TESTS), \
- $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_ALL_TARGET_TESTS :=
-
-# Tests that are timing sensitive and flaky on heavily loaded systems.
-TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
- 002-sleep \
- 053-wait-some \
- 055-enum-performance \
- 133-static-invoke-super
-
-# disable timing sensitive tests on "dist" builds.
-ifdef dist_goal
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-# 147-stripped-dex-fallback isn't supported on device because --strip-dex
-# requires the zip command.
-# 569-checker-pattern-replacement tests behaviour present only on host.
-TEST_ART_BROKEN_TARGET_TESTS := \
- 147-stripped-dex-fallback \
- 569-checker-pattern-replacement
-
-ifneq (,$(filter target,$(TARGET_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TARGET_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_TARGET_TESTS :=
-
-# Tests that require python3.
-TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
- 960-default-smali \
- 961-default-iface-resolution-gen \
- 964-default-iface-init-gen \
- 968-default-partial-compile-gen \
- 969-iface-super \
- 970-iface-super-resolution-gen \
- 971-iface-super
-
-# Check if we have python3 to run our tests.
-ifeq ($(wildcard /usr/bin/python3),)
- $(warning "No python3 found. Disabling tests: $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS)")
-
- # Currently disable tests requiring python3 when it is not installed.
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
-
-# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
-TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
- 116-nodex2oat \
- 118-noimage-dex2oat \
- 134-nodex2oat-nofallback
-
-ifneq (,$(filter prebuild,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),prebuild, \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
-
-# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
-# 529 and 555: b/27784033
-TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
- 117-nopatchoat \
- 147-stripped-dex-fallback \
- 554-jit-profile-file \
- 529-checker-unresolved \
- 555-checker-regression-x86const \
- 608-checker-unresolved-lse
-
-ifneq (,$(filter no-prebuild,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-prebuild, \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_NO_PREBUILD_TESTS :=
-
-# Note 117-nopatchoat is not broken per-se it just doesn't work (and isn't meant to) without
-# --prebuild --relocate
-TEST_ART_BROKEN_NO_RELOCATE_TESTS := \
- 117-nopatchoat \
- 118-noimage-dex2oat \
- 119-noimage-patchoat \
- 554-jit-profile-file
-
-ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), no-relocate,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
-
-# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
-# 629 requires compilation.
-# 080 and 530: b/36377828
-TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
- 137-cfi \
- 530-checker-lse \
- 080-oom-throw \
- 629-vdex-speed
-
-ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- interp-ac,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
-
-# Tests that are broken with GC stress.
-# * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
-# hope the second process got into the expected state. The slowness of gcstress makes this bad.
-# * 152-dead-large-object requires a heap larger than what gcstress uses.
-# * 908-gc-start-finish expects GCs only to be run at clear points. The reduced heap size makes
-# this non-deterministic. Same for 913.
-# * 961-default-iface-resolution-gen and 964-default-iface-init-genare very long tests that often
-# will take more than the timeout to run when gcstress is enabled. This is because gcstress
-# slows down allocations significantly which these tests do a lot.
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
- 137-cfi \
- 152-dead-large-object \
- 154-gc-loop \
- 908-gc-start-finish \
- 913-heaps \
- 961-default-iface-resolution-gen \
- 964-default-iface-init-gen \
-
-ifneq (,$(filter gcstress,$(GC_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
-
-# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
- $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), 115-native-bridge, \
- $(ALL_ADDRESS_SIZES))
-
-# 130-hprof dumps the heap and runs hprof-conv to check whether the file is somewhat readable. This
-# is only possible on the host.
-# TODO: Turn off all the other combinations, this is more about testing actual ART code. A gtest is
-# very hard to write here, as (for a complete test) JDWP must be set up.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
-
-# 131 is an old test. The functionality has been implemented at an earlier stage and is checked
-# in tests 138. Blacklisted for debug builds since these builds have duplicate classes checks which
-# punt to interpreter.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),debug,$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
-
-# 138-duplicate-classes-check. Turned on for debug builds since debug builds have duplicate classes
-# checks enabled, b/2133391.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))
-
-# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
-# Therefore we shouldn't run them in situations where we actually don't have these since they
-# explicitly test for them. These all also assume we have an image.
-# 147-stripped-dex-fallback is disabled because it requires --prebuild.
-# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
-# 629-vdex-speed requires compiled code.
-TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \
- 116-nodex2oat \
- 117-nopatchoat \
- 118-noimage-dex2oat \
- 119-noimage-patchoat \
- 137-cfi \
- 138-duplicate-classes-check2 \
- 147-stripped-dex-fallback \
- 554-jit-profile-file \
- 616-cha \
- 616-cha-abstract \
- 912-classes \
- 629-vdex-speed
-
-# This test fails without an image.
-# 018, 961, 964, 968 often time out. b/34369284
-TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS := \
- 137-cfi \
- 138-duplicate-classes-check \
- 018-stack-overflow \
- 476-clinit-inline-static-invoke \
- 496-checker-inlining-class-loader \
- 637-checker-throw-inline \
- 616-cha \
- 616-cha-abstract \
- 912-classes \
- 961-default-iface-resolution-gen \
- 964-default-iface-init \
- 968-default-partial-compile-gen \
-
-ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-
-ifneq (,$(filter no-image,$(IMAGE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
- $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
- $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-ifneq (,$(filter relocate-npatchoat,$(RELOCATE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), relocate-npatchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
-
-# 137:
-# This test unrolls and expects managed frames, but tracing means we run the interpreter.
-# 802 and 570-checker-osr:
-# This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
-# when already tracing, and writes an error message that we do not want to check for.
-# 130 occasional timeout b/32383962.
-# 629 requires compilation.
-TEST_ART_BROKEN_TRACING_RUN_TESTS := \
- 087-gc-after-link \
- 130-hprof \
- 137-cfi \
- 141-class-unload \
- 570-checker-osr \
- 629-vdex-speed \
- 802-deoptimization
-
-ifneq (,$(filter trace stream,$(TRACE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),trace stream,$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_TRACING_RUN_TESTS :=
-
-# These tests expect JIT compilation, which is suppressed when tracing.
-TEST_ART_BROKEN_JIT_TRACING_RUN_TESTS := \
- 604-hot-static-interface \
- 612-jit-dex-cache \
- 613-inlining-dex-cache \
- 616-cha \
- 626-set-resolved-string \
-
-ifneq (,$(filter trace stream,$(TRACE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- jit,$(RELOCATE_TYPES),trace stream,$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_JIT_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_JIT_TRACING_RUN_TESTS :=
-
-# Known broken tests for the interpreter.
-# CFI unwinding expects managed frames.
-# 629 requires compilation.
-TEST_ART_BROKEN_INTERPRETER_RUN_TESTS := \
- 137-cfi \
- 554-jit-profile-file \
- 629-vdex-speed
-
-ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- interpreter,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
-
-# Known broken tests for the JIT.
-# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
-# also uses Generic JNI instead of the JNI compiler.
-# 154-gc-loop requires more deterministic GC behavior than what JIT does.
-# Test 906 iterates the heap filtering with different options. No instances should be created
-# between those runs to be able to have precise checks.
-# Test 629 requires compilation.
-# 912: b/34655682
-TEST_ART_BROKEN_JIT_RUN_TESTS := \
- 137-cfi \
- 154-gc-loop \
- 629-vdex-speed \
- 904-object-allocation \
- 906-iterate-heap \
-
-ifneq (,$(filter jit,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_JIT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_JIT_RUN_TESTS :=
-
-# Known broken tests for the graph coloring register allocator.
-# These tests were based on the linear scan allocator, which makes different decisions than
-# the graph coloring allocator. (These attempt to test for code quality, not correctness.)
-TEST_ART_BROKEN_OPTIMIZING_GRAPH_COLOR := \
- 570-checker-select \
- 484-checker-register-hints
-
-ifneq (,$(filter regalloc_gc,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- regalloc_gc,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_GRAPH_COLOR),$(ALL_ADDRESS_SIZES))
-endif
-
-# Known broken tests for the mips32 optimizing compiler backend.
-TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
-
-ifeq (mips,$(TARGET_ARCH))
- ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS :=
-
-# Known broken tests for the mips64 optimizing compiler backend.
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS := \
-
-ifeq (mips64,$(TARGET_ARCH))
- ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS :=
-
-# Tests that should fail when the optimizing compiler compiles them non-debuggable.
-TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
- 454-get-vreg \
- 457-regs \
- 602-deoptimizeable
-
-ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),ndebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS :=
-
-# Tests that should fail when the optimizing compiler compiles them debuggable.
-TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS := \
-
-ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS :=
-
-# Tests that should fail in the read barrier configuration with the interpreter.
-TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS :=
-
-# Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
-TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
-
-# Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
-TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
-
-# Tests failing in non-Baker read barrier configurations with the Optimizing compiler (AOT).
-# 537 and 641: Expect an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
-# handled in non-Baker read barrier configurations.
-TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS := \
- 537-checker-arraycopy \
- 641-checker-arraycopy
-
-# Tests failing in non-Baker read barrier configurations with JIT (Optimizing compiler).
-# 537 and 641: Expect an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
-# handled in non-Baker read barrier configurations.
-TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS := \
- 537-checker-arraycopy \
- 641-checker-arraycopy
-
-ifeq ($(ART_USE_READ_BARRIER),true)
- ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),interpreter,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
- $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
-
- ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \
- $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- ifneq ($(ART_READ_BARRIER_TYPE),BAKER)
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \
- $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
- endif
-
- ifneq (,$(filter jit,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
- $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- ifneq ($(ART_READ_BARRIER_TYPE),BAKER)
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
- $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
-TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
-
-TEST_ART_BROKEN_NPIC_RUN_TESTS := 596-app-images
-ifneq (,$(filter npictest,$(PICTEST_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- ${COMPILER_TYPES},$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),npictest,$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NPIC_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-# Tests that should fail in the heap poisoning configuration with the Optimizing compiler.
-# 055: Exceeds run time limits due to heap poisoning instrumentation (on ARM and ARM64 devices).
-TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := \
- 055-enum-performance
-
-ifeq ($(ART_HEAP_POISONING),true)
- ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
-
-# 909: Tests that check semantics for a non-debuggable app.
-# 137: relies on AOT code and debuggable makes us JIT always.
-TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
- 137-cfi \
- 909-attach-agent \
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS :=
-
-# Tests incompatible with bisection bug search. Sorted by incompatibility reason.
-# 000 through 595 do not compile anything. 089 tests a build failure. 018 through 137
-# run dalvikvm more than once. 115 and 088 assume they are always compiled.
-# 055 tests performance which is degraded during bisecting.
-TEST_ART_INCOMPATIBLE_BISECTION_SEARCH_RUN_TESTS := \
- 000-nop \
- 134-nodex2oat-nofallback \
- 147-stripped-dex-fallback \
- 595-profile-saving \
- \
- 089-many-methods \
- \
- 018-stack-overflow \
- 116-nodex2oat \
- 117-nopatchoat \
- 118-noimage-dex2oat \
- 119-noimage-patchoat \
- 126-miranda-multidex \
- 137-cfi \
- \
- 115-native-bridge \
- 088-monitor-verification \
- \
- 055-enum-performance
-
-ifeq ($(ART_TEST_BISECTION),true)
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
- $(TEST_ART_INCOMPATIBLE_BISECTION_SEARCH_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-# Clear variables ahead of appending to them when defining tests.
-$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach prebuild, $(PREBUILD_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach relocate, $(RELOCATE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach trace, $(TRACE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach gc, $(GC_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach jni, $(JNI_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach image, $(IMAGE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach test, $(TEST_ART_RUN_TESTS), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach address_size, $(ALL_ADDRESS_SIZES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach run_type, $(RUN_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach debuggable_type, $(DEBUGGABLE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable_type))_RULES :=)))
-
# We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
# only once).
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
@@ -838,6 +142,7 @@
# specific version depending on the compiler.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
+ $(HOST_OUT_EXECUTABLES)/hprof-conv \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagent) \
@@ -871,16 +176,18 @@
# Host executables.
host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation for host.
host_prereq_rules += $(HOST_JACK_CLASSPATH_DEPENDENCIES)
+endif
# Required for dx, jasmin, smali, dexmerger, jack.
host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
-host_prereq_rules += $(HOST_OUT_EXECUTABLES)/hprof-conv
-
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation for target.
target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
+endif
# Sync test files to the target, depends upon all things that must be pushed
#to the target.
@@ -908,423 +215,48 @@
endif
endef
-COMPILER_TYPES_2 := optimizing
-COMPILER_TYPES_2 += interpreter
-COMPILER_TYPES_2 += jit
-COMPILER_TYPES_2 += regalloc_gc
-COMPILER_TYPES_2 += interp-ac
-ALL_ADDRESS_SIZES_2 := 32 64
-IMAGE_TYPES_2 := picimage
-IMAGE_TYPES_2 += no-image
-IMAGE_TYPES_2 += npicimage
-IMAGE_TYPES_2 += multinpicimage
-IMAGE_TYPES_2 += multipicimage
+TARGET_TYPES := host target
+COMPILER_TYPES := jit interpreter optimizing regalloc_gc jit interp-ac speed-profile
+IMAGE_TYPES := picimage no-image multipicimage
+ALL_ADDRESS_SIZES := 64 32
# Add core image dependencies required for given target - HOST or TARGET,
# IMAGE_TYPE, COMPILER_TYPE and ADDRESS_SIZE to the prereq_rules.
$(foreach target, $(TARGET_TYPES), \
- $(foreach image, $(IMAGE_TYPES_2), \
- $(foreach compiler, $(COMPILER_TYPES_2), \
- $(foreach address_size, $(ALL_ADDRESS_SIZES_2), $(eval \
+ $(foreach image, $(IMAGE_TYPES), \
+ $(foreach compiler, $(COMPILER_TYPES), \
+ $(foreach address_size, $(ALL_ADDRESS_SIZES), $(eval \
$(call core-image-dependencies,$(target),$(image),$(compiler),$(address_size)))))))
test-art-host-run-test-dependencies : $(host_prereq_rules)
test-art-target-run-test-dependencies : $(target_prereq_rules)
test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
-host_prereq_rules :=
-target_prereq_rules :=
+# Create a rule to build and run a test group of the following form:
+# test-art-{1: host target}-run-test
+define define-test-art-host-or-target-run-test-group
+ build_target := test-art-$(1)-run-test
+ .PHONY: $$(build_target)
-# Create a rule to build and run a tests following the form:
-# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter optimizing jit interp-ac}-
-# {5: relocate nrelocate relocate-npatchoat}-
-# {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
-# {9: no-image image picimage}-{10: pictest npictest}-
-# {11: ndebuggable debuggable}-{12: test name}{13: 32 or 64}
-define define-test-art-run-test
- run_test_options :=
- prereq_rule :=
- test_groups :=
- uc_host_or_target :=
- jack_classpath :=
- ifeq ($(ART_TEST_WITH_STRACE),true)
- run_test_options += --strace
- endif
- ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
- run_test_options += --always-clean
- endif
- ifeq ($(ART_TEST_BISECTION),true)
- run_test_options += --bisection-search
- endif
- ifeq ($(1),host)
- uc_host_or_target := HOST
- test_groups := ART_RUN_TEST_HOST_RULES
- run_test_options += --host
- prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(HOST_JACK_CLASSPATH_DEPENDENCIES)
- jack_classpath := $(HOST_JACK_CLASSPATH)
- else
- ifeq ($(1),target)
- uc_host_or_target := TARGET
- test_groups := ART_RUN_TEST_TARGET_RULES
- prereq_rule := test-art-target-sync $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
- jack_classpath := $(TARGET_JACK_CLASSPATH)
- else
- $$(error found $(1) expected $(TARGET_TYPES))
- endif
- endif
- ifeq ($(2),debug)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUG_RULES
- else
- ifeq ($(2),ndebug)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELEASE_RULES
- run_test_options += -O
- else
- $$(error found $(2) expected $(RUN_TYPES))
- endif
- endif
- ifeq ($(3),prebuild)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PREBUILD_RULES
- run_test_options += --prebuild
- else
- ifeq ($(3),no-prebuild)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_PREBUILD_RULES
- run_test_options += --no-prebuild
- else
- ifeq ($(3),no-dex2oat)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_DEX2OAT_RULES
- run_test_options += --no-prebuild --no-dex2oat
- else
- $$(error found $(3) expected $(PREBUILD_TYPES))
- endif
- endif
- endif
- ifeq ($(4),optimizing)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_RULES
- run_test_options += --optimizing
- else ifeq ($(4),regalloc_gc)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_GRAPH_COLOR_RULES
- run_test_options += --optimizing -Xcompiler-option --register-allocation-strategy=graph-color
- else
- ifeq ($(4),interpreter)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
- run_test_options += --interpreter
- else ifeq ($(4),interp-ac)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_ACCESS_CHECKS_RULES
- run_test_options += --interpreter --verify-soft-fail
- else
- ifeq ($(4),jit)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JIT_RULES
- run_test_options += --jit
- else
- $$(error found $(4) expected $(COMPILER_TYPES))
- endif
- endif
- endif
-
- ifeq ($(5),relocate)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_RULES
- run_test_options += --relocate
- else
- ifeq ($(5),no-relocate)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_RELOCATE_RULES
- run_test_options += --no-relocate
- else
- ifeq ($(5),relocate-npatchoat)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_NO_PATCHOAT_RULES
- run_test_options += --relocate --no-patchoat
- else
- $$(error found $(5) expected $(RELOCATE_TYPES))
- endif
- endif
- endif
- ifeq ($(6),trace)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
- run_test_options += --trace
- else
- ifeq ($(6),ntrace)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_TRACE_RULES
- else
- ifeq ($(6),stream)
- # Group streaming under normal tracing rules.
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
- run_test_options += --trace --stream
- else
- $$(error found $(6) expected $(TRACE_TYPES))
- endif
- endif
- endif
- ifeq ($(7),gcverify)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCVERIFY_RULES
- run_test_options += --gcverify
- else
- ifeq ($(7),gcstress)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCSTRESS_RULES
- run_test_options += --gcstress
- else
- ifeq ($(7),cms)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CMS_RULES
- else
- $$(error found $(7) expected $(GC_TYPES))
- endif
- endif
- endif
- ifeq ($(8),forcecopy)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_FORCECOPY_RULES
- run_test_options += --runtime-option -Xjniopts:forcecopy
- ifneq ($$(ART_TEST_JNI_FORCECOPY),true)
- skip_test := true
- endif
- else
- ifeq ($(8),checkjni)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CHECKJNI_RULES
- run_test_options += --runtime-option -Xcheck:jni
- else
- ifeq ($(8),jni)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JNI_RULES
- else
- $$(error found $(8) expected $(JNI_TYPES))
- endif
- endif
- endif
- image_suffix := $(4)
- ifeq ($(4),regalloc_gc)
- # Graph coloring tests share the image_suffix with optimizing tests.
- image_suffix := optimizing
- else
- ifeq ($(4),jit)
- # JIT tests share the image_suffix with interpreter tests.
- image_suffix := interpreter
- endif
- endif
- ifeq ($(9),no-image)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
- run_test_options += --no-image
- # Add the core dependency. This is required for pre-building.
- # Use the PIC image, as it is the default in run-test, to match dependencies.
- ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_$(13))
- else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_$(13))
- endif
- else
- ifeq ($(9),picimage)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
- ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_$(13))
- else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_$(13))
- endif
- else
- ifeq ($(9),multipicimage)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
- run_test_options += --multi-image
- ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_multi_$(13))
- else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_multi_$(13))
- endif
- else
- $$(error found $(9) expected $(IMAGE_TYPES))
- endif
- endif
- endif
- ifeq ($(10),pictest)
- run_test_options += --pic-test
- else
- ifeq ($(10),npictest)
- # Nothing to be done.
- else
- $$(error found $(10) expected $(PICTEST_TYPES))
- endif
- endif
- ifeq ($(11),debuggable)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUGGABLE_RULES
- run_test_options += --debuggable
- else
- ifeq ($(11),ndebuggable)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NONDEBUGGABLE_RULES
- # Nothing to be done.
- else
- $$(error found $(11) expected $(DEBUGGABLE_TYPES))
- endif
- endif
- # $(12) is the test name.
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(12))_RULES
- ifeq ($(13),64)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_64_RULES
- run_test_options += --64
- else
- ifeq ($(13),32)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_32_RULES
- else
- $$(error found $(13) expected $(ALL_ADDRESS_SIZES))
- endif
- endif
- # Override of host instruction-set-features. Required to test advanced x86 intrinsics. The
- # conditionals aren't really correct, they will fail to do the right thing on a 32-bit only
- # host. However, this isn't common enough to worry here and make the conditions complicated.
- ifneq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
- ifeq ($(13),64)
- run_test_options += --instruction-set-features $(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES)
- endif
- endif
- ifneq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
- ifeq ($(13),32)
- run_test_options += --instruction-set-features $($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES)
- endif
- endif
- run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)-$(12)$(13)
- run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
- $$(run_test_options)
- ifneq ($(ART_TEST_ANDROID_ROOT),)
- run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options)
- endif
- ifeq ($(ART_TEST_QUIET),true)
- run_test_options += --quiet
- endif
-$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(run_test_rule_name): PRIVATE_JACK_CLASSPATH := $$(jack_classpath)
-.PHONY: $$(run_test_rule_name)
-$$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
- $(hide) $$(call ART_TEST_SKIP,$$@) && \
- DX=$(abspath $(DX)) \
- JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
- SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
- DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
- JACK_VERSION=$(JACK_DEFAULT_VERSION) \
- JACK=$(abspath $(JACK)) \
- JACK_VERSION=$(JACK_DEFAULT_VERSION) \
- JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \
- art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
- && $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
- $$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
- echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
- rm -r $(ART_HOST_TEST_DIR)) || true
-
- $$(foreach test_group,$$(test_groups), $$(eval $$(value test_group) += $$(run_test_rule_name)))
-
- # Clear locally defined variables.
- uc_host_or_target :=
- test_groups :=
- run_test_options :=
- run_test_rule_name :=
- prereq_rule :=
- jack_classpath :=
-endef # define-test-art-run-test
-
-$(foreach target, $(TARGET_TYPES), \
- $(foreach test, $(TEST_ART_RUN_TESTS), \
- $(foreach run_type, $(RUN_TYPES), \
- $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), \
- $(foreach prebuild, $(PREBUILD_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), \
- $(foreach relocate, $(RELOCATE_TYPES), \
- $(foreach trace, $(TRACE_TYPES), \
- $(foreach gc, $(GC_TYPES), \
- $(foreach jni, $(JNI_TYPES), \
- $(foreach image, $(IMAGE_TYPES), \
- $(foreach pictest, $(PICTEST_TYPES), \
- $(foreach debuggable, $(DEBUGGABLE_TYPES), \
- $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(debuggable),$(test),$(address_size))) \
- )))))))))))))
-define-test-art-run-test :=
-
-# Define a phony rule whose purpose is to test its prerequisites.
-# $(1): host or target
-# $(2): list of prerequisites
-define define-test-art-run-test-group
-.PHONY: $(1)
-$(1): $(2)
- $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
-endef # define-test-art-run-test-group
-
+ $$(build_target) : args := --$(1) --verbose
+ $$(build_target) : test-art-$(1)-run-test-dependencies
+ ./art/test/testrunner/testrunner.py $$(args)
+ build_target :=
+ args :=
+endef # define-test-art-host-or-target-run-test-group
$(foreach target, $(TARGET_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test,$(ART_RUN_TEST_$(call name-to-var,$(target))_RULES))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach prebuild, $(PREBUILD_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(prebuild),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach run-type, $(RUN_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(run-type),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run-type))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(compiler),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach relocate, $(RELOCATE_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(relocate),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach trace, $(TRACE_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(trace),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach gc, $(GC_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(gc),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach jni, $(JNI_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(jni),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach debuggable, $(DEBUGGABLE_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(debuggable),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach image, $(IMAGE_TYPES), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(image),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach test, $(TEST_ART_RUN_TESTS), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(test),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES)))))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test$(address_size),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(address_size)_RULES)))))
+ $(call define-test-art-host-or-target-run-test-group,$(target))))
-# Clear variables now we're finished with them.
-$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach prebuild, $(PREBUILD_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach relocate, $(RELOCATE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach trace, $(TRACE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach gc, $(GC_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach jni, $(JNI_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach debuggable, $(DEBUGGABLE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach image, $(IMAGE_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach test, $(TEST_ART_RUN_TESTS), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach address_size, $(ALL_ADDRESS_SIZES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
-$(foreach target, $(TARGET_TYPES), \
- $(foreach run_type, $(RUN_TYPES), \
- $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
-define-test-art-run-test-group :=
+test-art-run-test : test-art-host-run-test test-art-target-run-test
+
+host_prereq_rules :=
+target_prereq_rules :=
+core-image-dependencies :=
+name-to-var :=
+define-test-art-host-or-target-run-test-group :=
TARGET_TYPES :=
-PREBUILD_TYPES :=
COMPILER_TYPES :=
-RELOCATE_TYPES :=
-TRACE_TYPES :=
-GC_TYPES :=
-JNI_TYPES :=
IMAGE_TYPES :=
-ADDRESS_SIZES_TARGET :=
-ADDRESS_SIZES_HOST :=
ALL_ADDRESS_SIZES :=
-RUN_TYPES :=
-DEBUGGABLE_TYPES :=
-
LOCAL_PATH :=
diff --git a/test/ti-agent/common_load.h b/test/DefaultMethods/IterableBase.java
similarity index 67%
copy from test/ti-agent/common_load.h
copy to test/DefaultMethods/IterableBase.java
index e79a006..4cefdef 100644
--- a/test/ti-agent/common_load.h
+++ b/test/DefaultMethods/IterableBase.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+interface Iface {
+ default void defaultMethod() {
+ }
+}
-#include "jvmti.h"
+class Impl implements Iface {
+}
-namespace art {
+abstract class IterableBase implements Iterable {
+}
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 808e58a..f1b6132 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -64,6 +64,7 @@
APP_IMAGE="y"
VDEX_FILTER=""
PROFILE="n"
+RANDOM_PROFILE="n"
# if "y", run 'sync' before dalvikvm to make sure all files from
# build step (e.g. dex2oat) were finished writing.
@@ -273,6 +274,9 @@
elif [ "x$1" = "x--profile" ]; then
PROFILE="y"
shift
+ elif [ "x$1" = "x--random-profile" ]; then
+ RANDOM_PROFILE="y"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -506,17 +510,23 @@
strip_cmdline="true"
sync_cmdline="true"
-if [ "$PROFILE" = "y" ]; then
+# PROFILE takes precedence over RANDOM_PROFILE, since PROFILE tests require a
+# specific profile to run properly.
+if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
profman_cmdline="${ANDROID_ROOT}/bin/profman \
--apk=$DEX_LOCATION/$TEST_NAME.jar \
- --dex-location=$DEX_LOCATION/$TEST_NAME.jar \
- --create-profile-from=$DEX_LOCATION/profile \
- --reference-profile-file=$DEX_LOCATION/$TEST_NAME.prof"
+ --dex-location=$DEX_LOCATION/$TEST_NAME.jar"
COMPILE_FLAGS="${COMPILE_FLAGS} --profile-file=$DEX_LOCATION/$TEST_NAME.prof"
FLAGS="${FLAGS} -Xcompiler-option --profile-file=$DEX_LOCATION/$TEST_NAME.prof"
+ if [ "$PROFILE" = "y" ]; then
+ profman_cmdline="${profman_cmdline} --create-profile-from=$DEX_LOCATION/profile \
+ --reference-profile-file=$DEX_LOCATION/$TEST_NAME.prof"
+ else
+ profman_cmdline="${profman_cmdline} --generate-test-profile=$DEX_LOCATION/$TEST_NAME.prof \
+ --generate-test-profile-seed=0"
+ fi
fi
-
if [ "$PREBUILD" = "y" ]; then
mkdir_locations="${mkdir_locations} ${DEX_LOCATION}/oat/$ISA"
if [ "$APP_IMAGE" = "y" ]; then
@@ -603,7 +613,7 @@
adb shell mkdir -p $DEX_LOCATION
adb push $TEST_NAME.jar $DEX_LOCATION
adb push $TEST_NAME-ex.jar $DEX_LOCATION
- if [ "$PROFILE" = "y" ]; then
+ if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION
fi
else
@@ -611,7 +621,7 @@
adb shell mkdir -p $DEX_LOCATION >/dev/null 2>&1
adb push $TEST_NAME.jar $DEX_LOCATION >/dev/null 2>&1
adb push $TEST_NAME-ex.jar $DEX_LOCATION >/dev/null 2>&1
- if [ "$PROFILE" = "y" ]; then
+ if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION >/dev/null 2>&1
fi
diff --git a/test/knownfailures.json b/test/knownfailures.json
index d540a79..7891d4c 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -259,7 +259,7 @@
"602-deoptimizeable"],
"description": ["Tests that should fail when the optimizing compiler ",
"compiles them non-debuggable."],
- "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable"
+ "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable | speed-profile & ndebuggable"
},
{
"tests": "596-app-images",
@@ -332,21 +332,39 @@
{
"tests": ["912-classes",
"616-cha",
- "616-cha-abstract"],
- "bug": "http://b/36344364 http://b36344221",
+ "616-cha-abstract",
+ "616-cha-interface",
+ "616-cha-interface-default",
+ "616-cha-miranda",
+ "616-cha-proxy-method-inline"],
+ "bug": "http://b/36344364 http://b/36344221",
"variant": "no-dex2oat | relocate-npatchoat"
},
{
"tests": ["476-clinit-inline-static-invoke",
"496-checker-inlining-class-loader",
+ "508-referrer-method",
"637-checker-throw-inline"],
"bug": "http://b/36365552",
"variant": "no-image & jit"
},
{
+ "tests": ["597-deopt-new-string"],
+ "bug": "http://b/36467228",
+ "variant": "no-image & jit"
+ },
+ {
"tests": ["530-checker-lse",
+ "530-checker-lse2",
+ "030-bad-finalizer",
"080-oom-throw"],
"bug": "http://b/36377828",
"variant": "interp-ac"
+ },
+ {
+ "tests": ["629-vdex-speed",
+ "634-vdex-duplicate"],
+ "description": ["Profile driven dexlayout does not work with vdex or dex verifier."],
+ "variant": "speed-profile"
}
]
diff --git a/test/run-all-tests b/test/run-all-tests
index 402c299..a0d2f23 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -155,6 +155,9 @@
elif [ "x$1" = "x--strace" ]; then
run_args="${run_args} --strace"
shift
+ elif [ "x$1" = "x--random-profile" ]; then
+ run_args="${run_args} --random-profile"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
diff --git a/test/run-test b/test/run-test
index 1715423..e46099d 100755
--- a/test/run-test
+++ b/test/run-test
@@ -382,6 +382,9 @@
filter=$1
run_args="${run_args} --vdex-filter $filter"
shift
+ elif [ "x$1" = "x--random-profile" ]; then
+ run_args="${run_args} --random-profile"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
@@ -713,36 +716,13 @@
export TEST_NAME=`basename ${test_dir}`
-# arch_supports_read_barrier ARCH
-# -------------------------------
-# Return whether the Optimizing compiler has read barrier support for ARCH.
-function arch_supports_read_barrier() {
- # Optimizing has read barrier support for ARM, ARM64, x86 and x86-64 at the
- # moment.
- [ "x$1" = xarm ] || [ "x$1" = xarm64 ] || [ "x$1" = xx86 ] || [ "x$1" = xx86_64 ]
-}
-
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$USE_JACK" = "true" ]; then
- # Optimizing has read barrier support for certain architectures
- # only. On other architectures, compiling is disabled when read
- # barriers are enabled, meaning that we do not produce a CFG file
- # as a side-effect of compilation, thus the Checker assertions
- # cannot be checked. Disable Checker for those cases.
- #
- # TODO: Enable Checker when read barrier support is added to more
- # architectures (b/12687968).
- if [ "x$ART_USE_READ_BARRIER" != xfalse ] \
- && (([ "x$host_mode" = "xyes" ] \
- && ! arch_supports_read_barrier "$host_arch_name") \
- || ([ "x$target_mode" = "xyes" ] \
- && ! arch_supports_read_barrier "$target_arch_name")); then
- run_checker="no"
# In no-prebuild mode, the compiler is only invoked if both dex2oat and
# patchoat are available. Disable Checker otherwise (b/22552692).
- elif [ "$prebuild_mode" = "yes" ] \
+ if [ "$prebuild_mode" = "yes" ] \
|| [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
run_checker="yes"
@@ -772,7 +752,7 @@
# Set a hard limit to encourage ART developers to increase the ulimit here if
# needed to support a test case rather than resetting the limit in the run
# script for the particular test in question.
-if ! ulimit -f -H 128000; then
+if ! ulimit -f 128000; then
err_echo "ulimit file size setting failed"
fi
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index ed4b4a9..f5e2a61 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,10 +17,10 @@
import tempfile
import subprocess
-env = dict(os.environ)
+_env = dict(os.environ)
-def getEnvBoolean(var, default):
- val = env.get(var)
+def _getEnvBoolean(var, default):
+ val = _env.get(var)
if val:
if val == "True" or val == "true":
return True
@@ -37,7 +35,7 @@
'HOST_OUT_EXECUTABLES']
_DUMP_MANY_VARS = None # To be set to a dictionary with above list being the keys,
# and the build variable being the value.
-def dump_many_vars(var_name):
+def _dump_many_vars(var_name):
"""
Reach into the Android build system to dump many build vars simultaneously.
Since the make system is so slow, we want to avoid calling into build frequently.
@@ -57,7 +55,9 @@
"make --no-print-directory -C \"%s\" -f build/core/config.mk "
"dump-many-vars DUMP_MANY_VARS=\"%s\"") % (ANDROID_BUILD_TOP, all_vars)
- config = subprocess.Popen(command, stdout=subprocess.PIPE,
+ config = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ universal_newlines=True,
shell=True).communicate()[0] # read until EOF, select stdin
# Prints out something like:
# TARGET_ARCH='arm64'
@@ -74,139 +74,159 @@
return _DUMP_MANY_VARS[var_name]
-def get_build_var(var_name):
- return dump_many_vars(var_name)
+def _get_build_var(var_name):
+ return _dump_many_vars(var_name)
def get_env(key):
- return env.get(key)
+ return _env.get(key)
-ANDROID_BUILD_TOP = env.get('ANDROID_BUILD_TOP', os.getcwd())
+def _get_android_build_top():
+ path_to_top = _env.get('ANDROID_BUILD_TOP')
+ if not path_to_top:
+ # nothing set. try to guess it based on the relative path of this env.py file.
+ this_file_path = os.path.realpath(__file__)
+ path_to_top = os.path.join(os.path.dirname(this_file_path), '../../../')
+ path_to_top = os.path.realpath(path_to_top)
+
+ if not os.path.exists(os.path.join(path_to_top, 'build/envsetup.sh')):
+ raise AssertionError("env.py must be located inside an android source tree")
+
+ return path_to_top
+
+ANDROID_BUILD_TOP = _get_android_build_top()
# Directory used for temporary test files on the host.
ART_HOST_TEST_DIR = tempfile.mkdtemp(prefix = 'test-art-')
# Keep going after encountering a test failure?
-ART_TEST_KEEP_GOING = getEnvBoolean('ART_TEST_KEEP_GOING', True)
+ART_TEST_KEEP_GOING = _getEnvBoolean('ART_TEST_KEEP_GOING', True)
# Do you want all tests, even those that are time consuming?
-ART_TEST_FULL = getEnvBoolean('ART_TEST_FULL', False)
+ART_TEST_FULL = _getEnvBoolean('ART_TEST_FULL', False)
# Do you want interpreter tests run?
-ART_TEST_INTERPRETER = getEnvBoolean('ART_TEST_INTERPRETER', ART_TEST_FULL)
-ART_TEST_INTERPRETER_ACCESS_CHECKS = getEnvBoolean('ART_TEST_INTERPRETER_ACCESS_CHECKS',
+ART_TEST_INTERPRETER = _getEnvBoolean('ART_TEST_INTERPRETER', ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS = _getEnvBoolean('ART_TEST_INTERPRETER_ACCESS_CHECKS',
ART_TEST_FULL)
# Do you want JIT tests run?
-ART_TEST_JIT = getEnvBoolean('ART_TEST_JIT', ART_TEST_FULL)
+ART_TEST_JIT = _getEnvBoolean('ART_TEST_JIT', ART_TEST_FULL)
# Do you want optimizing compiler tests run?
-ART_TEST_OPTIMIZING = getEnvBoolean('ART_TEST_OPTIMIZING', ART_TEST_FULL)
+ART_TEST_OPTIMIZING = _getEnvBoolean('ART_TEST_OPTIMIZING', ART_TEST_FULL)
# Do you want to test the optimizing compiler with graph coloring register allocation?
-ART_TEST_OPTIMIZING_GRAPH_COLOR = getEnvBoolean('ART_TEST_OPTIMIZING_GRAPH_COLOR', ART_TEST_FULL)
+ART_TEST_OPTIMIZING_GRAPH_COLOR = _getEnvBoolean('ART_TEST_OPTIMIZING_GRAPH_COLOR', ART_TEST_FULL)
+
+# Do you want to do run-tests with profiles?
+ART_TEST_SPEED_PROFILE = _getEnvBoolean('ART_TEST_SPEED_PROFILE', ART_TEST_FULL)
# Do we want to test PIC-compiled tests ("apps")?
-ART_TEST_PIC_TEST = getEnvBoolean('ART_TEST_PIC_TEST', ART_TEST_FULL)
+ART_TEST_PIC_TEST = _getEnvBoolean('ART_TEST_PIC_TEST', ART_TEST_FULL)
# Do you want tracing tests run?
-ART_TEST_TRACE = getEnvBoolean('ART_TEST_TRACE', ART_TEST_FULL)
+ART_TEST_TRACE = _getEnvBoolean('ART_TEST_TRACE', ART_TEST_FULL)
# Do you want tracing tests (streaming mode) run?
-ART_TEST_TRACE_STREAM = getEnvBoolean('ART_TEST_TRACE_STREAM', ART_TEST_FULL)
+ART_TEST_TRACE_STREAM = _getEnvBoolean('ART_TEST_TRACE_STREAM', ART_TEST_FULL)
# Do you want tests with GC verification enabled run?
-ART_TEST_GC_VERIFY = getEnvBoolean('ART_TEST_GC_VERIFY', ART_TEST_FULL)
+ART_TEST_GC_VERIFY = _getEnvBoolean('ART_TEST_GC_VERIFY', ART_TEST_FULL)
# Do you want tests with the GC stress mode enabled run?
-ART_TEST_GC_STRESS = getEnvBoolean('ART_TEST_GC_STRESS', ART_TEST_FULL)
+ART_TEST_GC_STRESS = _getEnvBoolean('ART_TEST_GC_STRESS', ART_TEST_FULL)
# Do you want tests with the JNI forcecopy mode enabled run?
-ART_TEST_JNI_FORCECOPY = getEnvBoolean('ART_TEST_JNI_FORCECOPY', ART_TEST_FULL)
+ART_TEST_JNI_FORCECOPY = _getEnvBoolean('ART_TEST_JNI_FORCECOPY', ART_TEST_FULL)
# Do you want run-tests with relocation disabled run?
-ART_TEST_RUN_TEST_RELOCATE = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE', ART_TEST_FULL)
+ART_TEST_RUN_TEST_RELOCATE = _getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE', ART_TEST_FULL)
# Do you want run-tests with prebuilding?
-ART_TEST_RUN_TEST_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_PREBUILD', ART_TEST_FULL)
+ART_TEST_RUN_TEST_PREBUILD = _getEnvBoolean('ART_TEST_RUN_TEST_PREBUILD', ART_TEST_FULL)
# Do you want run-tests with no prebuilding enabled run?
-ART_TEST_RUN_TEST_NO_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_NO_PREBUILD', ART_TEST_FULL)
+ART_TEST_RUN_TEST_NO_PREBUILD = _getEnvBoolean('ART_TEST_RUN_TEST_NO_PREBUILD', ART_TEST_FULL)
# Do you want run-tests with a pregenerated core.art?
-ART_TEST_RUN_TEST_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_IMAGE', ART_TEST_FULL)
+ART_TEST_RUN_TEST_IMAGE = _getEnvBoolean('ART_TEST_RUN_TEST_IMAGE', ART_TEST_FULL)
# Do you want run-tests without a pregenerated core.art?
-ART_TEST_RUN_TEST_NO_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_NO_IMAGE', ART_TEST_FULL)
+ART_TEST_RUN_TEST_NO_IMAGE = _getEnvBoolean('ART_TEST_RUN_TEST_NO_IMAGE', ART_TEST_FULL)
# Do you want run-tests with relocation enabled but patchoat failing?
-ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT',
+ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT = _getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT',
ART_TEST_FULL)
# Do you want run-tests without a dex2oat?
-ART_TEST_RUN_TEST_NO_DEX2OAT = getEnvBoolean('ART_TEST_RUN_TEST_NO_DEX2OAT', ART_TEST_FULL)
+ART_TEST_RUN_TEST_NO_DEX2OAT = _getEnvBoolean('ART_TEST_RUN_TEST_NO_DEX2OAT', ART_TEST_FULL)
# Do you want run-tests with libartd.so?
-ART_TEST_RUN_TEST_DEBUG = getEnvBoolean('ART_TEST_RUN_TEST_DEBUG', ART_TEST_FULL)
+ART_TEST_RUN_TEST_DEBUG = _getEnvBoolean('ART_TEST_RUN_TEST_DEBUG', ART_TEST_FULL)
# Do you want run-tests with libart.so?
-ART_TEST_RUN_TEST_NDEBUG = getEnvBoolean('ART_TEST_RUN_TEST_NDEBUG', ART_TEST_FULL)
+ART_TEST_RUN_TEST_NDEBUG = _getEnvBoolean('ART_TEST_RUN_TEST_NDEBUG', ART_TEST_FULL)
# Do you want failed tests to have their artifacts cleaned up?
-ART_TEST_RUN_TEST_ALWAYS_CLEAN = getEnvBoolean('ART_TEST_RUN_TEST_ALWAYS_CLEAN', True)
+ART_TEST_RUN_TEST_ALWAYS_CLEAN = _getEnvBoolean('ART_TEST_RUN_TEST_ALWAYS_CLEAN', True)
# Do you want run-tests with the --debuggable flag
-ART_TEST_RUN_TEST_DEBUGGABLE = getEnvBoolean('ART_TEST_RUN_TEST_DEBUGGABLE', ART_TEST_FULL)
+ART_TEST_RUN_TEST_DEBUGGABLE = _getEnvBoolean('ART_TEST_RUN_TEST_DEBUGGABLE', ART_TEST_FULL)
# Do you want to test multi-part boot-image functionality?
-ART_TEST_RUN_TEST_MULTI_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_MULTI_IMAGE', ART_TEST_FULL)
+ART_TEST_RUN_TEST_MULTI_IMAGE = _getEnvBoolean('ART_TEST_RUN_TEST_MULTI_IMAGE', ART_TEST_FULL)
-ART_TEST_DEBUG_GC = getEnvBoolean('ART_TEST_DEBUG_GC', False)
+ART_TEST_DEBUG_GC = _getEnvBoolean('ART_TEST_DEBUG_GC', False)
-ART_TEST_BISECTION = getEnvBoolean('ART_TEST_BISECTION', False)
+ART_TEST_BISECTION = _getEnvBoolean('ART_TEST_BISECTION', False)
-DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get('DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
+DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = _env.get('DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
# Do you want run-tests with the host/target's second arch?
-ART_TEST_RUN_TEST_2ND_ARCH = getEnvBoolean('ART_TEST_RUN_TEST_2ND_ARCH', True)
+ART_TEST_RUN_TEST_2ND_ARCH = _getEnvBoolean('ART_TEST_RUN_TEST_2ND_ARCH', True)
-HOST_2ND_ARCH_PREFIX = get_build_var('HOST_2ND_ARCH_PREFIX')
-HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get(
+HOST_2ND_ARCH_PREFIX = _get_build_var('HOST_2ND_ARCH_PREFIX')
+HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = _env.get(
HOST_2ND_ARCH_PREFIX + 'DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
-ART_TEST_ANDROID_ROOT = env.get('ART_TEST_ANDROID_ROOT')
+ART_TEST_ANDROID_ROOT = _env.get('ART_TEST_ANDROID_ROOT')
-ART_TEST_WITH_STRACE = getEnvBoolean('ART_TEST_DEBUG_GC', False)
+ART_TEST_WITH_STRACE = _getEnvBoolean('ART_TEST_DEBUG_GC', False)
-EXTRA_DISABLED_TESTS = set(env.get("ART_TEST_RUN_TEST_SKIP", "").split())
+EXTRA_DISABLED_TESTS = set(_env.get("ART_TEST_RUN_TEST_SKIP", "").split())
-ART_TEST_RUN_TEST_BUILD = getEnvBoolean('ART_TEST_RUN_TEST_BUILD', False)
+ART_TEST_RUN_TEST_BUILD = _getEnvBoolean('ART_TEST_RUN_TEST_BUILD', False)
-TARGET_2ND_ARCH = get_build_var('TARGET_2ND_ARCH')
-TARGET_ARCH = get_build_var('TARGET_ARCH')
+TARGET_2ND_ARCH = _get_build_var('TARGET_2ND_ARCH')
+TARGET_ARCH = _get_build_var('TARGET_ARCH')
+
+# Note: ART_2ND_PHONY_TEST_TARGET_SUFFIX is 2ND_ART_PHONY_TEST_TARGET_SUFFIX in .mk files
+# Note: ART_2ND_PHONY_TEST_HOST_SUFFIX is 2ND_ART_PHONY_HOST_TARGET_SUFFIX in .mk files
+# Python does not let us have variable names starting with a digit, so it has differ.
if TARGET_2ND_ARCH:
if "64" in TARGET_ARCH:
ART_PHONY_TEST_TARGET_SUFFIX = "64"
- _2ND_ART_PHONY_TEST_TARGET_SUFFIX = "32"
+ ART_2ND_PHONY_TEST_TARGET_SUFFIX = "32"
else:
ART_PHONY_TEST_TARGET_SUFFIX = "32"
- _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+ ART_2ND_PHONY_TEST_TARGET_SUFFIX = ""
else:
if "64" in TARGET_ARCH:
ART_PHONY_TEST_TARGET_SUFFIX = "64"
- _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+ ART_2ND_PHONY_TEST_TARGET_SUFFIX = ""
else:
ART_PHONY_TEST_TARGET_SUFFIX = "32"
- _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+ ART_2ND_PHONY_TEST_TARGET_SUFFIX = ""
-HOST_PREFER_32_BIT = get_build_var('HOST_PREFER_32_BIT')
+HOST_PREFER_32_BIT = _get_build_var('HOST_PREFER_32_BIT')
if HOST_PREFER_32_BIT == "true":
ART_PHONY_TEST_HOST_SUFFIX = "32"
- _2ND_ART_PHONY_TEST_HOST_SUFFIX = ""
+ ART_2ND_PHONY_TEST_HOST_SUFFIX = ""
else:
ART_PHONY_TEST_HOST_SUFFIX = "64"
- _2ND_ART_PHONY_TEST_HOST_SUFFIX = "32"
+ ART_2ND_PHONY_TEST_HOST_SUFFIX = "32"
HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP,
- get_build_var("HOST_OUT_EXECUTABLES"))
+ _get_build_var("HOST_OUT_EXECUTABLES"))
os.environ['JACK'] = HOST_OUT_EXECUTABLES + '/jack'
os.environ['DX'] = HOST_OUT_EXECUTABLES + '/dx'
os.environ['SMALI'] = HOST_OUT_EXECUTABLES + '/smali'
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 4c519ae..0ab50af 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -16,14 +16,14 @@
"""Build and run go/ab/git_master-art-host target
+This script is executed by the android build server and must not be moved,
+or changed in an otherwise backwards-incompatible manner.
+
Provided with a target name, the script setup the environment for
building the test target by taking config information from
from target_config.py.
-If the target field is defined in the configuration for the target, it
-invokes `make` to build the target, otherwise, it assumes
-that the its is a run-test target, and invokes testrunner.py
-script for building and running the run-tests.
+See target_config.py for the configuration syntax.
"""
import argparse
@@ -35,10 +35,26 @@
import env
parser = argparse.ArgumentParser()
-parser.add_argument('build_target')
parser.add_argument('-j', default='1', dest='n_threads')
+# either -l/--list OR build-target is required (but not both).
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('build_target', nargs='?')
+group.add_argument('-l', '--list', action='store_true', help='List all possible run-build targets.')
options = parser.parse_args()
+##########
+
+if options.list:
+ print "List of all known build_target: "
+ for k in sorted(target_config.iterkeys()):
+ print " * " + k
+ # TODO: would be nice if this was the same order as the target config file.
+ sys.exit(1)
+
+if not target_config.get(options.build_target):
+ sys.stderr.write("error: invalid build_target, see -l/--list.\n")
+ sys.exit(1)
+
target = target_config[options.build_target]
n_threads = options.n_threads
custom_env = target.get('env', {})
@@ -46,26 +62,47 @@
print custom_env
os.environ.update(custom_env)
-
-if target.get('target'):
+if target.get('make'):
build_command = 'make'
build_command += ' -j' + str(n_threads)
build_command += ' -C ' + env.ANDROID_BUILD_TOP
- build_command += ' ' + target.get('target')
- print build_command.split()
+ build_command += ' ' + target.get('make')
+ # Add 'dist' to avoid Jack issues b/36169180.
+ build_command += ' dist'
+ sys.stdout.write(str(build_command) + '\n')
+ sys.stdout.flush()
if subprocess.call(build_command.split()):
sys.exit(1)
-else:
+if target.get('golem'):
+ machine_type = target.get('golem')
+ # use art-opt-cc by default since it mimics the default preopt config.
+ default_golem_config = 'art-opt-cc'
+
+ os.chdir(env.ANDROID_BUILD_TOP)
+ cmd = ['art/tools/golem/build-target.sh']
+ cmd += ['-j' + str(n_threads)]
+ cmd += ['--showcommands']
+ cmd += ['--machine-type=%s' %(machine_type)]
+ cmd += ['--golem=%s' %(default_golem_config)]
+ cmd += ['--tarball']
+ sys.stdout.write(str(cmd) + '\n')
+ sys.stdout.flush()
+
+ if subprocess.call(cmd):
+ sys.exit(1)
+
+if target.get('run-test'):
run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
'art/test/testrunner/testrunner.py')]
- run_test_command += target.get('flags', [])
+ run_test_command += target.get('run-test', [])
run_test_command += ['-j', str(n_threads)]
run_test_command += ['-b']
run_test_command += ['--host']
run_test_command += ['--verbose']
- print run_test_command
+ sys.stdout.write(str(run_test_command) + '\n')
+ sys.stdout.flush()
if subprocess.call(run_test_command):
sys.exit(1)
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 1af2ae7..95ab2e7 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -1,62 +1,99 @@
target_config = {
+
+# Configuration syntax:
+#
+# Required keys: (Use one or more of these)
+# * golem - specify a golem machine-type to build, e.g. android-armv8
+# (uses art/tools/golem/build-target.sh)
+# * make - specify a make target to build, e.g. build-art-host
+# * run-test - runs the tests in art/test/ directory with testrunner.py,
+# specify a list of arguments to pass to testrunner.py
+#
+# Optional keys: (Use any of these)
+#  * env - Add additional environment variables to the current environment.
+#
+# *** IMPORTANT ***:
+# This configuration is used by the android build server. Targets must not be renamed
+# or removed.
+#
+
+##########################################
+
+ # General ART configurations.
+ # Calls make and testrunner both.
+
'art-test' : {
- 'flags' : [],
+ 'make' : 'test-art-host-gtest',
+ 'run-test' : [],
'env' : {
'ART_USE_READ_BARRIER' : 'false'
}
},
+
+ 'art-test-javac' : {
+ 'make' : 'test-art-host-gtest',
+ 'run-test' : [],
+ 'env' : {
+ 'ANDROID_COMPILE_WITH_JACK' : 'false',
+ 'ART_USE_READ_BARRIER' : 'true'
+ }
+ },
+
+ # ART run-test configurations
+ # (calls testrunner which builds and then runs the test targets)
+
'art-interpreter' : {
- 'flags' : ['--interpreter'],
+ 'run-test' : ['--interpreter'],
'env' : {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-interpreter-access-checks' : {
- 'flags' : ['--interp-ac'],
+ 'run-test' : ['--interp-ac'],
'env' : {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-jit' : {
- 'flags' : ['--jit'],
+ 'run-test' : ['--jit'],
'env' : {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gcstress-gcverify': {
- 'flags' : ['--gcstress',
- '--gcverify'],
+ 'run-test': ['--gcstress',
+ '--gcverify'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_DEFAULT_GC_TYPE' : 'SS'
}
},
'art-interpreter-gcstress' : {
- 'flags': ['--interpreter',
- '--gcstress'],
+ 'run-test' : ['--interpreter',
+ '--gcstress'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_DEFAULT_GC_TYPE' : 'SS'
}
},
'art-optimizing-gcstress' : {
- 'flags': ['--gcstress',
- '--optimizing'],
+ 'run-test' : ['--gcstress',
+ '--optimizing'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_DEFAULT_GC_TYPE' : 'SS'
}
},
'art-jit-gcstress' : {
- 'flags': ['--jit',
- '--gcstress'],
+ 'run-test' : ['--jit',
+ '--gcstress'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_DEFAULT_GC_TYPE' : 'SS'
}
},
'art-read-barrier' : {
- 'flags': ['--interpreter',
+ 'run-test': ['--interpreter',
'--optimizing'],
'env' : {
'ART_USE_READ_BARRIER' : 'true',
@@ -64,17 +101,17 @@
}
},
'art-read-barrier-gcstress' : {
- 'flags' : ['--interpreter',
- '--optimizing',
- '--gcstress'],
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--gcstress'],
'env' : {
'ART_USE_READ_BARRIER' : 'true',
'ART_HEAP_POISONING' : 'true'
}
},
'art-read-barrier-table-lookup' : {
- 'flags' : ['--interpreter',
- '--optimizing'],
+ 'run-test' : ['--interpreter',
+ '--optimizing'],
'env' : {
'ART_USE_READ_BARRIER' : 'true',
'ART_READ_BARRIER_TYPE' : 'TABLELOOKUP',
@@ -82,35 +119,35 @@
}
},
'art-debug-gc' : {
- 'flags' : ['--interpreter',
- '--optimizing'],
+ 'run-test' : ['--interpreter',
+ '--optimizing'],
'env' : {
'ART_TEST_DEBUG_GC' : 'true',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-ss-gc' : {
- 'flags' : ['--interpreter',
- '--optimizing',
- '--jit'],
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
'env' : {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gss-gc' : {
- 'flags' : ['--interpreter',
- '--optimizing',
- '--jit'],
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
'env' : {
'ART_DEFAULT_GC_TYPE' : 'GSS',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-ss-gc-tlab' : {
- 'flags' : ['--interpreter',
- '--optimizing',
- '--jit'],
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
'env' : {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_TLAB' : 'true',
@@ -118,9 +155,9 @@
}
},
'art-gss-gc-tlab' : {
- 'flags' : ['--interpreter',
- '--optimizing',
- '--jit'],
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
'env' : {
'ART_DEFAULT_GC_TYPE' : 'GSS',
'ART_USE_TLAB' : 'true',
@@ -128,78 +165,94 @@
}
},
'art-tracing' : {
- 'flags' : ['--trace'],
+ 'run-test' : ['--trace'],
'env' : {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-interpreter-tracing' : {
- 'flags' : ['--interpreter',
- '--trace'],
+ 'run-test' : ['--interpreter',
+ '--trace'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-forcecopy' : {
- 'flags' : ['--forcecopy'],
+ 'run-test' : ['--forcecopy'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-no-prebuild' : {
- 'flags' : ['--no-prebuild'],
+ 'run-test' : ['--no-prebuild'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-no-image' : {
- 'flags' : ['--no-image'],
+ 'run-test' : ['--no-image'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-interpreter-no-image' : {
- 'flags' : ['--interpreter',
- '--no-image'],
+ 'run-test' : ['--interpreter',
+ '--no-image'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-relocate-no-patchoat' : {
- 'flags' : ['--relocate-npatchoat'],
+ 'run-test' : ['--relocate-npatchoat'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-no-dex2oat' : {
- 'flags' : ['--no-dex2oat'],
+ 'run-test' : ['--no-dex2oat'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
}
},
'art-heap-poisoning' : {
- 'flags' : ['--interpreter',
- '--optimizing'],
+ 'run-test' : ['--interpreter',
+ '--optimizing'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_HEAP_POISONING' : 'true'
}
},
+ 'art-preopt' : {
+ # This test configuration is intended to be representative of the case
+    # of preopted apps, which are precompiled PIC against an
+ # unrelocated image, then used with a relocated image.
+ 'run-test' : ['--pictest',
+ '--prebuild',
+ '--relocate',
+ '--jit'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+
+ # ART gtest configurations
+ # (calls make 'target' which builds and then runs the gtests).
+
'art-gtest' : {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env' : {
'ART_USE_READ_BARRIER' : 'true'
}
},
'art-gtest-read-barrier': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env' : {
'ART_USE_READ_BARRIER' : 'true',
'ART_HEAP_POISONING' : 'true'
}
},
'art-gtest-read-barrier-table-lookup': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env': {
'ART_USE_READ_BARRIER' : 'true',
'ART_READ_BARRIER_TYPE' : 'TABLELOOKUP',
@@ -207,21 +260,21 @@
}
},
'art-gtest-ss-gc': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env': {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gtest-gss-gc': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env' : {
'ART_DEFAULT_GC_TYPE' : 'GSS',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gtest-ss-gc-tlab': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env': {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_TLAB' : 'true',
@@ -229,7 +282,7 @@
}
},
'art-gtest-gss-gc-tlab': {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env': {
'ART_DEFAULT_GC_TYPE' : 'GSS',
'ART_USE_TLAB' : 'true',
@@ -237,29 +290,54 @@
}
},
'art-gtest-debug-gc' : {
- 'target' : 'test-art-host-gtest',
+ 'make' : 'test-art-host-gtest',
'env' : {
'ART_TEST_DEBUG_GC' : 'true',
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gtest-valgrind32': {
- 'target' : 'valgrind-test-art-host32',
+ 'make' : 'valgrind-test-art-host32',
'env': {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gtest-valgrind64': {
- 'target' : 'valgrind-test-art-host64',
+ 'make' : 'valgrind-test-art-host64',
'env': {
'ART_USE_READ_BARRIER' : 'false'
}
},
'art-gtest-heap-poisoning': {
- 'target' : 'valgrind-test-art-host64',
+ 'make' : 'valgrind-test-art-host64',
'env' : {
'ART_HEAP_POISONING' : 'true',
'ART_USE_READ_BARRIER' : 'false'
}
- }
+ },
+
+ # ART Golem build targets used by go/lem (continuous ART benchmarking),
+ # (art-opt-cc is used by default since it mimics the default preopt config),
+ #
+ # calls golem/build-target.sh which builds a golem tarball of the target name,
+ # e.g. 'golem: android-armv7' produces an 'android-armv7.tar.gz' upon success.
+
+ 'art-golem-android-armv7': {
+ 'golem' : 'android-armv7'
+ },
+ 'art-golem-android-armv8': {
+ 'golem' : 'android-armv8'
+ },
+ 'art-golem-linux-armv7': {
+ 'golem' : 'linux-armv7'
+ },
+ 'art-golem-linux-armv8': {
+ 'golem' : 'linux-armv8'
+ },
+ 'art-golem-linux-ia32': {
+ 'golem' : 'linux-ia32'
+ },
+ 'art-golem-linux-x64': {
+ 'golem' : 'linux-x64'
+ },
}
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index a550409..5d3687e 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
@@ -48,10 +48,12 @@
import fnmatch
import itertools
import json
+import multiprocessing
import os
import re
import subprocess
import sys
+import tempfile
import threading
import time
@@ -72,6 +74,9 @@
ADDRESS_SIZES = set()
OPTIMIZING_COMPILER_TYPES = set()
ADDRESS_SIZES_TARGET = {'host': set(), 'target': set()}
+# timeout for individual tests.
+# TODO: make it adjustable per test and for buildbots
+timeout = 3000 # 50 minutes
# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# that has key as the test name (like 001-HelloWorld), and value as set of
@@ -112,7 +117,7 @@
skipped_tests = []
# Flags
-n_thread = 1
+n_thread = -1
test_count = 0
total_test_count = 0
verbose = False
@@ -142,7 +147,7 @@
VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
- 'regalloc_gc'}
+ 'regalloc_gc', 'speed-profile'}
for v_type in VARIANT_TYPE_DICT:
TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
@@ -187,6 +192,8 @@
if env.ART_TEST_OPTIMIZING:
COMPILER_TYPES.add('optimizing')
OPTIMIZING_COMPILER_TYPES.add('optimizing')
+ if env.ART_TEST_SPEED_PROFILE:
+ COMPILER_TYPES.add('speed-profile')
# By default we run all 'compiler' variants.
if not COMPILER_TYPES:
@@ -194,6 +201,7 @@
COMPILER_TYPES.add('jit')
COMPILER_TYPES.add('interpreter')
COMPILER_TYPES.add('interp-ac')
+ COMPILER_TYPES.add('speed-profile')
OPTIMIZING_COMPILER_TYPES.add('optimizing')
if env.ART_TEST_RUN_TEST_RELOCATE:
@@ -248,15 +256,32 @@
ADDRESS_SIZES_TARGET['target'].add(env.ART_PHONY_TEST_TARGET_SUFFIX)
ADDRESS_SIZES_TARGET['host'].add(env.ART_PHONY_TEST_HOST_SUFFIX)
if env.ART_TEST_RUN_TEST_2ND_ARCH:
- ADDRESS_SIZES_TARGET['host'].add(env._2ND_ART_PHONY_TEST_HOST_SUFFIX)
- ADDRESS_SIZES_TARGET['target'].add(env._2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ ADDRESS_SIZES_TARGET['host'].add(env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
+ ADDRESS_SIZES_TARGET['target'].add(env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
else:
ADDRESS_SIZES_TARGET['host'] = ADDRESS_SIZES_TARGET['host'].union(ADDRESS_SIZES)
ADDRESS_SIZES_TARGET['target'] = ADDRESS_SIZES_TARGET['target'].union(ADDRESS_SIZES)
+ global n_thread
+ if n_thread is -1:
+ if 'target' in TARGET_TYPES:
+ n_thread = get_default_threads('target')
+ else:
+ n_thread = get_default_threads('host')
+
global semaphore
semaphore = threading.Semaphore(n_thread)
+ if not sys.stdout.isatty():
+ global COLOR_ERROR
+ global COLOR_PASS
+ global COLOR_SKIP
+ global COLOR_NORMAL
+ COLOR_ERROR = ''
+ COLOR_PASS = ''
+ COLOR_SKIP = ''
+ COLOR_NORMAL = ''
+
def run_tests(tests):
"""Creates thread workers to run the tests.
@@ -367,6 +392,8 @@
options_test += ' --interpreter --verify-soft-fail'
elif compiler == 'jit':
options_test += ' --jit'
+ elif compiler == 'speed-profile':
+ options_test += ' --random-profile'
if relocate == 'relocate':
options_test += ' --relocate'
@@ -412,8 +439,10 @@
options_test += ' --instruction-set-features ' + \
env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES
- options_test = (' --output-path %s/run-test-output/%s') % (
- env.ART_HOST_TEST_DIR, test_name) + options_test
+  # TODO(b/36039166): This is a temporary solution to
+ # fix build breakages.
+ options_test = (' --output-path %s') % (
+ tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)) + options_test
run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
command = run_test_sh + ' ' + options_test + ' ' + test
@@ -451,15 +480,15 @@
test_skipped = True
else:
test_skipped = False
- proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
- script_output = proc.stdout.read().strip()
+ proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=True)
+ script_output = proc.communicate(timeout=timeout)[0]
test_passed = not proc.wait()
if not test_skipped:
if test_passed:
print_test_info(test_name, 'PASS')
else:
- failed_tests.append(test_name)
+ failed_tests.append((test_name, script_output))
if not env.ART_TEST_KEEP_GOING:
stop_testrunner = True
print_test_info(test_name, 'FAIL', ('%s\n%s') % (
@@ -469,9 +498,14 @@
skipped_tests.append(test_name)
else:
print_test_info(test_name, '')
- except Exception, e:
- failed_tests.append(test_name)
- print_text(('%s\n%s\n') % (command, str(e)))
+ except subprocess.TimeoutExpired as e:
+ failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))
+ print_test_info(test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (
+ timeout, command))
+ except Exception as e:
+ failed_tests.append((test_name, str(e)))
+ print_test_info(test_name, 'FAIL',
+ ('%s\n%s\n\n') % (command, str(e)))
finally:
semaphore.release()
@@ -508,11 +542,11 @@
test_count,
total_test_count)
- if result == "FAIL":
+ if result == 'FAIL' or result == 'TIMEOUT':
info += ('%s %s %s\n%s\n') % (
progress_info,
test_name,
- COLOR_ERROR + 'FAIL' + COLOR_NORMAL,
+ COLOR_ERROR + result + COLOR_NORMAL,
failed_test_info)
else:
result_text = ''
@@ -533,15 +567,14 @@
allowed_test_length = console_width - total_output_length
test_name_len = len(test_name)
if allowed_test_length < test_name_len:
- test_name = ('%s...%s') % (
- test_name[:(allowed_test_length - 3)/2],
- test_name[-(allowed_test_length - 3)/2:])
+ test_name = ('...%s') % (
+ test_name[-(allowed_test_length - 3):])
info += ('%s %s %s') % (
progress_info,
test_name,
result_text)
print_text(info)
- except Exception, e:
+ except Exception as e:
print_text(('%s\n%s\n') % (test_name, str(e)))
failed_tests.append(test_name)
finally:
@@ -549,10 +582,10 @@
def verify_knownfailure_entry(entry):
supported_field = {
- 'tests' : (list, unicode),
- 'description' : (list, unicode),
- 'bug' : (unicode,),
- 'variant' : (unicode,),
+ 'tests' : (list, str),
+ 'description' : (list, str),
+ 'bug' : (str,),
+ 'variant' : (str,),
'env_vars' : (dict,),
}
for field in entry:
@@ -581,7 +614,7 @@
for failure in known_failures_info:
verify_knownfailure_entry(failure)
tests = failure.get('tests', [])
- if isinstance(tests, unicode):
+ if isinstance(tests, str):
tests = [tests]
variants = parse_variants(failure.get('variant'))
env_vars = failure.get('env_vars')
@@ -686,16 +719,16 @@
# Prints the list of skipped tests, if any.
if skipped_tests:
- print_text(COLOR_SKIP + 'SKIPPED TESTS' + COLOR_NORMAL + '\n')
+ print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
for test in skipped_tests:
print_text(test + '\n')
print_text('\n')
# Prints the list of failed tests, if any.
if failed_tests:
- print_text(COLOR_ERROR + 'FAILED TESTS' + COLOR_NORMAL + '\n')
- for test in failed_tests:
- print_text(test + '\n')
+ print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
+ for test_info in failed_tests:
+ print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
def parse_test_name(test_name):
@@ -767,6 +800,15 @@
return target_options
+def get_default_threads(target):
+ if target is 'target':
+ adb_command = 'adb shell cat /sys/devices/system/cpu/present'
+ cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
+ cpu_info = cpu_info_proc.stdout.read()
+ return int(cpu_info.split('-')[1])
+ else:
+ return multiprocessing.cpu_count()
+
def parse_option():
global verbose
global dry_run
@@ -774,10 +816,12 @@
global build
global gdb
global gdb_arg
+ global timeout
parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
parser.add_argument('-t', '--test', dest='test', help='name of the test')
parser.add_argument('-j', type=int, dest='n_thread')
+ parser.add_argument('--timeout', default=timeout, type=int, dest='timeout')
for variant in TOTAL_VARIANTS_SET:
flag = '--' + variant
flag_dest = variant.replace('-', '_')
@@ -842,6 +886,8 @@
IMAGE_TYPES.add('no-image')
if options['optimizing']:
COMPILER_TYPES.add('optimizing')
+ if options['speed_profile']:
+ COMPILER_TYPES.add('speed-profile')
if options['trace']:
TRACE_TYPES.add('trace')
if options['gcstress']:
@@ -885,6 +931,7 @@
gdb = True
if options['gdb_arg']:
gdb_arg = options['gdb_arg']
+ timeout = options['timeout']
return test
@@ -899,9 +946,11 @@
if 'target' in TARGET_TYPES:
build_targets += 'test-art-target-run-test-dependencies'
build_command = 'make'
- build_command += ' -j' + str(n_thread)
+ build_command += ' -j'
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + build_targets
+ # Add 'dist' to avoid Jack issues b/36169180.
+ build_command += ' dist'
if subprocess.call(build_command.split()):
sys.exit(1)
if user_requested_test:
@@ -914,7 +963,7 @@
while threading.active_count() > 1:
time.sleep(0.1)
print_analysis()
- except Exception, e:
+ except Exception as e:
print_analysis()
print_text(str(e))
sys.exit(1)
diff --git a/test/ti-agent/agent_common.cc b/test/ti-agent/agent_common.cc
new file mode 100644
index 0000000..9a91258
--- /dev/null
+++ b/test/ti-agent/agent_common.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+
+// Common JNI functions.
+
+extern "C" JNIEXPORT void JNICALL Java_art_Main_setTag(
+ JNIEnv* env, jclass, jobject obj, jlong tag) {
+ jvmtiError ret = jvmti_env->SetTag(obj, tag);
+ JvmtiErrorToException(env, jvmti_env, ret);
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_art_Main_getTag(JNIEnv* env, jclass, jobject obj) {
+ jlong tag = 0;
+ jvmtiError ret = jvmti_env->GetTag(obj, &tag);
+ if (JvmtiErrorToException(env, jvmti_env, ret)) {
+ return 0;
+ }
+ return tag;
+}
+
+} // namespace art
diff --git a/test/ti-agent/agent_startup.cc b/test/ti-agent/agent_startup.cc
new file mode 100644
index 0000000..be73de5
--- /dev/null
+++ b/test/ti-agent/agent_startup.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "agent_startup.h"
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+
+#include "jni_binder.h"
+#include "jvmti_helper.h"
+#include "scoped_utf_chars.h"
+#include "test_env.h"
+
+namespace art {
+
+static constexpr const char* kMainClass = "art/Main";
+
+static StartCallback gCallback = nullptr;
+
+// TODO: Check this. This may not work on device. The classloader containing the app's classes
+// may not have been created at this point (i.e., if it's not the system classloader).
+static void JNICALL VMInitCallback(jvmtiEnv* callback_jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED) {
+ // Bind kMainClass native methods.
+ BindFunctions(callback_jvmti_env, jni_env, kMainClass);
+
+ if (gCallback != nullptr) {
+ gCallback(callback_jvmti_env, jni_env);
+ gCallback = nullptr;
+ }
+
+ // And delete the jvmtiEnv.
+ callback_jvmti_env->DisposeEnvironment();
+}
+
+// Install a phase callback that will bind JNI functions on VMInit.
+void BindOnLoad(JavaVM* vm, StartCallback callback) {
+ // Use a new jvmtiEnv. Otherwise we might collide with table changes.
+ jvmtiEnv* install_env;
+ if (vm->GetEnv(reinterpret_cast<void**>(&install_env), JVMTI_VERSION_1_0) != 0) {
+ LOG(FATAL) << "Could not get jvmtiEnv";
+ }
+ SetAllCapabilities(install_env);
+
+ {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.VMInit = VMInitCallback;
+
+ CheckJvmtiError(install_env, install_env->SetEventCallbacks(&callbacks, sizeof(callbacks)));
+ }
+
+ CheckJvmtiError(install_env, install_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_VM_INIT,
+ nullptr));
+
+ gCallback = callback;
+}
+
+// Ensure binding of the Main class when the agent is started through OnAttach.
+void BindOnAttach(JavaVM* vm, StartCallback callback) {
+ // Get a JNIEnv. As the thread is attached, we must not destroy it.
+ JNIEnv* env;
+ CHECK_EQ(0, vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6))
+ << "Could not get JNIEnv";
+
+ jvmtiEnv* bind_jvmti_env;
+ CHECK_EQ(0, vm->GetEnv(reinterpret_cast<void**>(&bind_jvmti_env), JVMTI_VERSION_1_0))
+ << "Could not get jvmtiEnv";
+ SetAllCapabilities(bind_jvmti_env);
+
+ BindFunctions(bind_jvmti_env, env, kMainClass);
+
+ if (callback != nullptr) {
+ callback(bind_jvmti_env, env);
+ }
+
+ if (bind_jvmti_env->DisposeEnvironment() != JVMTI_ERROR_NONE) {
+ LOG(FATAL) << "Could not dispose temporary jvmtiEnv";
+ }
+}
+
+// Utility functions for art.Main shim.
+extern "C" JNIEXPORT void JNICALL Java_art_Main_bindAgentJNI(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jstring className, jobject classLoader) {
+ ScopedUtfChars name(env, className);
+ BindFunctions(jvmti_env, env, name.c_str(), classLoader);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Main_bindAgentJNIForClass(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jclass bindClass) {
+ BindFunctionsOnClass(jvmti_env, env, bindClass);
+}
+
+} // namespace art
diff --git a/test/ti-agent/agent_startup.h b/test/ti-agent/agent_startup.h
new file mode 100644
index 0000000..4963320
--- /dev/null
+++ b/test/ti-agent/agent_startup.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_AGENT_STARTUP_H_
+#define ART_TEST_TI_AGENT_AGENT_STARTUP_H_
+
+#include <functional>
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace art {
+
+using StartCallback = void(*)(jvmtiEnv*, JNIEnv*);
+
+// Ensure binding of the Main class when the agent is started through OnLoad.
+void BindOnLoad(JavaVM* vm, StartCallback callback);
+
+// Ensure binding of the Main class when the agent is started through OnAttach.
+void BindOnAttach(JavaVM* vm, StartCallback callback);
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_AGENT_STARTUP_H_
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 6316a9c..ab5dbcc 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "ti-agent/common_helper.h"
+#include "common_helper.h"
#include <dlfcn.h>
#include <stdio.h>
@@ -27,44 +27,15 @@
#include "jni_internal.h"
#include "jvmti.h"
#include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
#include "stack.h"
-#include "ti-agent/common_load.h"
#include "utils.h"
+#include "jni_binder.h"
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
namespace art {
-bool RuntimeIsJVM;
-
-bool IsJVM() {
- return RuntimeIsJVM;
-}
-
-void SetAllCapabilities(jvmtiEnv* env) {
- jvmtiCapabilities caps;
- env->GetPotentialCapabilities(&caps);
- env->AddCapabilities(&caps);
-}
-
-bool JvmtiErrorToException(JNIEnv* env, jvmtiError error) {
- if (error == JVMTI_ERROR_NONE) {
- return false;
- }
-
- ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
- if (rt_exception.get() == nullptr) {
- // CNFE should be pending.
- return true;
- }
-
- char* err;
- jvmti_env->GetErrorName(error, &err);
-
- env->ThrowNew(rt_exception.get(), err);
-
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- return true;
-}
-
template <bool is_redefine>
static void throwCommonRedefinitionError(jvmtiEnv* jvmti,
@@ -303,7 +274,7 @@
JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
nullptr);
if (res != JVMTI_ERROR_NONE) {
- JvmtiErrorToException(env, res);
+ JvmtiErrorToException(env, jvmti_env, res);
}
}
@@ -412,140 +383,4 @@
} // namespace common_transform
-static void BindMethod(jvmtiEnv* jenv,
- JNIEnv* env,
- jclass klass,
- jmethodID method) {
- char* name;
- char* signature;
- jvmtiError name_result = jenv->GetMethodName(method, &name, &signature, nullptr);
- if (name_result != JVMTI_ERROR_NONE) {
- LOG(FATAL) << "Could not get methods";
- }
-
- std::string names[2];
- if (IsJVM()) {
- // TODO Get the JNI long name
- char* klass_name;
- jvmtiError klass_result = jenv->GetClassSignature(klass, &klass_name, nullptr);
- if (klass_result == JVMTI_ERROR_NONE) {
- std::string name_str(name);
- std::string klass_str(klass_name);
- names[0] = GetJniShortName(klass_str, name_str);
- jenv->Deallocate(reinterpret_cast<unsigned char*>(klass_name));
- } else {
- LOG(FATAL) << "Could not get class name!";
- }
- } else {
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* m = jni::DecodeArtMethod(method);
- names[0] = m->JniShortName();
- names[1] = m->JniLongName();
- }
- for (const std::string& mangled_name : names) {
- if (mangled_name == "") {
- continue;
- }
- void* sym = dlsym(RTLD_DEFAULT, mangled_name.c_str());
- if (sym == nullptr) {
- continue;
- }
-
- JNINativeMethod native_method;
- native_method.fnPtr = sym;
- native_method.name = name;
- native_method.signature = signature;
-
- env->RegisterNatives(klass, &native_method, 1);
-
- jenv->Deallocate(reinterpret_cast<unsigned char*>(name));
- jenv->Deallocate(reinterpret_cast<unsigned char*>(signature));
- return;
- }
-
- LOG(FATAL) << "Could not find " << names[0];
-}
-
-static jclass FindClassWithSystemClassLoader(JNIEnv* env, const char* class_name) {
- // Find the system classloader.
- ScopedLocalRef<jclass> cl_klass(env, env->FindClass("java/lang/ClassLoader"));
- if (cl_klass.get() == nullptr) {
- return nullptr;
- }
- jmethodID getsystemclassloader_method = env->GetStaticMethodID(cl_klass.get(),
- "getSystemClassLoader",
- "()Ljava/lang/ClassLoader;");
- if (getsystemclassloader_method == nullptr) {
- return nullptr;
- }
- ScopedLocalRef<jobject> cl(env, env->CallStaticObjectMethod(cl_klass.get(),
- getsystemclassloader_method));
- if (cl.get() == nullptr) {
- return nullptr;
- }
-
- // Create a String of the name.
- std::string descriptor = android::base::StringPrintf("L%s;", class_name);
- std::string dot_name = DescriptorToDot(descriptor.c_str());
- ScopedLocalRef<jstring> name_str(env, env->NewStringUTF(dot_name.c_str()));
-
- // Call Class.forName with it.
- ScopedLocalRef<jclass> c_klass(env, env->FindClass("java/lang/Class"));
- if (c_klass.get() == nullptr) {
- return nullptr;
- }
- jmethodID forname_method = env->GetStaticMethodID(
- c_klass.get(),
- "forName",
- "(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;");
- if (forname_method == nullptr) {
- return nullptr;
- }
-
- return reinterpret_cast<jclass>(env->CallStaticObjectMethod(c_klass.get(),
- forname_method,
- name_str.get(),
- JNI_FALSE,
- cl.get()));
-}
-
-void BindFunctions(jvmtiEnv* jenv, JNIEnv* env, const char* class_name) {
- // Use JNI to load the class.
- ScopedLocalRef<jclass> klass(env, env->FindClass(class_name));
- if (klass.get() == nullptr) {
- // We may be called with the wrong classloader. Try explicitly using the system classloader.
- env->ExceptionClear();
- klass.reset(FindClassWithSystemClassLoader(env, class_name));
- if (klass.get() == nullptr) {
- LOG(FATAL) << "Could not load " << class_name;
- }
- }
- BindFunctionsOnClass(jenv, env, klass.get());
-}
-
-void BindFunctionsOnClass(jvmtiEnv* jenv, JNIEnv* env, jclass klass) {
- // Use JVMTI to get the methods.
- jint method_count;
- jmethodID* methods;
- jvmtiError methods_result = jenv->GetClassMethods(klass, &method_count, &methods);
- if (methods_result != JVMTI_ERROR_NONE) {
- LOG(FATAL) << "Could not get methods";
- }
-
- // Check each method.
- for (jint i = 0; i < method_count; ++i) {
- jint modifiers;
- jvmtiError mod_result = jenv->GetMethodModifiers(methods[i], &modifiers);
- if (mod_result != JVMTI_ERROR_NONE) {
- LOG(FATAL) << "Could not get methods";
- }
- constexpr jint kNative = static_cast<jint>(kAccNative);
- if ((modifiers & kNative) != 0) {
- BindMethod(jenv, env, klass, methods[i]);
- }
- }
-
- jenv->Deallocate(reinterpret_cast<unsigned char*>(methods));
-}
-
} // namespace art
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index f10356d..610019e 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -19,13 +19,11 @@
#include "jni.h"
#include "jvmti.h"
-#include "ScopedLocalRef.h"
namespace art {
+
namespace common_redefine {
-
jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
} // namespace common_redefine
namespace common_retransform {
@@ -36,53 +34,6 @@
jint OnLoad(JavaVM* vm, char* options, void* reserved);
} // namespace common_transform
-
-extern bool RuntimeIsJVM;
-
-bool IsJVM();
-
-template <typename T>
-static jobjectArray CreateObjectArray(JNIEnv* env,
- jint length,
- const char* component_type_descriptor,
- T src) {
- if (length < 0) {
- return nullptr;
- }
-
- ScopedLocalRef<jclass> obj_class(env, env->FindClass(component_type_descriptor));
- if (obj_class.get() == nullptr) {
- return nullptr;
- }
-
- ScopedLocalRef<jobjectArray> ret(env, env->NewObjectArray(length, obj_class.get(), nullptr));
- if (ret.get() == nullptr) {
- return nullptr;
- }
-
- for (jint i = 0; i < length; ++i) {
- jobject element = src(i);
- env->SetObjectArrayElement(ret.get(), static_cast<jint>(i), element);
- env->DeleteLocalRef(element);
- if (env->ExceptionCheck()) {
- return nullptr;
- }
- }
-
- return ret.release();
-}
-
-void SetAllCapabilities(jvmtiEnv* env);
-
-bool JvmtiErrorToException(JNIEnv* env, jvmtiError error);
-
-// Load the class through JNI. Inspect it, find all native methods. Construct the corresponding
-// mangled name, run dlsym and bind the method.
-//
-// This will abort on failure.
-void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name);
-void BindFunctionsOnClass(jvmtiEnv* jvmti_env, JNIEnv* env, jclass klass);
-
} // namespace art
#endif // ART_TEST_TI_AGENT_COMMON_HELPER_H_
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index fddae3a..9e7b75d 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -14,24 +14,25 @@
* limitations under the License.
*/
-#include "common_load.h"
-
#include <jni.h>
#include <stdio.h>
-#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
+
+#include "agent_startup.h"
#include "common_helper.h"
+#include "jni_binder.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
#include "901-hello-ti-agent/basics.h"
#include "909-attach-agent/attach.h"
#include "936-search-onload/search_onload.h"
+#include "983-source-transform-verify/source_transform.h"
namespace art {
-jvmtiEnv* jvmti_env;
-
namespace {
using OnLoad = jint (*)(JavaVM* vm, char* options, void* reserved);
@@ -43,45 +44,6 @@
OnAttach attach;
};
-static void JNICALL VMInitCallback(jvmtiEnv *jvmti_env,
- JNIEnv* jni_env,
- jthread thread ATTRIBUTE_UNUSED) {
- // Bind Main native methods.
- BindFunctions(jvmti_env, jni_env, "Main");
-}
-
-// Install a phase callback that will bind JNI functions on VMInit.
-bool InstallBindCallback(JavaVM* vm) {
- // Use a new jvmtiEnv. Otherwise we might collide with table changes.
- jvmtiEnv* install_env;
- if (vm->GetEnv(reinterpret_cast<void**>(&install_env), JVMTI_VERSION_1_0) != 0) {
- return false;
- }
- SetAllCapabilities(install_env);
-
- {
- jvmtiEventCallbacks callbacks;
- memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
- callbacks.VMInit = VMInitCallback;
-
- jvmtiError install_error = install_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (install_error != JVMTI_ERROR_NONE) {
- return false;
- }
- }
-
- {
- jvmtiError enable_error = install_env->SetEventNotificationMode(JVMTI_ENABLE,
- JVMTI_EVENT_VM_INIT,
- nullptr);
- if (enable_error != JVMTI_ERROR_NONE) {
- return false;
- }
- }
-
- return true;
-}
-
// A trivial OnLoad implementation that only initializes the global jvmti_env.
static jint MinimalOnLoad(JavaVM* vm,
char* options ATTRIBUTE_UNUSED,
@@ -121,6 +83,9 @@
{ "943-private-recursive-jit", common_redefine::OnLoad, nullptr },
{ "944-transform-classloaders", common_redefine::OnLoad, nullptr },
{ "945-obsolete-native", common_redefine::OnLoad, nullptr },
+ { "981-dedup-original-dex", common_retransform::OnLoad, nullptr },
+ { "982-ok-no-retransform", common_retransform::OnLoad, nullptr },
+ { "983-source-transform-verify", Test983SourceTransformVerify::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
@@ -150,28 +115,8 @@
return true;
}
-static void SetIsJVM(char* options) {
- RuntimeIsJVM = strncmp(options, "jvm", 3) == 0;
-}
-
-static bool BindFunctionsAttached(JavaVM* vm, const char* class_name) {
- // Get a JNIEnv. As the thread is attached, we must not destroy it.
- JNIEnv* env;
- if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != 0) {
- printf("Unable to get JNI env!\n");
- return false;
- }
-
- jvmtiEnv* jenv;
- if (vm->GetEnv(reinterpret_cast<void**>(&jenv), JVMTI_VERSION_1_0) != 0) {
- printf("Unable to get jvmti env!\n");
- return false;
- }
- SetAllCapabilities(jenv);
-
- BindFunctions(jenv, env, class_name);
-
- return true;
+static void SetIsJVM(const char* options) {
+ SetJVM(strncmp(options, "jvm", 3) == 0);
}
} // namespace
@@ -186,9 +131,7 @@
SetIsJVM(remaining_options);
- if (!InstallBindCallback(vm)) {
- return 1;
- }
+ BindOnLoad(vm, nullptr);
AgentLib* lib = FindAgent(name_option);
OnLoad fn = nullptr;
@@ -212,7 +155,7 @@
return -1;
}
- BindFunctionsAttached(vm, "Main");
+ BindOnAttach(vm, nullptr);
AgentLib* lib = FindAgent(name_option);
if (lib == nullptr) {
diff --git a/test/ti-agent/jni_binder.cc b/test/ti-agent/jni_binder.cc
new file mode 100644
index 0000000..32236de
--- /dev/null
+++ b/test/ti-agent/jni_binder.cc
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_binder.h"
+
+#include <dlfcn.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+#include "ti_utf.h"
+
+namespace art {
+
+static std::string MangleForJni(const std::string& s) {
+ std::string result;
+ size_t char_count = ti::CountModifiedUtf8Chars(s.c_str(), s.length());
+ const char* cp = &s[0];
+ for (size_t i = 0; i < char_count; ++i) {
+ uint32_t ch = ti::GetUtf16FromUtf8(&cp);
+ if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9')) {
+ result.push_back(ch);
+ } else if (ch == '.' || ch == '/') {
+ result += "_";
+ } else if (ch == '_') {
+ result += "_1";
+ } else if (ch == ';') {
+ result += "_2";
+ } else if (ch == '[') {
+ result += "_3";
+ } else {
+ const uint16_t leading = ti::GetLeadingUtf16Char(ch);
+ const uint32_t trailing = ti::GetTrailingUtf16Char(ch);
+
+ android::base::StringAppendF(&result, "_0%04x", leading);
+ if (trailing != 0) {
+ android::base::StringAppendF(&result, "_0%04x", trailing);
+ }
+ }
+ }
+ return result;
+}
+
+static std::string GetJniShortName(const std::string& class_descriptor, const std::string& method) {
+ // Remove the leading 'L' and trailing ';'...
+ std::string class_name(class_descriptor);
+ CHECK_EQ(class_name[0], 'L') << class_name;
+ CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
+ class_name.erase(0, 1);
+ class_name.erase(class_name.size() - 1, 1);
+
+ std::string short_name;
+ short_name += "Java_";
+ short_name += MangleForJni(class_name);
+ short_name += "_";
+ short_name += MangleForJni(method);
+ return short_name;
+}
+
+static void BindMethod(jvmtiEnv* jvmti_env, JNIEnv* env, jclass klass, jmethodID method) {
+ std::string name;
+ std::string signature;
+ std::string mangled_names[2];
+ {
+ char* name_cstr;
+ char* sig_cstr;
+ jvmtiError name_result = jvmti_env->GetMethodName(method, &name_cstr, &sig_cstr, nullptr);
+ CheckJvmtiError(jvmti_env, name_result);
+ CHECK(name_cstr != nullptr);
+ CHECK(sig_cstr != nullptr);
+ name = name_cstr;
+ signature = sig_cstr;
+
+ char* klass_name;
+ jvmtiError klass_result = jvmti_env->GetClassSignature(klass, &klass_name, nullptr);
+ CheckJvmtiError(jvmti_env, klass_result);
+
+ mangled_names[0] = GetJniShortName(klass_name, name);
+ // TODO: Long JNI name.
+
+ CheckJvmtiError(jvmti_env, Deallocate(jvmti_env, name_cstr));
+ CheckJvmtiError(jvmti_env, Deallocate(jvmti_env, sig_cstr));
+ CheckJvmtiError(jvmti_env, Deallocate(jvmti_env, klass_name));
+ }
+
+ for (const std::string& mangled_name : mangled_names) {
+ if (mangled_name.empty()) {
+ continue;
+ }
+ void* sym = dlsym(RTLD_DEFAULT, mangled_name.c_str());
+ if (sym == nullptr) {
+ continue;
+ }
+
+ JNINativeMethod native_method;
+ native_method.fnPtr = sym;
+ native_method.name = name.c_str();
+ native_method.signature = signature.c_str();
+
+ env->RegisterNatives(klass, &native_method, 1);
+
+ return;
+ }
+
+ LOG(FATAL) << "Could not find " << mangled_names[0];
+}
+
+static std::string DescriptorToDot(const char* descriptor) {
+ size_t length = strlen(descriptor);
+ if (length > 1) {
+ if (descriptor[0] == 'L' && descriptor[length - 1] == ';') {
+ // Descriptors have the leading 'L' and trailing ';' stripped.
+ std::string result(descriptor + 1, length - 2);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ } else {
+ // For arrays the 'L' and ';' remain intact.
+ std::string result(descriptor);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ }
+ }
+ // Do nothing for non-class/array descriptors.
+ return descriptor;
+}
+
+static jobject GetSystemClassLoader(JNIEnv* env) {
+ ScopedLocalRef<jclass> cl_klass(env, env->FindClass("java/lang/ClassLoader"));
+ CHECK(cl_klass.get() != nullptr);
+ jmethodID getsystemclassloader_method = env->GetStaticMethodID(cl_klass.get(),
+ "getSystemClassLoader",
+ "()Ljava/lang/ClassLoader;");
+ CHECK(getsystemclassloader_method != nullptr);
+ return env->CallStaticObjectMethod(cl_klass.get(), getsystemclassloader_method);
+}
+
+static jclass FindClassWithClassLoader(JNIEnv* env, const char* class_name, jobject class_loader) {
+ // Create a String of the name.
+ std::string descriptor = android::base::StringPrintf("L%s;", class_name);
+ std::string dot_name = DescriptorToDot(descriptor.c_str());
+ ScopedLocalRef<jstring> name_str(env, env->NewStringUTF(dot_name.c_str()));
+
+ // Call Class.forName with it.
+ ScopedLocalRef<jclass> c_klass(env, env->FindClass("java/lang/Class"));
+ CHECK(c_klass.get() != nullptr);
+ jmethodID forname_method = env->GetStaticMethodID(
+ c_klass.get(),
+ "forName",
+ "(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;");
+ CHECK(forname_method != nullptr);
+
+ return static_cast<jclass>(env->CallStaticObjectMethod(c_klass.get(),
+ forname_method,
+ name_str.get(),
+ JNI_FALSE,
+ class_loader));
+}
+
+jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
+ if (class_loader != nullptr) {
+ return FindClassWithClassLoader(env, class_name, class_loader);
+ }
+
+ jclass from_implied = env->FindClass(class_name);
+ if (from_implied != nullptr) {
+ return from_implied;
+ }
+ env->ExceptionClear();
+
+ ScopedLocalRef<jobject> system_class_loader(env, GetSystemClassLoader(env));
+ CHECK(system_class_loader.get() != nullptr);
+ jclass from_system = FindClassWithClassLoader(env, class_name, system_class_loader.get());
+ if (from_system != nullptr) {
+ return from_system;
+ }
+ env->ExceptionClear();
+
+ // Look at the context classloaders of all threads.
+ jint thread_count;
+ jthread* threads;
+ CheckJvmtiError(jvmti_env, jvmti_env->GetAllThreads(&thread_count, &threads));
+ JvmtiUniquePtr threads_uptr = MakeJvmtiUniquePtr(jvmti_env, threads);
+
+ jclass result = nullptr;
+ for (jint t = 0; t != thread_count; ++t) {
+ // Always loop over all elements, as we need to free the local references.
+ if (result == nullptr) {
+ jvmtiThreadInfo info;
+ CheckJvmtiError(jvmti_env, jvmti_env->GetThreadInfo(threads[t], &info));
+ CheckJvmtiError(jvmti_env, Deallocate(jvmti_env, info.name));
+ if (info.thread_group != nullptr) {
+ env->DeleteLocalRef(info.thread_group);
+ }
+ if (info.context_class_loader != nullptr) {
+ result = FindClassWithClassLoader(env, class_name, info.context_class_loader);
+ env->ExceptionClear();
+ env->DeleteLocalRef(info.context_class_loader);
+ }
+ }
+ env->DeleteLocalRef(threads[t]);
+ }
+
+ if (result != nullptr) {
+ return result;
+ }
+
+ // TODO: Implement scanning *all* classloaders.
+ LOG(FATAL) << "Unimplemented";
+
+ return nullptr;
+}
+
+void BindFunctionsOnClass(jvmtiEnv* jvmti_env, JNIEnv* env, jclass klass) {
+ // Use JVMTI to get the methods.
+ jint method_count;
+ jmethodID* methods;
+ jvmtiError methods_result = jvmti_env->GetClassMethods(klass, &method_count, &methods);
+ CheckJvmtiError(jvmti_env, methods_result);
+
+ // Check each method.
+ for (jint i = 0; i < method_count; ++i) {
+ jint modifiers;
+ jvmtiError mod_result = jvmti_env->GetMethodModifiers(methods[i], &modifiers);
+ CheckJvmtiError(jvmti_env, mod_result);
+ constexpr jint kNative = static_cast<jint>(0x0100);
+ if ((modifiers & kNative) != 0) {
+ BindMethod(jvmti_env, env, klass, methods[i]);
+ }
+ }
+
+ CheckJvmtiError(jvmti_env, Deallocate(jvmti_env, methods));
+}
+
+void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
+ // Use JNI to load the class.
+ ScopedLocalRef<jclass> klass(env, FindClass(jvmti_env, env, class_name, class_loader));
+ CHECK(klass.get() != nullptr) << class_name;
+ BindFunctionsOnClass(jvmti_env, env, klass.get());
+}
+
+} // namespace art
diff --git a/test/ti-agent/jni_binder.h b/test/ti-agent/jni_binder.h
new file mode 100644
index 0000000..e998dc5
--- /dev/null
+++ b/test/ti-agent/jni_binder.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_JNI_BINDER_H_
+#define ART_TEST_TI_AGENT_JNI_BINDER_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace art {
+
+// Find the given classname. First try the implied classloader, then the system classloader,
+// then use JVMTI to find all classloaders.
+jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader);
+
+// Load the class through JNI. Inspect it, find all native methods. Construct the corresponding
+// mangled name, run dlsym and bind the method.
+//
+// This will abort on failure.
+void BindFunctions(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ const char* class_name,
+ jobject class_loader = nullptr);
+
+void BindFunctionsOnClass(jvmtiEnv* jvmti_env, JNIEnv* env, jclass klass);
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_JNI_BINDER_H_
diff --git a/test/ti-agent/jni_helper.h b/test/ti-agent/jni_helper.h
new file mode 100644
index 0000000..0cbc634
--- /dev/null
+++ b/test/ti-agent/jni_helper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_JNI_HELPER_H_
+#define ART_TEST_TI_AGENT_JNI_HELPER_H_
+
+#include "jni.h"
+#include "scoped_local_ref.h"
+
+namespace art {
+
+// Create an object array using a lambda that returns a local ref for each element.
+template <typename T>
+static inline jobjectArray CreateObjectArray(JNIEnv* env,
+ jint length,
+ const char* component_type_descriptor,
+ T src) {
+ if (length < 0) {
+ return nullptr;
+ }
+
+ ScopedLocalRef<jclass> obj_class(env, env->FindClass(component_type_descriptor));
+ if (obj_class.get() == nullptr) {
+ return nullptr;
+ }
+
+ ScopedLocalRef<jobjectArray> ret(env, env->NewObjectArray(length, obj_class.get(), nullptr));
+ if (ret.get() == nullptr) {
+ return nullptr;
+ }
+
+ for (jint i = 0; i < length; ++i) {
+ jobject element = src(i);
+ env->SetObjectArrayElement(ret.get(), static_cast<jint>(i), element);
+ env->DeleteLocalRef(element);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+ }
+
+ return ret.release();
+}
+
+inline bool JniThrowNullPointerException(JNIEnv* env, const char* msg) {
+ if (env->ExceptionCheck()) {
+ env->ExceptionClear();
+ }
+
+ ScopedLocalRef<jclass> exc_class(env, env->FindClass("java/lang/NullPointerException"));
+ if (exc_class.get() == nullptr) {
+ return -1;
+ }
+
+ return env->ThrowNew(exc_class.get(), msg) == JNI_OK;
+}
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_JNI_HELPER_H_
diff --git a/test/ti-agent/jvmti_helper.cc b/test/ti-agent/jvmti_helper.cc
new file mode 100644
index 0000000..598a30f
--- /dev/null
+++ b/test/ti-agent/jvmti_helper.cc
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jvmti_helper.h"
+
+#include <algorithm>
+#include <dlfcn.h>
+#include <stdio.h>
+#include <sstream>
+#include <string.h>
+
+#include "android-base/logging.h"
+#include "scoped_local_ref.h"
+
+namespace art {
+
+void CheckJvmtiError(jvmtiEnv* env, jvmtiError error) {
+ if (error != JVMTI_ERROR_NONE) {
+ char* error_name;
+ jvmtiError name_error = env->GetErrorName(error, &error_name);
+ if (name_error != JVMTI_ERROR_NONE) {
+ LOG(FATAL) << "Unable to get error name for " << error;
+ }
+ LOG(FATAL) << "Unexpected error: " << error_name;
+ }
+}
+
+void SetAllCapabilities(jvmtiEnv* env) {
+ jvmtiCapabilities caps;
+ jvmtiError error1 = env->GetPotentialCapabilities(&caps);
+ CheckJvmtiError(env, error1);
+ jvmtiError error2 = env->AddCapabilities(&caps);
+ CheckJvmtiError(env, error2);
+}
+
+bool JvmtiErrorToException(JNIEnv* env, jvmtiEnv* jvmti_env, jvmtiError error) {
+ if (error == JVMTI_ERROR_NONE) {
+ return false;
+ }
+
+ ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+ if (rt_exception.get() == nullptr) {
+ // CNFE should be pending.
+ return true;
+ }
+
+ char* err;
+ CheckJvmtiError(jvmti_env, jvmti_env->GetErrorName(error, &err));
+
+ env->ThrowNew(rt_exception.get(), err);
+
+ Deallocate(jvmti_env, err);
+ return true;
+}
+
+std::ostream& operator<<(std::ostream& os, const jvmtiError& rhs) {
+ switch (rhs) {
+ case JVMTI_ERROR_NONE:
+ return os << "NONE";
+ case JVMTI_ERROR_INVALID_THREAD:
+ return os << "INVALID_THREAD";
+ case JVMTI_ERROR_INVALID_THREAD_GROUP:
+ return os << "INVALID_THREAD_GROUP";
+ case JVMTI_ERROR_INVALID_PRIORITY:
+ return os << "INVALID_PRIORITY";
+ case JVMTI_ERROR_THREAD_NOT_SUSPENDED:
+ return os << "THREAD_NOT_SUSPENDED";
+ case JVMTI_ERROR_THREAD_SUSPENDED:
+ return os << "THREAD_SUSPENDED";
+ case JVMTI_ERROR_THREAD_NOT_ALIVE:
+ return os << "THREAD_NOT_ALIVE";
+ case JVMTI_ERROR_INVALID_OBJECT:
+ return os << "INVALID_OBJECT";
+ case JVMTI_ERROR_INVALID_CLASS:
+ return os << "INVALID_CLASS";
+ case JVMTI_ERROR_CLASS_NOT_PREPARED:
+ return os << "CLASS_NOT_PREPARED";
+ case JVMTI_ERROR_INVALID_METHODID:
+ return os << "INVALID_METHODID";
+ case JVMTI_ERROR_INVALID_LOCATION:
+ return os << "INVALID_LOCATION";
+ case JVMTI_ERROR_INVALID_FIELDID:
+ return os << "INVALID_FIELDID";
+ case JVMTI_ERROR_NO_MORE_FRAMES:
+ return os << "NO_MORE_FRAMES";
+ case JVMTI_ERROR_OPAQUE_FRAME:
+ return os << "OPAQUE_FRAME";
+ case JVMTI_ERROR_TYPE_MISMATCH:
+ return os << "TYPE_MISMATCH";
+ case JVMTI_ERROR_INVALID_SLOT:
+ return os << "INVALID_SLOT";
+ case JVMTI_ERROR_DUPLICATE:
+ return os << "DUPLICATE";
+ case JVMTI_ERROR_NOT_FOUND:
+ return os << "NOT_FOUND";
+ case JVMTI_ERROR_INVALID_MONITOR:
+ return os << "INVALID_MONITOR";
+ case JVMTI_ERROR_NOT_MONITOR_OWNER:
+ return os << "NOT_MONITOR_OWNER";
+ case JVMTI_ERROR_INTERRUPT:
+ return os << "INTERRUPT";
+ case JVMTI_ERROR_INVALID_CLASS_FORMAT:
+ return os << "INVALID_CLASS_FORMAT";
+ case JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION:
+ return os << "CIRCULAR_CLASS_DEFINITION";
+ case JVMTI_ERROR_FAILS_VERIFICATION:
+ return os << "FAILS_VERIFICATION";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED:
+ return os << "UNSUPPORTED_REDEFINITION_METHOD_ADDED";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED:
+ return os << "UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED";
+ case JVMTI_ERROR_INVALID_TYPESTATE:
+ return os << "INVALID_TYPESTATE";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED:
+ return os << "UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED:
+ return os << "UNSUPPORTED_REDEFINITION_METHOD_DELETED";
+ case JVMTI_ERROR_UNSUPPORTED_VERSION:
+ return os << "UNSUPPORTED_VERSION";
+ case JVMTI_ERROR_NAMES_DONT_MATCH:
+ return os << "NAMES_DONT_MATCH";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_CLASS_MODIFIERS_CHANGED:
+ return os << "UNSUPPORTED_REDEFINITION_CLASS_MODIFIERS_CHANGED";
+ case JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED:
+ return os << "UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED";
+ case JVMTI_ERROR_UNMODIFIABLE_CLASS:
+ return os << "JVMTI_ERROR_UNMODIFIABLE_CLASS";
+ case JVMTI_ERROR_NOT_AVAILABLE:
+ return os << "NOT_AVAILABLE";
+ case JVMTI_ERROR_MUST_POSSESS_CAPABILITY:
+ return os << "MUST_POSSESS_CAPABILITY";
+ case JVMTI_ERROR_NULL_POINTER:
+ return os << "NULL_POINTER";
+ case JVMTI_ERROR_ABSENT_INFORMATION:
+ return os << "ABSENT_INFORMATION";
+ case JVMTI_ERROR_INVALID_EVENT_TYPE:
+ return os << "INVALID_EVENT_TYPE";
+ case JVMTI_ERROR_ILLEGAL_ARGUMENT:
+ return os << "ILLEGAL_ARGUMENT";
+ case JVMTI_ERROR_NATIVE_METHOD:
+ return os << "NATIVE_METHOD";
+ case JVMTI_ERROR_CLASS_LOADER_UNSUPPORTED:
+ return os << "CLASS_LOADER_UNSUPPORTED";
+ case JVMTI_ERROR_OUT_OF_MEMORY:
+ return os << "OUT_OF_MEMORY";
+ case JVMTI_ERROR_ACCESS_DENIED:
+ return os << "ACCESS_DENIED";
+ case JVMTI_ERROR_WRONG_PHASE:
+ return os << "WRONG_PHASE";
+ case JVMTI_ERROR_INTERNAL:
+ return os << "INTERNAL";
+ case JVMTI_ERROR_UNATTACHED_THREAD:
+ return os << "UNATTACHED_THREAD";
+ case JVMTI_ERROR_INVALID_ENVIRONMENT:
+ return os << "INVALID_ENVIRONMENT";
+ }
+ LOG(FATAL) << "Unexpected error type " << static_cast<int>(rhs);
+ __builtin_unreachable();
+}
+
+} // namespace art
diff --git a/test/ti-agent/jvmti_helper.h b/test/ti-agent/jvmti_helper.h
new file mode 100644
index 0000000..66d88d0
--- /dev/null
+++ b/test/ti-agent/jvmti_helper.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_JVMTI_HELPER_H_
+#define ART_TEST_TI_AGENT_JVMTI_HELPER_H_
+
+#include "jni.h"
+#include "jvmti.h"
+#include <memory>
+#include <ostream>
+
+#include "android-base/logging.h"
+
+namespace art {
+
+// Add all capabilities to the given env.
+void SetAllCapabilities(jvmtiEnv* env);
+
+// Check whether the given error is NONE. If not, print out the corresponding error message
+// and abort.
+void CheckJvmtiError(jvmtiEnv* env, jvmtiError error);
+
+// Convert the given error to a RuntimeException with a message derived from the error. Returns
+// true on error, false if error is JVMTI_ERROR_NONE.
+bool JvmtiErrorToException(JNIEnv* env, jvmtiEnv* jvmti_env, jvmtiError error);
+
+class JvmtiDeleter {
+ public:
+ JvmtiDeleter() : env_(nullptr) {}
+ explicit JvmtiDeleter(jvmtiEnv* env) : env_(env) {}
+
+ JvmtiDeleter(JvmtiDeleter&) = default;
+ JvmtiDeleter(JvmtiDeleter&&) = default;
+ JvmtiDeleter& operator=(const JvmtiDeleter&) = default;
+
+ void operator()(unsigned char* ptr) const {
+ CHECK(env_ != nullptr);
+ jvmtiError ret = env_->Deallocate(ptr);
+ CheckJvmtiError(env_, ret);
+ }
+
+ private:
+ mutable jvmtiEnv* env_;
+};
+
+using JvmtiUniquePtr = std::unique_ptr<unsigned char, JvmtiDeleter>;
+
+template <typename T>
+static inline JvmtiUniquePtr MakeJvmtiUniquePtr(jvmtiEnv* env, T* mem) {
+ return JvmtiUniquePtr(reinterpret_cast<unsigned char*>(mem), JvmtiDeleter(env));
+}
+
+template <typename T>
+static inline jvmtiError Deallocate(jvmtiEnv* env, T* mem) {
+ return env->Deallocate(reinterpret_cast<unsigned char*>(mem));
+}
+
+// To print jvmtiError. Does not rely on GetErrorName, so is an approximation.
+std::ostream& operator<<(std::ostream& os, const jvmtiError& rhs);
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_JVMTI_HELPER_H_
diff --git a/test/ti-agent/scoped_local_ref.h b/test/ti-agent/scoped_local_ref.h
new file mode 100644
index 0000000..daa1583
--- /dev/null
+++ b/test/ti-agent/scoped_local_ref.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_SCOPED_LOCAL_REF_H_
+#define ART_TEST_TI_AGENT_SCOPED_LOCAL_REF_H_
+
+#include "jni.h"
+
+#include <stddef.h>
+
+#include "android-base/macros.h"
+
+namespace art {
+
+template<typename T>
+class ScopedLocalRef {
+ public:
+ ScopedLocalRef(JNIEnv* env, T localRef) : mEnv(env), mLocalRef(localRef) {
+ }
+
+ ~ScopedLocalRef() {
+ reset();
+ }
+
+ void reset(T ptr = nullptr) {
+ if (ptr != mLocalRef) {
+ if (mLocalRef != nullptr) {
+ mEnv->DeleteLocalRef(mLocalRef);
+ }
+ mLocalRef = ptr;
+ }
+ }
+
+ T release() __attribute__((warn_unused_result)) {
+ T localRef = mLocalRef;
+ mLocalRef = nullptr;
+ return localRef;
+ }
+
+ T get() const {
+ return mLocalRef;
+ }
+
+ private:
+ JNIEnv* const mEnv;
+ T mLocalRef;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedLocalRef);
+};
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_SCOPED_LOCAL_REF_H_
diff --git a/test/ti-agent/scoped_primitive_array.h b/test/ti-agent/scoped_primitive_array.h
new file mode 100644
index 0000000..1649ed9
--- /dev/null
+++ b/test/ti-agent/scoped_primitive_array.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_SCOPED_PRIMITIVE_ARRAY_H_
+#define ART_TEST_TI_AGENT_SCOPED_PRIMITIVE_ARRAY_H_
+
+#include "jni.h"
+
+#include "android-base/macros.h"
+
+#include "jni_helper.h"
+
+namespace art {
+
+#ifdef POINTER_TYPE
+#error POINTER_TYPE is defined.
+#else
+#define POINTER_TYPE(T) T* /* NOLINT */
+#endif
+
+#ifdef REFERENCE_TYPE
+#error REFERENCE_TYPE is defined.
+#else
+#define REFERENCE_TYPE(T) T& /* NOLINT */
+#endif
+
+// ScopedBooleanArrayRO, ScopedByteArrayRO, ScopedCharArrayRO, ScopedDoubleArrayRO,
+// ScopedFloatArrayRO, ScopedIntArrayRO, ScopedLongArrayRO, and ScopedShortArrayRO provide
+// convenient read-only access to Java arrays from JNI code. This is cheaper than read-write
+// access and should be used by default.
+#define INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(PRIMITIVE_TYPE, NAME) \
+ class Scoped ## NAME ## ArrayRO { \
+ public: \
+ explicit Scoped ## NAME ## ArrayRO(JNIEnv* env) \
+ : mEnv(env), mJavaArray(nullptr), mRawArray(nullptr), mSize(0) {} \
+ Scoped ## NAME ## ArrayRO(JNIEnv* env, PRIMITIVE_TYPE ## Array javaArray) \
+ : mEnv(env) { \
+ if (javaArray == nullptr) { \
+ mJavaArray = nullptr; \
+ mSize = 0; \
+ mRawArray = nullptr; \
+ JniThrowNullPointerException(env, nullptr); \
+ } else { \
+ reset(javaArray); \
+ } \
+ } \
+ ~Scoped ## NAME ## ArrayRO() { \
+ if (mRawArray != nullptr && mRawArray != mBuffer) { \
+ mEnv->Release ## NAME ## ArrayElements(mJavaArray, mRawArray, JNI_ABORT); \
+ } \
+ } \
+ void reset(PRIMITIVE_TYPE ## Array javaArray) { \
+ mJavaArray = javaArray; \
+ mSize = mEnv->GetArrayLength(mJavaArray); \
+ if (mSize <= kBufferSize) { \
+ mEnv->Get ## NAME ## ArrayRegion(mJavaArray, 0, mSize, mBuffer); \
+ mRawArray = mBuffer; \
+ } else { \
+ mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, nullptr); \
+ } \
+ } \
+ const PRIMITIVE_TYPE* get() const { return mRawArray; } \
+ PRIMITIVE_TYPE ## Array getJavaArray() const { return mJavaArray; } \
+ const PRIMITIVE_TYPE& operator[](size_t n) const { return mRawArray[n]; } \
+ size_t size() const { return mSize; } \
+ private: \
+ static constexpr jsize kBufferSize = 1024; \
+ JNIEnv* const mEnv; \
+ PRIMITIVE_TYPE ## Array mJavaArray; \
+ POINTER_TYPE(PRIMITIVE_TYPE) mRawArray; \
+ jsize mSize; \
+ PRIMITIVE_TYPE mBuffer[kBufferSize]; \
+ DISALLOW_COPY_AND_ASSIGN(Scoped ## NAME ## ArrayRO); \
+ }
+
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jboolean, Boolean);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jbyte, Byte);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jchar, Char);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jdouble, Double);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jfloat, Float);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jint, Int);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jlong, Long);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jshort, Short);
+
+#undef INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO
+
+// ScopedBooleanArrayRW, ScopedByteArrayRW, ScopedCharArrayRW, ScopedDoubleArrayRW,
+// ScopedFloatArrayRW, ScopedIntArrayRW, ScopedLongArrayRW, and ScopedShortArrayRW provide
+// convenient read-write access to Java arrays from JNI code. These are more expensive,
+// since they entail a copy back onto the Java heap, and should only be used when necessary.
+#define INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(PRIMITIVE_TYPE, NAME) \
+ class Scoped ## NAME ## ArrayRW { \
+ public: \
+ explicit Scoped ## NAME ## ArrayRW(JNIEnv* env) \
+ : mEnv(env), mJavaArray(nullptr), mRawArray(nullptr) {} \
+ Scoped ## NAME ## ArrayRW(JNIEnv* env, PRIMITIVE_TYPE ## Array javaArray) \
+ : mEnv(env), mJavaArray(javaArray), mRawArray(nullptr) { \
+ if (mJavaArray == nullptr) { \
+ JniThrowNullPointerException(env, nullptr); \
+ } else { \
+ mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, nullptr); \
+ } \
+ } \
+ ~Scoped ## NAME ## ArrayRW() { \
+ if (mRawArray) { \
+ mEnv->Release ## NAME ## ArrayElements(mJavaArray, mRawArray, 0); \
+ } \
+ } \
+ void reset(PRIMITIVE_TYPE ## Array javaArray) { \
+ mJavaArray = javaArray; \
+ mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, nullptr); \
+ } \
+ const PRIMITIVE_TYPE* get() const { return mRawArray; } \
+ PRIMITIVE_TYPE ## Array getJavaArray() const { return mJavaArray; } \
+ const PRIMITIVE_TYPE& operator[](size_t n) const { return mRawArray[n]; } \
+ POINTER_TYPE(PRIMITIVE_TYPE) get() { return mRawArray; } \
+ REFERENCE_TYPE(PRIMITIVE_TYPE) operator[](size_t n) { return mRawArray[n]; } \
+ size_t size() const { return mEnv->GetArrayLength(mJavaArray); } \
+ private: \
+ JNIEnv* const mEnv; \
+ PRIMITIVE_TYPE ## Array mJavaArray; \
+ POINTER_TYPE(PRIMITIVE_TYPE) mRawArray; \
+ DISALLOW_COPY_AND_ASSIGN(Scoped ## NAME ## ArrayRW); \
+ }
+
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jboolean, Boolean);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jbyte, Byte);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jchar, Char);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jdouble, Double);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jfloat, Float);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jint, Int);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jlong, Long);
+INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jshort, Short);
+
+#undef INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW
+#undef POINTER_TYPE
+#undef REFERENCE_TYPE
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_SCOPED_PRIMITIVE_ARRAY_H_
diff --git a/test/ti-agent/scoped_utf_chars.h b/test/ti-agent/scoped_utf_chars.h
new file mode 100644
index 0000000..422caaf
--- /dev/null
+++ b/test/ti-agent/scoped_utf_chars.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_SCOPED_UTF_CHARS_H_
+#define ART_TEST_TI_AGENT_SCOPED_UTF_CHARS_H_
+
+#include "jni.h"
+
+#include <string.h>
+
+#include "android-base/macros.h"
+
+#include "jni_helper.h"
+
+namespace art {
+
+class ScopedUtfChars {
+ public:
+ ScopedUtfChars(JNIEnv* env, jstring s) : env_(env), string_(s) {
+ if (s == nullptr) {
+ utf_chars_ = nullptr;
+ JniThrowNullPointerException(env, nullptr);
+ } else {
+ utf_chars_ = env->GetStringUTFChars(s, nullptr);
+ }
+ }
+
+ ScopedUtfChars(ScopedUtfChars&& rhs) :
+ env_(rhs.env_), string_(rhs.string_), utf_chars_(rhs.utf_chars_) {
+ rhs.env_ = nullptr;
+ rhs.string_ = nullptr;
+ rhs.utf_chars_ = nullptr;
+ }
+
+ ~ScopedUtfChars() {
+ if (utf_chars_) {
+ env_->ReleaseStringUTFChars(string_, utf_chars_);
+ }
+ }
+
+ ScopedUtfChars& operator=(ScopedUtfChars&& rhs) {
+ if (this != &rhs) {
+ // Delete the currently owned UTF chars.
+ this->~ScopedUtfChars();
+
+ // Move the rhs ScopedUtfChars and zero it out.
+ env_ = rhs.env_;
+ string_ = rhs.string_;
+ utf_chars_ = rhs.utf_chars_;
+ rhs.env_ = nullptr;
+ rhs.string_ = nullptr;
+ rhs.utf_chars_ = nullptr;
+ }
+ return *this;
+ }
+
+ const char* c_str() const {
+ return utf_chars_;
+ }
+
+ size_t size() const {
+ return strlen(utf_chars_);
+ }
+
+ const char& operator[](size_t n) const {
+ return utf_chars_[n];
+ }
+
+ private:
+ JNIEnv* env_;
+ jstring string_;
+ const char* utf_chars_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedUtfChars);
+};
+
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_SCOPED_UTF_CHARS_H_
diff --git a/test/ti-agent/common_load.h b/test/ti-agent/test_env.cc
similarity index 78%
copy from test/ti-agent/common_load.h
copy to test/ti-agent/test_env.cc
index e79a006..cf47f22 100644
--- a/test/ti-agent/common_load.h
+++ b/test/ti-agent/test_env.cc
@@ -14,15 +14,20 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
-
-#include "jvmti.h"
+#include "test_env.h"
namespace art {
-extern jvmtiEnv* jvmti_env;
+jvmtiEnv* jvmti_env = nullptr;
+
+static bool gRuntimeIsJVM = false;
+
+bool IsJVM() {
+ return gRuntimeIsJVM;
+}
+
+void SetJVM(bool b) {
+ gRuntimeIsJVM = b;
+}
} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
diff --git a/test/ti-agent/common_load.h b/test/ti-agent/test_env.h
similarity index 81%
rename from test/ti-agent/common_load.h
rename to test/ti-agent/test_env.h
index e79a006..2eb631c 100644
--- a/test/ti-agent/common_load.h
+++ b/test/ti-agent/test_env.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#ifndef ART_TEST_TI_AGENT_TEST_ENV_H_
+#define ART_TEST_TI_AGENT_TEST_ENV_H_
#include "jvmti.h"
@@ -23,6 +23,9 @@
extern jvmtiEnv* jvmti_env;
+bool IsJVM();
+void SetJVM(bool b);
+
} // namespace art
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#endif // ART_TEST_TI_AGENT_TEST_ENV_H_
diff --git a/test/ti-agent/common_load.h b/test/ti-agent/ti_macros.h
similarity index 65%
copy from test/ti-agent/common_load.h
copy to test/ti-agent/ti_macros.h
index e79a006..d913383 100644
--- a/test/ti-agent/common_load.h
+++ b/test/ti-agent/ti_macros.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,15 +14,13 @@
* limitations under the License.
*/
-#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
-#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#ifndef ART_TEST_TI_AGENT_TI_MACROS_H_
+#define ART_TEST_TI_AGENT_TI_MACROS_H_
-#include "jvmti.h"
+#include "android-base/macros.h"
-namespace art {
+#define FINAL final
+#define OVERRIDE override
+#define UNREACHABLE __builtin_unreachable
-extern jvmtiEnv* jvmti_env;
-
-} // namespace art
-
-#endif // ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#endif // ART_TEST_TI_AGENT_TI_MACROS_H_
diff --git a/test/ti-agent/ti_utf.h b/test/ti-agent/ti_utf.h
new file mode 100644
index 0000000..341e106
--- /dev/null
+++ b/test/ti-agent/ti_utf.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_TI_UTF_H_
+#define ART_TEST_TI_AGENT_TI_UTF_H_
+
+#include <inttypes.h>
+#include <string.h>
+
+#include "android-base/logging.h"
+
+namespace art {
+namespace ti {
+
+inline size_t CountModifiedUtf8Chars(const char* utf8, size_t byte_count) {
+ DCHECK_LE(byte_count, strlen(utf8));
+ size_t len = 0;
+ const char* end = utf8 + byte_count;
+ for (; utf8 < end; ++utf8) {
+ int ic = *utf8;
+ len++;
+ if (LIKELY((ic & 0x80) == 0)) {
+ // One-byte encoding.
+ continue;
+ }
+ // Two- or three-byte encoding.
+ utf8++;
+ if ((ic & 0x20) == 0) {
+ // Two-byte encoding.
+ continue;
+ }
+ utf8++;
+ if ((ic & 0x10) == 0) {
+ // Three-byte encoding.
+ continue;
+ }
+
+ // Four-byte encoding: needs to be converted into a surrogate
+ // pair.
+ utf8++;
+ len++;
+ }
+ return len;
+}
+
+inline uint16_t GetTrailingUtf16Char(uint32_t maybe_pair) {
+ return static_cast<uint16_t>(maybe_pair >> 16);
+}
+
+inline uint16_t GetLeadingUtf16Char(uint32_t maybe_pair) {
+ return static_cast<uint16_t>(maybe_pair & 0x0000FFFF);
+}
+
+inline uint32_t GetUtf16FromUtf8(const char** utf8_data_in) {
+ const uint8_t one = *(*utf8_data_in)++;
+ if ((one & 0x80) == 0) {
+ // one-byte encoding
+ return one;
+ }
+
+ const uint8_t two = *(*utf8_data_in)++;
+ if ((one & 0x20) == 0) {
+ // two-byte encoding
+ return ((one & 0x1f) << 6) | (two & 0x3f);
+ }
+
+ const uint8_t three = *(*utf8_data_in)++;
+ if ((one & 0x10) == 0) {
+ return ((one & 0x0f) << 12) | ((two & 0x3f) << 6) | (three & 0x3f);
+ }
+
+ // Four byte encodings need special handling. We'll have
+ // to convert them into a surrogate pair.
+ const uint8_t four = *(*utf8_data_in)++;
+
+ // Since this is a 4 byte UTF-8 sequence, it will lie between
+ // U+10000 and U+1FFFFF.
+ //
+ // TODO: What do we do about values in (U+10FFFF, U+1FFFFF) ? The
+ // spec says they're invalid but nobody appears to check for them.
+ const uint32_t code_point = ((one & 0x0f) << 18) | ((two & 0x3f) << 12)
+ | ((three & 0x3f) << 6) | (four & 0x3f);
+
+ uint32_t surrogate_pair = 0;
+ // Step two: Write out the high (leading) surrogate to the bottom 16 bits
+  // of the 32 bit type.
+ surrogate_pair |= ((code_point >> 10) + 0xd7c0) & 0xffff;
+ // Step three : Write out the low (trailing) surrogate to the top 16 bits.
+ surrogate_pair |= ((code_point & 0x03ff) + 0xdc00) << 16;
+
+ return surrogate_pair;
+}
+
+inline void ConvertUtf16ToModifiedUtf8(char* utf8_out,
+ size_t byte_count,
+ const uint16_t* utf16_in,
+ size_t char_count) {
+ if (LIKELY(byte_count == char_count)) {
+ // Common case where all characters are ASCII.
+ const uint16_t *utf16_end = utf16_in + char_count;
+ for (const uint16_t *p = utf16_in; p < utf16_end;) {
+ *utf8_out++ = static_cast<char>(*p++);
+ }
+ return;
+ }
+
+ // String contains non-ASCII characters.
+ while (char_count--) {
+ const uint16_t ch = *utf16_in++;
+ if (ch > 0 && ch <= 0x7f) {
+ *utf8_out++ = ch;
+ } else {
+      // char_count == 0 here implies we've encountered an unpaired
+ // surrogate and we have no choice but to encode it as 3-byte UTF
+ // sequence. Note that unpaired surrogates can occur as a part of
+ // "normal" operation.
+ if ((ch >= 0xd800 && ch <= 0xdbff) && (char_count > 0)) {
+ const uint16_t ch2 = *utf16_in;
+
+ // Check if the other half of the pair is within the expected
+ // range. If it isn't, we will have to emit both "halves" as
+ // separate 3 byte sequences.
+ if (ch2 >= 0xdc00 && ch2 <= 0xdfff) {
+ utf16_in++;
+ char_count--;
+ const uint32_t code_point = (ch << 10) + ch2 - 0x035fdc00;
+ *utf8_out++ = (code_point >> 18) | 0xf0;
+ *utf8_out++ = ((code_point >> 12) & 0x3f) | 0x80;
+ *utf8_out++ = ((code_point >> 6) & 0x3f) | 0x80;
+ *utf8_out++ = (code_point & 0x3f) | 0x80;
+ continue;
+ }
+ }
+
+ if (ch > 0x07ff) {
+ // Three byte encoding.
+ *utf8_out++ = (ch >> 12) | 0xe0;
+ *utf8_out++ = ((ch >> 6) & 0x3f) | 0x80;
+ *utf8_out++ = (ch & 0x3f) | 0x80;
+ } else /*(ch > 0x7f || ch == 0)*/ {
+ // Two byte encoding.
+ *utf8_out++ = (ch >> 6) | 0xc0;
+ *utf8_out++ = (ch & 0x3f) | 0x80;
+ }
+ }
+ }
+}
+
+inline size_t CountUtf8Bytes(const uint16_t* chars, size_t char_count) {
+ size_t result = 0;
+ const uint16_t *end = chars + char_count;
+ while (chars < end) {
+ const uint16_t ch = *chars++;
+ if (LIKELY(ch != 0 && ch < 0x80)) {
+ result++;
+ continue;
+ }
+ if (ch < 0x800) {
+ result += 2;
+ continue;
+ }
+ if (ch >= 0xd800 && ch < 0xdc00) {
+ if (chars < end) {
+ const uint16_t ch2 = *chars;
+ // If we find a properly paired surrogate, we emit it as a 4 byte
+ // UTF sequence. If we find an unpaired leading or trailing surrogate,
+        // we emit it as a 3 byte sequence as we would have done earlier.
+ if (ch2 >= 0xdc00 && ch2 < 0xe000) {
+ chars++;
+ result += 4;
+ continue;
+ }
+ }
+ }
+ result += 3;
+ }
+ return result;
+}
+
+} // namespace ti
+} // namespace art
+
+#endif // ART_TEST_TI_AGENT_TI_UTF_H_
diff --git a/tools/art b/tools/art
index 91d6e27..933ad7a 100644
--- a/tools/art
+++ b/tools/art
@@ -16,106 +16,261 @@
# shell dialect that should work on the host (e.g. bash), and
# Android (e.g. mksh).
-function follow_links() {
- if [ z"$BASH_SOURCE" != z ]; then
- file="$BASH_SOURCE"
- else
- file="$0"
- fi
- while [ -h "$file" ]; do
- # On Mac OS, readlink -f doesn't work.
- file="$(readlink "$file")"
- done
- echo "$file"
-}
+# Globals
+ARCHS={arm,arm64,mips,mips64,x86,x86_64}
+ART_BINARY=dalvikvm
+DELETE_ANDROID_DATA="no"
+LAUNCH_WRAPPER=
+LIBART=libart.so
+JIT_PROFILE="no"
+VERBOSE="no"
+
+# Follow all sym links to get the program name.
+if [ z"$BASH_SOURCE" != z ]; then
+ PROG_NAME="$BASH_SOURCE"
+else
+ PROG_NAME="$0"
+fi
+while [ -h "$PROG_NAME" ]; do
+ # On Mac OS, readlink -f doesn't work.
+ PROG_NAME="$(readlink "$PROG_NAME")"
+done
function find_libdir() {
- # Get the actual file, $DALVIKVM may be a symbolic link.
+ # Get the actual file, $1 is the ART_BINARY_PATH and may be a symbolic link.
# Use realpath instead of readlink because Android does not have a readlink.
- if [[ "$(realpath "$ANDROID_ROOT/bin/$DALVIKVM")" == *dalvikvm64 ]]; then
+ if [[ "$(realpath "$1")" == *dalvikvm64 ]]; then
echo "lib64"
else
echo "lib"
fi
}
-invoke_with=
-DALVIKVM=dalvikvm
-LIBART=libart.so
+function replace_compiler_filter_with_interepret_only() {
+ ARGS_WITH_INTERPRET_ONLY=("$@")
-while true; do
- if [ "$1" = "--invoke-with" ]; then
+ found="false"
+ ((index=0))
+ while ((index <= $#)); do
+ what="${ARGS_WITH_INTERPRET_ONLY[$index]}"
+
+ case "$what" in
+ --compiler-filter=*)
+ ARGS_WITH_INTERPRET_ONLY[$index]="--compiler-filter=interpret-only"
+ found="true"
+ ;;
+ esac
+
+ ((index++))
shift
- invoke_with="$invoke_with $1"
- shift
- elif [ "$1" = "-d" ]; then
- LIBART="libartd.so"
- shift
- elif [ "$1" = "--32" ]; then
- DALVIKVM=dalvikvm32
- shift
- elif [ "$1" = "--64" ]; then
- DALVIKVM=dalvikvm64
- shift
- elif [ "$1" = "--perf" ]; then
- PERF="record"
- shift
- elif [ "$1" = "--perf-report" ]; then
- PERF="report"
- shift
- elif expr "$1" : "--" >/dev/null 2>&1; then
- echo "unknown option: $1" 1>&2
- exit 1
- else
- break
+ done
+ if [ "$found" != "true" ]; then
+ ARGS_WITH_INTERPRET_ONLY=(-Xcompiler-option --compiler-filter=interpret-only "${ARGS_WITH_INTERPRET_ONLY[@]}")
fi
+}
+
+function usage() {
+ cat 1>&2 <<EOF
+Usage: art [OPTIONS] [--] [ART_OPTIONS] CLASS
+
+Supported OPTIONS include:
+ --32 Use the 32-bit Android Runtime.
+ --64 Use the 64-bit Android Runtime.
+ --callgrind Launch the Android Runtime in callgrind.
+ -d Use the debug ART library (libartd.so).
+ --debug Equivalent to -d.
+ --gdb Launch the Android Runtime in gdb.
+ --help Display usage message.
+ --invoke-with <program> Launch the Android Runtime in <program>.
+ --perf Launch the Android Runtime with perf recording.
+ --perf-report Launch the Android Runtime with perf recording with
+ report upon completion.
+ --profile Run with profiling, then run using profile data.
+ --verbose Run script verbosely.
+
+The ART_OPTIONS are passed directly to the Android Runtime.
+
+Example:
+ art --32 -cp my_classes.dex MainClass
+
+Common errors:
+ 1) Not having core.art available (see $ANDROID_BUILD_TOP/art/Android.mk).
+ eg m -j32 build-art-host
+ 2) Not having boot.art available (see $ANDROID_BUILD_TOP/build/make/core/dex_preopt_libart_boot.mk)
+ eg m -j32 out/target/product/generic_x86_64/dex_bootjars/system/framework/x86_64/boot.art
+EOF
+}
+
+function clean_android_data() {
+ if [ "$DELETE_ANDROID_DATA" = "yes" ]; then
+ rm -rf $ANDROID_DATA
+ fi
+}
+
+function verbose_run() {
+ if [ "$VERBOSE" = "yes" ]; then
+ echo "$@"
+ fi
+ eval "$@"
+}
+
+function run_art() {
+ verbose_run ANDROID_DATA=$ANDROID_DATA \
+ ANDROID_ROOT=$ANDROID_ROOT \
+ LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
+ PATH=$ANDROID_ROOT/bin:$PATH \
+ LD_USE_LOAD_BIAS=1 \
+ $LAUNCH_WRAPPER $ART_BINARY_PATH $lib \
+ -XXlib:$LIBART \
+ -Xnorelocate \
+ -Ximage:$ANDROID_ROOT/framework/core.art \
+ "$@"
+}
+
+while [[ "$1" = "-"* ]]; do
+ case $1 in
+ --)
+ # No more arguments for this script.
+ shift
+ break
+ ;;
+ --32)
+ ART_BINARY=dalvikvm32
+ ;;
+ --64)
+ ART_BINARY=dalvikvm64
+ ;;
+ --callgrind)
+ LAUNCH_WRAPPER="valgrind --tool=callgrind"
+ ;;
+ -d)
+ ;& # Fallthrough
+ --debug)
+ LIBART="libartd.so"
+ ;;
+ --gdb)
+ LIBART="libartd.so"
+ LAUNCH_WRAPPER="gdb --args"
+ ;;
+ --help)
+ usage
+ exit 0
+ ;;
+ --invoke-with)
+ LAUNCH_WRAPPER=$2
+ shift
+ ;;
+ --perf)
+ PERF="record"
+ ;;
+ --perf-report)
+ PERF="report"
+ ;;
+ --profile)
+ JIT_PROFILE="yes"
+ ;;
+ --verbose)
+ VERBOSE="yes"
+ ;;
+ --*)
+ echo "unknown option: $1" 1>&2
+ usage
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
done
-PROG_NAME="$(follow_links)"
+if [ $# -eq 0 ]; then
+ usage
+ exit 1
+fi
+
PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
ANDROID_ROOT=$PROG_DIR/..
-LIBDIR=$(find_libdir)
-LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
-DEBUG_OPTION=""
+ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
-DELETE_ANDROID_DATA=false
+if [ ! -x "$ART_BINARY_PATH" ]; then
+ cat 1>&2 <<EOF
+Android Runtime not found: $ART_BINARY_PATH
+This script should be in the same directory as the Android Runtime ($ART_BINARY).
+EOF
+ exit 1
+fi
+
+LIBDIR="$(find_libdir $ART_BINARY_PATH)"
+LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
+EXTRA_OPTIONS=""
+
# If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
# and ensure we delete it at the end.
if [ "$ANDROID_DATA" = "/data" ] || [ "$ANDROID_DATA" = "" ]; then
ANDROID_DATA=$PWD/android-data$$
- mkdir -p $ANDROID_DATA/dalvik-cache/{arm,arm64,x86,x86_64}
- DELETE_ANDROID_DATA=true
+ mkdir -p $ANDROID_DATA/dalvik-cache/$ARCHS
+ DELETE_ANDROID_DATA="yes"
fi
-if [ z"$PERF" != z ]; then
- invoke_with="perf record -g -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
- DEBUG_OPTION="-Xcompiler-option --generate-debug-info"
+if [ "$PERF" != "" ]; then
+ LAUNCH_WRAPPER="perf record -g -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
+ EXTRA_OPTIONS="-Xcompiler-option --generate-debug-info"
fi
-# We use the PIC core image to work with perf.
-ANDROID_DATA=$ANDROID_DATA \
- ANDROID_ROOT=$ANDROID_ROOT \
- LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
- PATH=$ANDROID_ROOT/bin:$PATH \
- LD_USE_LOAD_BIAS=1 \
- $invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
- -XXlib:$LIBART \
- -Xnorelocate \
- -Ximage:$ANDROID_ROOT/framework/core.art \
- $DEBUG_OPTION \
- "$@"
+if [ "$JIT_PROFILE" = "yes" ]; then
+ # Create the profile. The runtime expects profiles to be created before
+ # execution.
+ PROFILE_PATH="$ANDROID_DATA/primary.prof"
+ touch $PROFILE_PATH
+ # Replace the compiler filter with interpret-only so that we
+ # can capture the profile.
+ ARGS_WITH_INTERPRET_ONLY=
+ replace_compiler_filter_with_interepret_only "$@"
+
+ run_art -Xjitsaveprofilinginfo \
+ -Xps-min-methods-to-save:1 \
+ -Xps-min-classes-to-save:1 \
+ -Xps-min-notification-before-wake:10 \
+ -Xps-profile-path:$PROFILE_PATH \
+ -Xusejit:true \
+ "${ARGS_WITH_INTERPRET_ONLY[@]}" \
+ "&>" "$ANDROID_DATA/profile_gen.log"
+ EXIT_STATUS=$?
+
+ if [ $EXIT_STATUS != 0 ]; then
+ cat "$ANDROID_DATA/profile_gen.log"
+ clean_android_data
+ exit $EXIT_STATUS
+ fi
+
+ # Wipe dalvik-cache to prepare it for the next invocation.
+ rm -rf $ANDROID_DATA/dalvik-cache/$ARCHS/*
+
+ # Append arguments so next invocation of run_art uses the profile.
+ EXTRA_OPTIONS="$EXTRA_OPTIONS -Xcompiler-option --profile-file=$PROFILE_PATH"
+fi
+
+# Protect additional arguments in quotes to preserve whitespaces when evaluated.
+# This is for run-jdwp-test.sh which uses this script and has arguments with
+# whitespaces when running on device.
+while [ $# -gt 0 ]; do
+ EXTRA_OPTIONS="$EXTRA_OPTIONS \"$1\""
+ shift
+done
+
+run_art $EXTRA_OPTIONS
EXIT_STATUS=$?
-if [ z"$PERF" != z ]; then
- if [ z"$PERF" = zreport ]; then
+if [ "$PERF" != "" ]; then
+ if [ "$PERF" = report ]; then
perf report -i $ANDROID_DATA/perf.data
fi
echo "Perf data saved in: $ANDROID_DATA/perf.data"
else
- if [ "$DELETE_ANDROID_DATA" = "true" ]; then
- rm -rf $ANDROID_DATA
- fi
+ # Perf output is placed under $ANDROID_DATA so not cleaned when perf options used.
+ clean_android_data
fi
exit $EXIT_STATUS
diff --git a/tools/cpplint_presubmit.py b/tools/cpplint_presubmit.py
new file mode 100755
index 0000000..4781517
--- /dev/null
+++ b/tools/cpplint_presubmit.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO We should unify this with build/Android.cpplint.mk.
+
+import os
+import pathlib
+import subprocess
+import sys
+
+IGNORED_FILES = {"runtime/elf.h", "runtime/openjdkjvmti/include/jvmti.h"}
+
+INTERESTING_SUFFIXES = {".h", ".cc"}
+
+CPPLINT_FLAGS = [
+ '--filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf',
+ '--quiet',
+]
+
+def is_interesting(f):
+ """
+ Returns true if this is a file we want to run through cpplint before uploading. False otherwise.
+ """
+ path = pathlib.Path(f)
+ return f not in IGNORED_FILES and path.suffix in INTERESTING_SUFFIXES and path.exists()
+
+def get_changed_files(commit):
+ """
+ Gets the files changed in the given commit.
+ """
+ return subprocess.check_output(
+ ["git", 'diff-tree', '--no-commit-id', '--name-only', '-r', commit],
+ stderr=subprocess.STDOUT,
+ universal_newlines=True).split()
+
+def run_cpplint(files):
+ """
+ Runs cpplint on the given files.
+ """
+ if len(files) == 0:
+ return
+ sys.exit(subprocess.call(['tools/cpplint.py'] + CPPLINT_FLAGS + files))
+
+def main():
+ if 'PREUPLOAD_COMMIT' in os.environ:
+ commit = os.environ['PREUPLOAD_COMMIT']
+ else:
+ print("WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'")
+ commit = "HEAD"
+ files_to_check = [f for f in get_changed_files(commit) if is_interesting(f)]
+ run_cpplint(files_to_check)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh
new file mode 100755
index 0000000..8d8e2bb
--- /dev/null
+++ b/tools/golem/build-target.sh
@@ -0,0 +1,384 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -d art ]]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+ALL_CONFIGS=(linux-ia32 linux-x64 linux-armv8 linux-armv7 android-armv8 android-armv7)
+
+usage() {
+ local config
+ local golem_target
+
+ (cat << EOF
+ Usage: $(basename "${BASH_SOURCE[0]}") [--golem=<target>] --machine-type=MACHINE_TYPE
+ [--tarball[=<target>.tar.gz]]
+
+ Build minimal art binaries required to run golem benchmarks either
+ locally or on the golem servers.
+
+ Creates the \$MACHINE_TYPE binaries in your \$OUT_DIR, and if --tarball was specified,
+ it also tars the results of the build together into your <target.tar.gz> file.
+ --------------------------------------------------------
+ Required Flags:
+ --machine-type=MT Specify the machine type that will be built.
+
+  Optional Flags:
+ --golem=<target> Builds with identical commands that Golem servers use.
+ --tarball[=o.tgz] Tar/gz the results. File name defaults to <machine_type>.tar.gz
+ -j<num> Specify how many jobs to use for parallelism.
+ --help Print this help listing.
+ --showcommands Show commands as they are being executed.
+ --simulate Print commands only, don't execute commands.
+EOF
+ ) | sed -e 's/^[[:space:]][[:space:]]//g' >&2 # Strip leading whitespace from heredoc.
+
+ echo >&2 "Available machine types:"
+ for config in "${ALL_CONFIGS[@]}"; do
+ echo >&2 " $config"
+ done
+
+ echo >&2
+ echo >&2 "Available Golem targets:"
+ while IFS='' read -r golem_target; do
+ echo >&2 " $golem_target"
+ done < <("$(thisdir)/env" --list-targets)
+}
+
+# Check if $1 element is in array $2
+contains_element() {
+ local e
+ for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
+ return 1
+}
+
+# Display a command, but don't execute it, if --showcommands was set.
+show_command() {
+ if [[ $showcommands == "showcommands" ]]; then
+ echo "$@"
+ fi
+}
+
+# Execute a command, displaying it if --showcommands was set.
+# If --simulate is used, command is not executed.
+execute() {
+ show_command "$@"
+ execute_noshow "$@"
+}
+
+# Execute a command unless --simulate was used.
+execute_noshow() {
+ if [[ $simulate == "simulate" ]]; then
+ return 0
+ fi
+
+ local prog="$1"
+ shift
+ "$prog" "$@"
+}
+
+# Export environment variable, echoing it to screen.
+setenv() {
+ local name="$1"
+ local value="$2"
+
+ export $name="$value"
+ echo export $name="$value"
+}
+
+# Export environment variable, echoing $3 to screen ($3 is meant to be unevaluated).
+setenv_escape() {
+ local name="$1"
+ local value="$2"
+ local escaped_value="$3"
+
+ export $name="$value"
+ echo export $name="$escaped_value"
+}
+
+log_usage_error() {
+ echo >&2 "ERROR: " "$@"
+ echo >&2 " See --help for the correct usage information."
+ exit 1
+}
+
+log_fatal() {
+ echo >&2 "FATAL: " "$@"
+ exit 2
+}
+
+# Get the directory of this script.
+thisdir() {
+ (\cd "$(dirname "${BASH_SOURCE[0]}")" && pwd )
+}
+
+# Get the path to the top of the Android source tree.
+gettop() {
+ if [[ "x$ANDROID_BUILD_TOP" != "x" ]]; then
+ echo "$ANDROID_BUILD_TOP";
+ else
+ echo "$(thisdir)/../../.."
+ fi
+}
+
+# Get a build variable from the Android build system.
+get_build_var() {
+ local varname="$1"
+
+ # include the desired target product/build-variant
+ # which won't be set in our env if neither we nor the user first executed
+ # source build/envsetup.sh (e.g. if simulating from a fresh shell).
+ local extras
+ [[ -n $target_product ]] && extras+=" TARGET_PRODUCT=$target_product"
+ [[ -n $target_build_variant ]] && extras+=" TARGET_BUILD_VARIANT=$target_build_variant"
+
+ # call dumpvar-$name from the makefile system.
+ (\cd "$(gettop)";
+ CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \
+ command make --no-print-directory -f build/core/config.mk \
+ $extras \
+ dumpvar-$varname)
+}
+
+# Defaults from command-line.
+
+mode="" # blank or 'golem' if --golem was specified.
+golem_target="" # --golem=$golem_target
+config="" # --machine-type=$config
+j_arg="-j8"
+showcommands=""
+simulate=""
+make_tarball=""
+tarball=""
+
+# Parse command line arguments
+
+while [[ "$1" != "" ]]; do
+ case "$1" in
+ --help)
+ usage
+ exit 1
+ ;;
+ --golem=*)
+ mode="golem"
+ golem_target="${1##--golem=}"
+
+ if [[ "x$golem_target" == x ]]; then
+ log_usage_error "Missing --golem target type."
+ fi
+
+ shift
+ ;;
+ --machine-type=*)
+ config="${1##--machine-type=}"
+ if ! contains_element "$config" "${ALL_CONFIGS[@]}"; then
+ log_usage_error "Invalid --machine-type value '$config'"
+ fi
+ shift
+ ;;
+ --tarball)
+ tarball="" # reuse the machine type name.
+ make_tarball="make_tarball"
+ shift
+ ;;
+ --tarball=*)
+ tarball="${1##--tarball=}"
+ make_tarball="make_tarball"
+ shift
+ ;;
+ -j*)
+ j_arg="$1"
+ shift
+ ;;
+ --showcommands)
+ showcommands="showcommands"
+ shift
+ ;;
+ --simulate)
+ simulate="simulate"
+ shift
+ ;;
+ *)
+ log_usage_error "Unknown options $1"
+ ;;
+ esac
+done
+
+###################################
+###################################
+###################################
+
+if [[ -z $config ]]; then
+ log_usage_error "--machine-type option is required."
+fi
+
+# --tarball defaults to the --machine-type value with .tar.gz.
+tarball="${tarball:-$config.tar.gz}"
+
+target_product="$TARGET_PRODUCT"
+target_build_variant="$TARGET_BUILD_VARIANT"
+
+# If not using --golem, use whatever the user had lunch'd prior to this script.
+if [[ $mode == "golem" ]]; then
+ # This section is intended solely to be executed by a golem build server.
+
+ target_build_variant=eng
+ case "$config" in
+ *-armv7)
+ target_product="arm_krait"
+ ;;
+ *-armv8)
+ target_product="armv8"
+ ;;
+ *)
+ target_product="sdk"
+ ;;
+ esac
+
+ if [[ $target_product = arm* ]]; then
+ # If using the regular manifest, e.g. 'master'
+ # The lunch command for arm will assuredly fail because we don't have device/generic/art.
+ #
+ # Print a human-readable error message instead of trying to lunch and failing there.
+ if ! [[ -d "$(gettop)/device/generic/art" ]]; then
+ log_fatal "Missing device/generic/art directory. Perhaps try master-art repo manifest?\n" \
+ " Cannot build ARM targets (arm_krait, armv8) for Golem." >&2
+ fi
+ # We could try to keep on simulating but it seems brittle because we won't have the proper
+ # build variables to output the right strings.
+ fi
+
+ # Get this particular target's environment variables (e.g. ART read barrier on/off).
+ source "$(thisdir)"/env "$golem_target" || exit 1
+
+ lunch_target="$target_product-$target_build_variant"
+
+ execute 'source' build/envsetup.sh
+ # Build generic targets (as opposed to something specific like aosp_angler-eng).
+ execute lunch "$lunch_target"
+ setenv JACK_SERVER false
+ setenv_escape JACK_REPOSITORY "$PWD/prebuilts/sdk/tools/jacks" '$PWD/prebuilts/sdk/tools/jacks'
+ # Golem uses master-art repository which is missing a lot of other libraries.
+ setenv SOONG_ALLOW_MISSING_DEPENDENCIES true
+ # Golem may be missing tools such as javac from its path.
+ setenv_escape PATH "/usr/lib/jvm/java-8-openjdk-amd64/bin/:$PATH" '/usr/lib/jvm/java-8-openjdk-amd64/bin/:$PATH'
+else
+ # Look up the default variables from the build system if they weren't set already.
+ [[ -z $target_product ]] && target_product="$(get_build_var TARGET_PRODUCT)"
+ [[ -z $target_build_variant ]] && target_build_variant="$(get_build_var TARGET_BUILD_VARIANT)"
+fi
+
+# Defaults for all machine types.
+make_target="build-art-target-golem"
+out_dir="out/x86_64"
+root_dir_var="PRODUCT_OUT"
+strip_symbols=false
+bit64_suffix=""
+tar_directories=(system data/art-test)
+
+# Per-machine type overrides
+if [[ $config == linux-arm* ]]; then
+ setenv ART_TARGET_LINUX true
+fi
+
+case "$config" in
+ linux-ia32|linux-x64)
+ root_dir_var="HOST_OUT"
+ # Android strips target builds automatically, but not host builds.
+ strip_symbols=true
+ make_target="build-art-host-golem"
+
+ if [[ $config == linux-ia32 ]]; then
+ out_dir="out/x86"
+ setenv HOST_PREFER_32_BIT true
+ else
+ bit64_suffix="64"
+ fi
+
+ tar_directories=(bin framework usr lib${bit64_suffix})
+ ;;
+ *-armv8)
+ bit64_suffix="64"
+ ;;
+ *-armv7)
+ ;;
+ *)
+ log_fatal "Unsupported machine-type '$config'"
+esac
+
+# Golem benchmark run commands expect a certain $OUT_DIR to be set,
+# so specify it here.
+#
+# Note: It is questionable if we want to customize this since users
+# could alternatively probably use their own build directly (and forgo this script).
+setenv OUT_DIR "$out_dir"
+root_dir="$(get_build_var "$root_dir_var")"
+
+if [[ $mode == "golem" ]]; then
+ # For golem-style running only.
+ # Sets the DT_INTERP to this path in every .so we can run the
+ # non-system version of dalvikvm with our own copies of the dependencies (e.g. our own libc++).
+ if [[ $config == android-* ]]; then
+ # TODO: the linker can be relative to the binaries
+ # (which is what we do for linux-armv8 and linux-armv7)
+ golem_run_path="/data/local/tmp/runner/"
+ else
+ golem_run_path=""
+ fi
+
+ # Only do this for target builds. Host doesn't need this.
+ if [[ $config == *-arm* ]]; then
+ setenv CUSTOM_TARGET_LINKER "${golem_run_path}${root_dir}/system/bin/linker${bit64_suffix}"
+ fi
+fi
+
+#
+# Main command execution below here.
+# (everything prior to this just sets up environment variables,
+# and maybe calls lunch).
+#
+
+execute make "${j_arg}" "${make_target}"
+
+if $strip_symbols; then
+ # Further reduce size by stripping symbols.
+ execute_noshow strip $root_dir/bin/* || true
+ show_command strip $root_dir/bin/'*' '|| true'
+ execute_noshow strip $root_dir/lib${bit64_suffix}/'*'
+ show_command strip $root_dir/lib${bit64_suffix}/'*'
+fi
+
+if [[ "$make_tarball" == "make_tarball" ]]; then
+ # Create a tarball which is required for the golem build resource.
+ # (In particular, each golem benchmark's run commands depend on a list of resource files
+ # in order to have all the files it needs to actually execute,
+ # and this tarball would satisfy that particular target+machine-type's requirements).
+ dirs_rooted=()
+ for tar_dir in "${tar_directories[@]}"; do
+ dirs_rooted+=("$root_dir/$tar_dir")
+ done
+
+ execute tar -czf "${tarball}" "${dirs_rooted[@]}" --exclude .git --exclude .gitignore
+ tar_result=$?
+ if [[ $tar_result -ne 0 ]]; then
+ [[ -f $tarball ]] && rm $tarball
+ fi
+
+ show_command '[[ $? -ne 0 ]] && rm' "$tarball"
+fi
+
diff --git a/tools/golem/env b/tools/golem/env
new file mode 100755
index 0000000..187ba3a
--- /dev/null
+++ b/tools/golem/env
@@ -0,0 +1,117 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Export some environment variables used by ART's Android.mk/Android.bp
+# build systems to configure ART [to use a different implementation].
+#
+# Currently only varies on ART_USE_READ_BARRIER for a concurrent/non-concurrent
+# flavor of the ART garbage collector.
+#
+# Only meant for golem use since when building ART directly, one can/should set
+# these environment flags themselves.
+#
+# These environment flags are not really meant here to be for "correctness",
+# but rather telling the ART C++ to use alternative algorithms.
+# In other words, the same exact binary build with a different "target"
+# should run in the same context (e.g. it does not change arch or the OS it's built for).
+#
+
+setenv() {
+  # Export an environment variable and echo the assignment for build logging.
+  local var="$1"
+  local val="$2"
+  export $var="$val"
+  echo export $var="$val"
+}
+
+# The full set of golem target names this script accepts; anything else is rejected.
+# Perhaps we should be less strict? (The -cc suffix selects the read-barrier GC below.)
+ALL_TARGETS=(art-interpreter art-opt art-jit art-jit-cc art-opt-cc art-opt-debuggable art-vdex)
+
+usage() {
+  local prog="$(basename $0)"  # script name shown in the Usage line
+  echo >&2 "Usage: $prog (--list-targets | <target-name>)"
+  echo >&2 ""
+  echo >&2 "Exports the necessary ART environment variables"
+  echo >&2 "to pass to the Golem build to correctly configure ART."
+  echo >&2 "--------------------------------------------------------"
+  echo >&2 "Required Arguments:"
+  echo >&2 "  <target-name>       Specify the golem target to get environment variables for."
+  echo >&2 ""
+  echo >&2 "Optional Flags:"
+  echo >&2 "  --list-targets      Display all the targets. Do not require the main target-name."
+  echo >&2 "  --help              Print this help listing."
+  echo >&2 ""
+  echo >&2 "Available Targets:"
+  list_targets 2 "  "
+}
+
+list_targets() {
+  # Print every known target name, one per line, optionally indented.
+  local fd="${1:-1}"   # output file descriptor; defaults to stdout
+  local indent="$2"
+  for tgt in "${ALL_TARGETS[@]}"; do
+    printf '%s%s\n' "$indent" "$tgt" >&$fd
+  done
+}
+
+
+# Check whether $1 equals any of the remaining arguments; returns 0 on match, 1 otherwise.
+contains_element() {
+  local item
+  for item in "${@:2}"; do [[ "$item" == "$1" ]] && return 0; done
+  return 1
+}
+
+main() {
+  if [[ $# -lt 1 ]]; then
+    usage
+    exit 1
+  fi
+
+  # Explicitly requested help is not an error, so exit 0 (unlike the missing-arg case above).
+  if [[ "$1" == "--help" ]]; then
+    usage
+    exit 0
+  fi
+
+  if [[ "$1" == "--list-targets" ]]; then
+    list_targets
+    exit 0
+  fi
+
+  local selected_target="$1"
+  if ! contains_element "$selected_target" "${ALL_TARGETS[@]}"; then
+    echo "ERROR: Invalid target value '$selected_target'" >&2
+    exit 1
+  fi
+
+  # Concurrent-copying (-cc) targets enable the read-barrier GC; all others disable it.
+  case "$selected_target" in
+    *-cc)
+      setenv ART_USE_READ_BARRIER true
+      ;;
+    *)
+      setenv ART_USE_READ_BARRIER false
+      ;;
+  esac
+  # Make smaller .tar.gz files by excluding debug targets.
+  setenv ART_BUILD_TARGET_DEBUG false
+  setenv ART_BUILD_HOST_DEBUG false
+  setenv USE_DEX2OAT_DEBUG false
+}
+
+main "$@"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index e0aae46..07d7fb8 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -141,13 +141,6 @@
names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
},
{
- description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
- result: EXEC_TIMEOUT,
- modes: [device],
- names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
- bug: 22786792
-},
-{
description: "Lack of IPv6 on some buildbot slaves",
result: EXEC_FAILED,
names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
@@ -216,55 +209,5 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
-},
-{
- description: "Linker issues with libjavacoretests",
- result: EXEC_FAILED,
- bug: 35417197,
- modes: [device],
- names: [
- "dalvik.system.JniTest#testGetSuperclass",
- "dalvik.system.JniTest#testPassingBooleans",
- "dalvik.system.JniTest#testPassingBytes",
- "dalvik.system.JniTest#testPassingChars",
- "dalvik.system.JniTest#testPassingClass",
- "dalvik.system.JniTest#testPassingDoubles",
- "dalvik.system.JniTest#testPassingFloats",
- "dalvik.system.JniTest#testPassingInts",
- "dalvik.system.JniTest#testPassingLongs",
- "dalvik.system.JniTest#testPassingObjectReferences",
- "dalvik.system.JniTest#testPassingShorts",
- "dalvik.system.JniTest#testPassingThis",
- "libcore.java.lang.OldSystemTest#test_load",
- "libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
- "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
- "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
- "libcore.java.lang.ThreadTest#testGetStackTrace",
- "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
- "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
- "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
- "libcore.java.lang.ThreadTest#testNativeThreadNames",
- "libcore.java.lang.ThreadTest#testParkUntilWithUnderflowValue",
- "libcore.java.lang.ThreadTest#testThreadDoubleStart",
- "libcore.java.lang.ThreadTest#testThreadInterrupted",
- "libcore.java.lang.ThreadTest#testThreadRestart",
- "libcore.java.lang.ThreadTest#testThreadSleep",
- "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
- "libcore.java.lang.ThreadTest#testThreadWakeup",
- "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
- "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
- "libcore.java.util.TimeZoneTest#testDisplayNamesWithScript",
- "libcore.java.util.zip.ZipEntryTest#testCommentAndExtraInSameOrder",
- "libcore.java.util.zip.ZipEntryTest#testMaxLengthExtra",
- "libcore.util.NativeAllocationRegistryTest#testBadSize",
- "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNullArguments",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_y",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_yy"
- ]
}
]
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 7eaaaf9..6c2c072 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -61,6 +61,9 @@
echo -e "${green}Battery info${nc}"
adb shell dumpsys battery
+echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
+adb shell killall -9 /system/bin/logd
+
echo -e "${green}Setting adb buffer size to 32MB${nc}"
adb logcat -G 32M
adb logcat -g